Snap for 10447354 from 18a097787889700b94da4c0d8be261aa1e09001d to mainline-cellbroadcast-release

Change-Id: I449a87b7c1a65bfcb3354050fc27b09e66b7a801
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..893bcb4
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,11 @@
+package(default_visibility = ["//visibility:public"])
+
+[filegroup(
+    name = platform,
+    srcs = glob([
+        "%s/bin/*" % platform,
+        "%s/lib/*" % platform,
+        "%s/lib64/*" % platform,
+        "%s/clang-headers/*" % platform,
+    ]),
+) for platform in ("linux-x86", "darwin-x86")]
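
For context on the hunk above: the Starlark list comprehension stamps out one filegroup per host platform ("linux-x86" and "darwin-x86"), each globbing that platform's bin, lib, lib64, and clang-headers directories, so downstream packages can depend on an entire prebuilt toolchain directory by label. A minimal sketch of how a consumer BUILD file might reference one of these targets follows; the package path //prebuilts/clang-tools and the genrule are illustrative assumptions, not part of this change:

# Hypothetical consumer BUILD file; the label path is an assumption.
genrule(
    name = "list_linux_tools",
    # Refers to the per-platform filegroup defined in the BUILD.bazel above.
    srcs = ["//prebuilts/clang-tools:linux-x86"],
    outs = ["linux_tools.txt"],
    # $(locations ...) expands to the paths of every file in the filegroup.
    cmd = "ls $(locations //prebuilts/clang-tools:linux-x86) > $@",
)

Note that the darwin-x86 lib -> lib64 symlink added later in this change is what lets the "%s/lib/*" glob match on that platform.
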
diff --git a/darwin-x86/bin/versioner b/darwin-x86/bin/versioner
index be05183..37c3438 100755
--- a/darwin-x86/bin/versioner
+++ b/darwin-x86/bin/versioner
Binary files differ
diff --git a/darwin-x86/clang-headers b/darwin-x86/clang-headers
index fabd016..e62fa5e 120000
--- a/darwin-x86/clang-headers
+++ b/darwin-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/14.0.2/include
\ No newline at end of file
+lib/clang/16.0.2/include
\ No newline at end of file
diff --git a/darwin-x86/lib b/darwin-x86/lib
new file mode 120000
index 0000000..6b60c32
--- /dev/null
+++ b/darwin-x86/lib
@@ -0,0 +1 @@
+lib64
\ No newline at end of file
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h b/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
deleted file mode 100644
index 32af848..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
+++ /dev/null
@@ -1,18498 +0,0 @@
-//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_H_
-#define _OPENCL_H_
-
-#include "opencl-c-base.h"
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifndef cl_khr_depth_images
-#define cl_khr_depth_images
-#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
-#ifdef cl_khr_3d_image_writes
-#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
-#endif //cl_khr_3d_image_writes
-#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-
-#if (defined(__OPENCL_CPP_VERSION__) ||                                        \
-     (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&                              \
-    (defined(__SPIR__) || defined(__SPIRV__))
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // (defined(__OPENCL_CPP_VERSION__) ||
-       //  (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
-       // (defined(__SPIR__) || defined(__SPIRV__))
-
-#define __ovld __attribute__((overloadable))
-#define __conv __attribute__((convergent))
-
-// Optimizations
-#define __purefn __attribute__((pure))
-#define __cnfn __attribute__((const))
-
-
-// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
-
-char __ovld __cnfn convert_char_rte(char);
-char __ovld __cnfn convert_char_sat_rte(char);
-char __ovld __cnfn convert_char_rtz(char);
-char __ovld __cnfn convert_char_sat_rtz(char);
-char __ovld __cnfn convert_char_rtp(char);
-char __ovld __cnfn convert_char_sat_rtp(char);
-char __ovld __cnfn convert_char_rtn(char);
-char __ovld __cnfn convert_char_sat_rtn(char);
-char __ovld __cnfn convert_char(char);
-char __ovld __cnfn convert_char_sat(char);
-char __ovld __cnfn convert_char_rte(uchar);
-char __ovld __cnfn convert_char_sat_rte(uchar);
-char __ovld __cnfn convert_char_rtz(uchar);
-char __ovld __cnfn convert_char_sat_rtz(uchar);
-char __ovld __cnfn convert_char_rtp(uchar);
-char __ovld __cnfn convert_char_sat_rtp(uchar);
-char __ovld __cnfn convert_char_rtn(uchar);
-char __ovld __cnfn convert_char_sat_rtn(uchar);
-char __ovld __cnfn convert_char(uchar);
-char __ovld __cnfn convert_char_sat(uchar);
-char __ovld __cnfn convert_char_rte(short);
-char __ovld __cnfn convert_char_sat_rte(short);
-char __ovld __cnfn convert_char_rtz(short);
-char __ovld __cnfn convert_char_sat_rtz(short);
-char __ovld __cnfn convert_char_rtp(short);
-char __ovld __cnfn convert_char_sat_rtp(short);
-char __ovld __cnfn convert_char_rtn(short);
-char __ovld __cnfn convert_char_sat_rtn(short);
-char __ovld __cnfn convert_char(short);
-char __ovld __cnfn convert_char_sat(short);
-char __ovld __cnfn convert_char_rte(ushort);
-char __ovld __cnfn convert_char_sat_rte(ushort);
-char __ovld __cnfn convert_char_rtz(ushort);
-char __ovld __cnfn convert_char_sat_rtz(ushort);
-char __ovld __cnfn convert_char_rtp(ushort);
-char __ovld __cnfn convert_char_sat_rtp(ushort);
-char __ovld __cnfn convert_char_rtn(ushort);
-char __ovld __cnfn convert_char_sat_rtn(ushort);
-char __ovld __cnfn convert_char(ushort);
-char __ovld __cnfn convert_char_sat(ushort);
-char __ovld __cnfn convert_char_rte(int);
-char __ovld __cnfn convert_char_sat_rte(int);
-char __ovld __cnfn convert_char_rtz(int);
-char __ovld __cnfn convert_char_sat_rtz(int);
-char __ovld __cnfn convert_char_rtp(int);
-char __ovld __cnfn convert_char_sat_rtp(int);
-char __ovld __cnfn convert_char_rtn(int);
-char __ovld __cnfn convert_char_sat_rtn(int);
-char __ovld __cnfn convert_char(int);
-char __ovld __cnfn convert_char_sat(int);
-char __ovld __cnfn convert_char_rte(uint);
-char __ovld __cnfn convert_char_sat_rte(uint);
-char __ovld __cnfn convert_char_rtz(uint);
-char __ovld __cnfn convert_char_sat_rtz(uint);
-char __ovld __cnfn convert_char_rtp(uint);
-char __ovld __cnfn convert_char_sat_rtp(uint);
-char __ovld __cnfn convert_char_rtn(uint);
-char __ovld __cnfn convert_char_sat_rtn(uint);
-char __ovld __cnfn convert_char(uint);
-char __ovld __cnfn convert_char_sat(uint);
-char __ovld __cnfn convert_char_rte(long);
-char __ovld __cnfn convert_char_sat_rte(long);
-char __ovld __cnfn convert_char_rtz(long);
-char __ovld __cnfn convert_char_sat_rtz(long);
-char __ovld __cnfn convert_char_rtp(long);
-char __ovld __cnfn convert_char_sat_rtp(long);
-char __ovld __cnfn convert_char_rtn(long);
-char __ovld __cnfn convert_char_sat_rtn(long);
-char __ovld __cnfn convert_char(long);
-char __ovld __cnfn convert_char_sat(long);
-char __ovld __cnfn convert_char_rte(ulong);
-char __ovld __cnfn convert_char_sat_rte(ulong);
-char __ovld __cnfn convert_char_rtz(ulong);
-char __ovld __cnfn convert_char_sat_rtz(ulong);
-char __ovld __cnfn convert_char_rtp(ulong);
-char __ovld __cnfn convert_char_sat_rtp(ulong);
-char __ovld __cnfn convert_char_rtn(ulong);
-char __ovld __cnfn convert_char_sat_rtn(ulong);
-char __ovld __cnfn convert_char(ulong);
-char __ovld __cnfn convert_char_sat(ulong);
-char __ovld __cnfn convert_char_rte(float);
-char __ovld __cnfn convert_char_sat_rte(float);
-char __ovld __cnfn convert_char_rtz(float);
-char __ovld __cnfn convert_char_sat_rtz(float);
-char __ovld __cnfn convert_char_rtp(float);
-char __ovld __cnfn convert_char_sat_rtp(float);
-char __ovld __cnfn convert_char_rtn(float);
-char __ovld __cnfn convert_char_sat_rtn(float);
-char __ovld __cnfn convert_char(float);
-char __ovld __cnfn convert_char_sat(float);
-uchar __ovld __cnfn convert_uchar_rte(char);
-uchar __ovld __cnfn convert_uchar_sat_rte(char);
-uchar __ovld __cnfn convert_uchar_rtz(char);
-uchar __ovld __cnfn convert_uchar_sat_rtz(char);
-uchar __ovld __cnfn convert_uchar_rtp(char);
-uchar __ovld __cnfn convert_uchar_sat_rtp(char);
-uchar __ovld __cnfn convert_uchar_rtn(char);
-uchar __ovld __cnfn convert_uchar_sat_rtn(char);
-uchar __ovld __cnfn convert_uchar(char);
-uchar __ovld __cnfn convert_uchar_sat(char);
-uchar __ovld __cnfn convert_uchar_rte(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
-uchar __ovld __cnfn convert_uchar_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_rtn(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
-uchar __ovld __cnfn convert_uchar(uchar);
-uchar __ovld __cnfn convert_uchar_sat(uchar);
-uchar __ovld __cnfn convert_uchar_rte(short);
-uchar __ovld __cnfn convert_uchar_sat_rte(short);
-uchar __ovld __cnfn convert_uchar_rtz(short);
-uchar __ovld __cnfn convert_uchar_sat_rtz(short);
-uchar __ovld __cnfn convert_uchar_rtp(short);
-uchar __ovld __cnfn convert_uchar_sat_rtp(short);
-uchar __ovld __cnfn convert_uchar_rtn(short);
-uchar __ovld __cnfn convert_uchar_sat_rtn(short);
-uchar __ovld __cnfn convert_uchar(short);
-uchar __ovld __cnfn convert_uchar_sat(short);
-uchar __ovld __cnfn convert_uchar_rte(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
-uchar __ovld __cnfn convert_uchar_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_rtn(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
-uchar __ovld __cnfn convert_uchar(ushort);
-uchar __ovld __cnfn convert_uchar_sat(ushort);
-uchar __ovld __cnfn convert_uchar_rte(int);
-uchar __ovld __cnfn convert_uchar_sat_rte(int);
-uchar __ovld __cnfn convert_uchar_rtz(int);
-uchar __ovld __cnfn convert_uchar_sat_rtz(int);
-uchar __ovld __cnfn convert_uchar_rtp(int);
-uchar __ovld __cnfn convert_uchar_sat_rtp(int);
-uchar __ovld __cnfn convert_uchar_rtn(int);
-uchar __ovld __cnfn convert_uchar_sat_rtn(int);
-uchar __ovld __cnfn convert_uchar(int);
-uchar __ovld __cnfn convert_uchar_sat(int);
-uchar __ovld __cnfn convert_uchar_rte(uint);
-uchar __ovld __cnfn convert_uchar_sat_rte(uint);
-uchar __ovld __cnfn convert_uchar_rtz(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
-uchar __ovld __cnfn convert_uchar_rtp(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
-uchar __ovld __cnfn convert_uchar_rtn(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
-uchar __ovld __cnfn convert_uchar(uint);
-uchar __ovld __cnfn convert_uchar_sat(uint);
-uchar __ovld __cnfn convert_uchar_rte(long);
-uchar __ovld __cnfn convert_uchar_sat_rte(long);
-uchar __ovld __cnfn convert_uchar_rtz(long);
-uchar __ovld __cnfn convert_uchar_sat_rtz(long);
-uchar __ovld __cnfn convert_uchar_rtp(long);
-uchar __ovld __cnfn convert_uchar_sat_rtp(long);
-uchar __ovld __cnfn convert_uchar_rtn(long);
-uchar __ovld __cnfn convert_uchar_sat_rtn(long);
-uchar __ovld __cnfn convert_uchar(long);
-uchar __ovld __cnfn convert_uchar_sat(long);
-uchar __ovld __cnfn convert_uchar_rte(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
-uchar __ovld __cnfn convert_uchar_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_rtn(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
-uchar __ovld __cnfn convert_uchar(ulong);
-uchar __ovld __cnfn convert_uchar_sat(ulong);
-uchar __ovld __cnfn convert_uchar_rte(float);
-uchar __ovld __cnfn convert_uchar_sat_rte(float);
-uchar __ovld __cnfn convert_uchar_rtz(float);
-uchar __ovld __cnfn convert_uchar_sat_rtz(float);
-uchar __ovld __cnfn convert_uchar_rtp(float);
-uchar __ovld __cnfn convert_uchar_sat_rtp(float);
-uchar __ovld __cnfn convert_uchar_rtn(float);
-uchar __ovld __cnfn convert_uchar_sat_rtn(float);
-uchar __ovld __cnfn convert_uchar(float);
-uchar __ovld __cnfn convert_uchar_sat(float);
-
-short __ovld __cnfn convert_short_rte(char);
-short __ovld __cnfn convert_short_sat_rte(char);
-short __ovld __cnfn convert_short_rtz(char);
-short __ovld __cnfn convert_short_sat_rtz(char);
-short __ovld __cnfn convert_short_rtp(char);
-short __ovld __cnfn convert_short_sat_rtp(char);
-short __ovld __cnfn convert_short_rtn(char);
-short __ovld __cnfn convert_short_sat_rtn(char);
-short __ovld __cnfn convert_short(char);
-short __ovld __cnfn convert_short_sat(char);
-short __ovld __cnfn convert_short_rte(uchar);
-short __ovld __cnfn convert_short_sat_rte(uchar);
-short __ovld __cnfn convert_short_rtz(uchar);
-short __ovld __cnfn convert_short_sat_rtz(uchar);
-short __ovld __cnfn convert_short_rtp(uchar);
-short __ovld __cnfn convert_short_sat_rtp(uchar);
-short __ovld __cnfn convert_short_rtn(uchar);
-short __ovld __cnfn convert_short_sat_rtn(uchar);
-short __ovld __cnfn convert_short(uchar);
-short __ovld __cnfn convert_short_sat(uchar);
-short __ovld __cnfn convert_short_rte(short);
-short __ovld __cnfn convert_short_sat_rte(short);
-short __ovld __cnfn convert_short_rtz(short);
-short __ovld __cnfn convert_short_sat_rtz(short);
-short __ovld __cnfn convert_short_rtp(short);
-short __ovld __cnfn convert_short_sat_rtp(short);
-short __ovld __cnfn convert_short_rtn(short);
-short __ovld __cnfn convert_short_sat_rtn(short);
-short __ovld __cnfn convert_short(short);
-short __ovld __cnfn convert_short_sat(short);
-short __ovld __cnfn convert_short_rte(ushort);
-short __ovld __cnfn convert_short_sat_rte(ushort);
-short __ovld __cnfn convert_short_rtz(ushort);
-short __ovld __cnfn convert_short_sat_rtz(ushort);
-short __ovld __cnfn convert_short_rtp(ushort);
-short __ovld __cnfn convert_short_sat_rtp(ushort);
-short __ovld __cnfn convert_short_rtn(ushort);
-short __ovld __cnfn convert_short_sat_rtn(ushort);
-short __ovld __cnfn convert_short(ushort);
-short __ovld __cnfn convert_short_sat(ushort);
-short __ovld __cnfn convert_short_rte(int);
-short __ovld __cnfn convert_short_sat_rte(int);
-short __ovld __cnfn convert_short_rtz(int);
-short __ovld __cnfn convert_short_sat_rtz(int);
-short __ovld __cnfn convert_short_rtp(int);
-short __ovld __cnfn convert_short_sat_rtp(int);
-short __ovld __cnfn convert_short_rtn(int);
-short __ovld __cnfn convert_short_sat_rtn(int);
-short __ovld __cnfn convert_short(int);
-short __ovld __cnfn convert_short_sat(int);
-short __ovld __cnfn convert_short_rte(uint);
-short __ovld __cnfn convert_short_sat_rte(uint);
-short __ovld __cnfn convert_short_rtz(uint);
-short __ovld __cnfn convert_short_sat_rtz(uint);
-short __ovld __cnfn convert_short_rtp(uint);
-short __ovld __cnfn convert_short_sat_rtp(uint);
-short __ovld __cnfn convert_short_rtn(uint);
-short __ovld __cnfn convert_short_sat_rtn(uint);
-short __ovld __cnfn convert_short(uint);
-short __ovld __cnfn convert_short_sat(uint);
-short __ovld __cnfn convert_short_rte(long);
-short __ovld __cnfn convert_short_sat_rte(long);
-short __ovld __cnfn convert_short_rtz(long);
-short __ovld __cnfn convert_short_sat_rtz(long);
-short __ovld __cnfn convert_short_rtp(long);
-short __ovld __cnfn convert_short_sat_rtp(long);
-short __ovld __cnfn convert_short_rtn(long);
-short __ovld __cnfn convert_short_sat_rtn(long);
-short __ovld __cnfn convert_short(long);
-short __ovld __cnfn convert_short_sat(long);
-short __ovld __cnfn convert_short_rte(ulong);
-short __ovld __cnfn convert_short_sat_rte(ulong);
-short __ovld __cnfn convert_short_rtz(ulong);
-short __ovld __cnfn convert_short_sat_rtz(ulong);
-short __ovld __cnfn convert_short_rtp(ulong);
-short __ovld __cnfn convert_short_sat_rtp(ulong);
-short __ovld __cnfn convert_short_rtn(ulong);
-short __ovld __cnfn convert_short_sat_rtn(ulong);
-short __ovld __cnfn convert_short(ulong);
-short __ovld __cnfn convert_short_sat(ulong);
-short __ovld __cnfn convert_short_rte(float);
-short __ovld __cnfn convert_short_sat_rte(float);
-short __ovld __cnfn convert_short_rtz(float);
-short __ovld __cnfn convert_short_sat_rtz(float);
-short __ovld __cnfn convert_short_rtp(float);
-short __ovld __cnfn convert_short_sat_rtp(float);
-short __ovld __cnfn convert_short_rtn(float);
-short __ovld __cnfn convert_short_sat_rtn(float);
-short __ovld __cnfn convert_short(float);
-short __ovld __cnfn convert_short_sat(float);
-ushort __ovld __cnfn convert_ushort_rte(char);
-ushort __ovld __cnfn convert_ushort_sat_rte(char);
-ushort __ovld __cnfn convert_ushort_rtz(char);
-ushort __ovld __cnfn convert_ushort_sat_rtz(char);
-ushort __ovld __cnfn convert_ushort_rtp(char);
-ushort __ovld __cnfn convert_ushort_sat_rtp(char);
-ushort __ovld __cnfn convert_ushort_rtn(char);
-ushort __ovld __cnfn convert_ushort_sat_rtn(char);
-ushort __ovld __cnfn convert_ushort(char);
-ushort __ovld __cnfn convert_ushort_sat(char);
-ushort __ovld __cnfn convert_ushort_rte(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
-ushort __ovld __cnfn convert_ushort_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_rtn(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
-ushort __ovld __cnfn convert_ushort(uchar);
-ushort __ovld __cnfn convert_ushort_sat(uchar);
-ushort __ovld __cnfn convert_ushort_rte(short);
-ushort __ovld __cnfn convert_ushort_sat_rte(short);
-ushort __ovld __cnfn convert_ushort_rtz(short);
-ushort __ovld __cnfn convert_ushort_sat_rtz(short);
-ushort __ovld __cnfn convert_ushort_rtp(short);
-ushort __ovld __cnfn convert_ushort_sat_rtp(short);
-ushort __ovld __cnfn convert_ushort_rtn(short);
-ushort __ovld __cnfn convert_ushort_sat_rtn(short);
-ushort __ovld __cnfn convert_ushort(short);
-ushort __ovld __cnfn convert_ushort_sat(short);
-ushort __ovld __cnfn convert_ushort_rte(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
-ushort __ovld __cnfn convert_ushort_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_rtn(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
-ushort __ovld __cnfn convert_ushort(ushort);
-ushort __ovld __cnfn convert_ushort_sat(ushort);
-ushort __ovld __cnfn convert_ushort_rte(int);
-ushort __ovld __cnfn convert_ushort_sat_rte(int);
-ushort __ovld __cnfn convert_ushort_rtz(int);
-ushort __ovld __cnfn convert_ushort_sat_rtz(int);
-ushort __ovld __cnfn convert_ushort_rtp(int);
-ushort __ovld __cnfn convert_ushort_sat_rtp(int);
-ushort __ovld __cnfn convert_ushort_rtn(int);
-ushort __ovld __cnfn convert_ushort_sat_rtn(int);
-ushort __ovld __cnfn convert_ushort(int);
-ushort __ovld __cnfn convert_ushort_sat(int);
-ushort __ovld __cnfn convert_ushort_rte(uint);
-ushort __ovld __cnfn convert_ushort_sat_rte(uint);
-ushort __ovld __cnfn convert_ushort_rtz(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
-ushort __ovld __cnfn convert_ushort_rtp(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
-ushort __ovld __cnfn convert_ushort_rtn(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
-ushort __ovld __cnfn convert_ushort(uint);
-ushort __ovld __cnfn convert_ushort_sat(uint);
-ushort __ovld __cnfn convert_ushort_rte(long);
-ushort __ovld __cnfn convert_ushort_sat_rte(long);
-ushort __ovld __cnfn convert_ushort_rtz(long);
-ushort __ovld __cnfn convert_ushort_sat_rtz(long);
-ushort __ovld __cnfn convert_ushort_rtp(long);
-ushort __ovld __cnfn convert_ushort_sat_rtp(long);
-ushort __ovld __cnfn convert_ushort_rtn(long);
-ushort __ovld __cnfn convert_ushort_sat_rtn(long);
-ushort __ovld __cnfn convert_ushort(long);
-ushort __ovld __cnfn convert_ushort_sat(long);
-ushort __ovld __cnfn convert_ushort_rte(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
-ushort __ovld __cnfn convert_ushort_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_rtn(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
-ushort __ovld __cnfn convert_ushort(ulong);
-ushort __ovld __cnfn convert_ushort_sat(ulong);
-ushort __ovld __cnfn convert_ushort_rte(float);
-ushort __ovld __cnfn convert_ushort_sat_rte(float);
-ushort __ovld __cnfn convert_ushort_rtz(float);
-ushort __ovld __cnfn convert_ushort_sat_rtz(float);
-ushort __ovld __cnfn convert_ushort_rtp(float);
-ushort __ovld __cnfn convert_ushort_sat_rtp(float);
-ushort __ovld __cnfn convert_ushort_rtn(float);
-ushort __ovld __cnfn convert_ushort_sat_rtn(float);
-ushort __ovld __cnfn convert_ushort(float);
-ushort __ovld __cnfn convert_ushort_sat(float);
-int __ovld __cnfn convert_int_rte(char);
-int __ovld __cnfn convert_int_sat_rte(char);
-int __ovld __cnfn convert_int_rtz(char);
-int __ovld __cnfn convert_int_sat_rtz(char);
-int __ovld __cnfn convert_int_rtp(char);
-int __ovld __cnfn convert_int_sat_rtp(char);
-int __ovld __cnfn convert_int_rtn(char);
-int __ovld __cnfn convert_int_sat_rtn(char);
-int __ovld __cnfn convert_int(char);
-int __ovld __cnfn convert_int_sat(char);
-int __ovld __cnfn convert_int_rte(uchar);
-int __ovld __cnfn convert_int_sat_rte(uchar);
-int __ovld __cnfn convert_int_rtz(uchar);
-int __ovld __cnfn convert_int_sat_rtz(uchar);
-int __ovld __cnfn convert_int_rtp(uchar);
-int __ovld __cnfn convert_int_sat_rtp(uchar);
-int __ovld __cnfn convert_int_rtn(uchar);
-int __ovld __cnfn convert_int_sat_rtn(uchar);
-int __ovld __cnfn convert_int(uchar);
-int __ovld __cnfn convert_int_sat(uchar);
-int __ovld __cnfn convert_int_rte(short);
-int __ovld __cnfn convert_int_sat_rte(short);
-int __ovld __cnfn convert_int_rtz(short);
-int __ovld __cnfn convert_int_sat_rtz(short);
-int __ovld __cnfn convert_int_rtp(short);
-int __ovld __cnfn convert_int_sat_rtp(short);
-int __ovld __cnfn convert_int_rtn(short);
-int __ovld __cnfn convert_int_sat_rtn(short);
-int __ovld __cnfn convert_int(short);
-int __ovld __cnfn convert_int_sat(short);
-int __ovld __cnfn convert_int_rte(ushort);
-int __ovld __cnfn convert_int_sat_rte(ushort);
-int __ovld __cnfn convert_int_rtz(ushort);
-int __ovld __cnfn convert_int_sat_rtz(ushort);
-int __ovld __cnfn convert_int_rtp(ushort);
-int __ovld __cnfn convert_int_sat_rtp(ushort);
-int __ovld __cnfn convert_int_rtn(ushort);
-int __ovld __cnfn convert_int_sat_rtn(ushort);
-int __ovld __cnfn convert_int(ushort);
-int __ovld __cnfn convert_int_sat(ushort);
-int __ovld __cnfn convert_int_rte(int);
-int __ovld __cnfn convert_int_sat_rte(int);
-int __ovld __cnfn convert_int_rtz(int);
-int __ovld __cnfn convert_int_sat_rtz(int);
-int __ovld __cnfn convert_int_rtp(int);
-int __ovld __cnfn convert_int_sat_rtp(int);
-int __ovld __cnfn convert_int_rtn(int);
-int __ovld __cnfn convert_int_sat_rtn(int);
-int __ovld __cnfn convert_int(int);
-int __ovld __cnfn convert_int_sat(int);
-int __ovld __cnfn convert_int_rte(uint);
-int __ovld __cnfn convert_int_sat_rte(uint);
-int __ovld __cnfn convert_int_rtz(uint);
-int __ovld __cnfn convert_int_sat_rtz(uint);
-int __ovld __cnfn convert_int_rtp(uint);
-int __ovld __cnfn convert_int_sat_rtp(uint);
-int __ovld __cnfn convert_int_rtn(uint);
-int __ovld __cnfn convert_int_sat_rtn(uint);
-int __ovld __cnfn convert_int(uint);
-int __ovld __cnfn convert_int_sat(uint);
-int __ovld __cnfn convert_int_rte(long);
-int __ovld __cnfn convert_int_sat_rte(long);
-int __ovld __cnfn convert_int_rtz(long);
-int __ovld __cnfn convert_int_sat_rtz(long);
-int __ovld __cnfn convert_int_rtp(long);
-int __ovld __cnfn convert_int_sat_rtp(long);
-int __ovld __cnfn convert_int_rtn(long);
-int __ovld __cnfn convert_int_sat_rtn(long);
-int __ovld __cnfn convert_int(long);
-int __ovld __cnfn convert_int_sat(long);
-int __ovld __cnfn convert_int_rte(ulong);
-int __ovld __cnfn convert_int_sat_rte(ulong);
-int __ovld __cnfn convert_int_rtz(ulong);
-int __ovld __cnfn convert_int_sat_rtz(ulong);
-int __ovld __cnfn convert_int_rtp(ulong);
-int __ovld __cnfn convert_int_sat_rtp(ulong);
-int __ovld __cnfn convert_int_rtn(ulong);
-int __ovld __cnfn convert_int_sat_rtn(ulong);
-int __ovld __cnfn convert_int(ulong);
-int __ovld __cnfn convert_int_sat(ulong);
-int __ovld __cnfn convert_int_rte(float);
-int __ovld __cnfn convert_int_sat_rte(float);
-int __ovld __cnfn convert_int_rtz(float);
-int __ovld __cnfn convert_int_sat_rtz(float);
-int __ovld __cnfn convert_int_rtp(float);
-int __ovld __cnfn convert_int_sat_rtp(float);
-int __ovld __cnfn convert_int_rtn(float);
-int __ovld __cnfn convert_int_sat_rtn(float);
-int __ovld __cnfn convert_int(float);
-int __ovld __cnfn convert_int_sat(float);
-uint __ovld __cnfn convert_uint_rte(char);
-uint __ovld __cnfn convert_uint_sat_rte(char);
-uint __ovld __cnfn convert_uint_rtz(char);
-uint __ovld __cnfn convert_uint_sat_rtz(char);
-uint __ovld __cnfn convert_uint_rtp(char);
-uint __ovld __cnfn convert_uint_sat_rtp(char);
-uint __ovld __cnfn convert_uint_rtn(char);
-uint __ovld __cnfn convert_uint_sat_rtn(char);
-uint __ovld __cnfn convert_uint(char);
-uint __ovld __cnfn convert_uint_sat(char);
-uint __ovld __cnfn convert_uint_rte(uchar);
-uint __ovld __cnfn convert_uint_sat_rte(uchar);
-uint __ovld __cnfn convert_uint_rtz(uchar);
-uint __ovld __cnfn convert_uint_sat_rtz(uchar);
-uint __ovld __cnfn convert_uint_rtp(uchar);
-uint __ovld __cnfn convert_uint_sat_rtp(uchar);
-uint __ovld __cnfn convert_uint_rtn(uchar);
-uint __ovld __cnfn convert_uint_sat_rtn(uchar);
-uint __ovld __cnfn convert_uint(uchar);
-uint __ovld __cnfn convert_uint_sat(uchar);
-uint __ovld __cnfn convert_uint_rte(short);
-uint __ovld __cnfn convert_uint_sat_rte(short);
-uint __ovld __cnfn convert_uint_rtz(short);
-uint __ovld __cnfn convert_uint_sat_rtz(short);
-uint __ovld __cnfn convert_uint_rtp(short);
-uint __ovld __cnfn convert_uint_sat_rtp(short);
-uint __ovld __cnfn convert_uint_rtn(short);
-uint __ovld __cnfn convert_uint_sat_rtn(short);
-uint __ovld __cnfn convert_uint(short);
-uint __ovld __cnfn convert_uint_sat(short);
-uint __ovld __cnfn convert_uint_rte(ushort);
-uint __ovld __cnfn convert_uint_sat_rte(ushort);
-uint __ovld __cnfn convert_uint_rtz(ushort);
-uint __ovld __cnfn convert_uint_sat_rtz(ushort);
-uint __ovld __cnfn convert_uint_rtp(ushort);
-uint __ovld __cnfn convert_uint_sat_rtp(ushort);
-uint __ovld __cnfn convert_uint_rtn(ushort);
-uint __ovld __cnfn convert_uint_sat_rtn(ushort);
-uint __ovld __cnfn convert_uint(ushort);
-uint __ovld __cnfn convert_uint_sat(ushort);
-uint __ovld __cnfn convert_uint_rte(int);
-uint __ovld __cnfn convert_uint_sat_rte(int);
-uint __ovld __cnfn convert_uint_rtz(int);
-uint __ovld __cnfn convert_uint_sat_rtz(int);
-uint __ovld __cnfn convert_uint_rtp(int);
-uint __ovld __cnfn convert_uint_sat_rtp(int);
-uint __ovld __cnfn convert_uint_rtn(int);
-uint __ovld __cnfn convert_uint_sat_rtn(int);
-uint __ovld __cnfn convert_uint(int);
-uint __ovld __cnfn convert_uint_sat(int);
-uint __ovld __cnfn convert_uint_rte(uint);
-uint __ovld __cnfn convert_uint_sat_rte(uint);
-uint __ovld __cnfn convert_uint_rtz(uint);
-uint __ovld __cnfn convert_uint_sat_rtz(uint);
-uint __ovld __cnfn convert_uint_rtp(uint);
-uint __ovld __cnfn convert_uint_sat_rtp(uint);
-uint __ovld __cnfn convert_uint_rtn(uint);
-uint __ovld __cnfn convert_uint_sat_rtn(uint);
-uint __ovld __cnfn convert_uint(uint);
-uint __ovld __cnfn convert_uint_sat(uint);
-uint __ovld __cnfn convert_uint_rte(long);
-uint __ovld __cnfn convert_uint_sat_rte(long);
-uint __ovld __cnfn convert_uint_rtz(long);
-uint __ovld __cnfn convert_uint_sat_rtz(long);
-uint __ovld __cnfn convert_uint_rtp(long);
-uint __ovld __cnfn convert_uint_sat_rtp(long);
-uint __ovld __cnfn convert_uint_rtn(long);
-uint __ovld __cnfn convert_uint_sat_rtn(long);
-uint __ovld __cnfn convert_uint(long);
-uint __ovld __cnfn convert_uint_sat(long);
-uint __ovld __cnfn convert_uint_rte(ulong);
-uint __ovld __cnfn convert_uint_sat_rte(ulong);
-uint __ovld __cnfn convert_uint_rtz(ulong);
-uint __ovld __cnfn convert_uint_sat_rtz(ulong);
-uint __ovld __cnfn convert_uint_rtp(ulong);
-uint __ovld __cnfn convert_uint_sat_rtp(ulong);
-uint __ovld __cnfn convert_uint_rtn(ulong);
-uint __ovld __cnfn convert_uint_sat_rtn(ulong);
-uint __ovld __cnfn convert_uint(ulong);
-uint __ovld __cnfn convert_uint_sat(ulong);
-uint __ovld __cnfn convert_uint_rte(float);
-uint __ovld __cnfn convert_uint_sat_rte(float);
-uint __ovld __cnfn convert_uint_rtz(float);
-uint __ovld __cnfn convert_uint_sat_rtz(float);
-uint __ovld __cnfn convert_uint_rtp(float);
-uint __ovld __cnfn convert_uint_sat_rtp(float);
-uint __ovld __cnfn convert_uint_rtn(float);
-uint __ovld __cnfn convert_uint_sat_rtn(float);
-uint __ovld __cnfn convert_uint(float);
-uint __ovld __cnfn convert_uint_sat(float);
-long __ovld __cnfn convert_long_rte(char);
-long __ovld __cnfn convert_long_sat_rte(char);
-long __ovld __cnfn convert_long_rtz(char);
-long __ovld __cnfn convert_long_sat_rtz(char);
-long __ovld __cnfn convert_long_rtp(char);
-long __ovld __cnfn convert_long_sat_rtp(char);
-long __ovld __cnfn convert_long_rtn(char);
-long __ovld __cnfn convert_long_sat_rtn(char);
-long __ovld __cnfn convert_long(char);
-long __ovld __cnfn convert_long_sat(char);
-long __ovld __cnfn convert_long_rte(uchar);
-long __ovld __cnfn convert_long_sat_rte(uchar);
-long __ovld __cnfn convert_long_rtz(uchar);
-long __ovld __cnfn convert_long_sat_rtz(uchar);
-long __ovld __cnfn convert_long_rtp(uchar);
-long __ovld __cnfn convert_long_sat_rtp(uchar);
-long __ovld __cnfn convert_long_rtn(uchar);
-long __ovld __cnfn convert_long_sat_rtn(uchar);
-long __ovld __cnfn convert_long(uchar);
-long __ovld __cnfn convert_long_sat(uchar);
-long __ovld __cnfn convert_long_rte(short);
-long __ovld __cnfn convert_long_sat_rte(short);
-long __ovld __cnfn convert_long_rtz(short);
-long __ovld __cnfn convert_long_sat_rtz(short);
-long __ovld __cnfn convert_long_rtp(short);
-long __ovld __cnfn convert_long_sat_rtp(short);
-long __ovld __cnfn convert_long_rtn(short);
-long __ovld __cnfn convert_long_sat_rtn(short);
-long __ovld __cnfn convert_long(short);
-long __ovld __cnfn convert_long_sat(short);
-long __ovld __cnfn convert_long_rte(ushort);
-long __ovld __cnfn convert_long_sat_rte(ushort);
-long __ovld __cnfn convert_long_rtz(ushort);
-long __ovld __cnfn convert_long_sat_rtz(ushort);
-long __ovld __cnfn convert_long_rtp(ushort);
-long __ovld __cnfn convert_long_sat_rtp(ushort);
-long __ovld __cnfn convert_long_rtn(ushort);
-long __ovld __cnfn convert_long_sat_rtn(ushort);
-long __ovld __cnfn convert_long(ushort);
-long __ovld __cnfn convert_long_sat(ushort);
-long __ovld __cnfn convert_long_rte(int);
-long __ovld __cnfn convert_long_sat_rte(int);
-long __ovld __cnfn convert_long_rtz(int);
-long __ovld __cnfn convert_long_sat_rtz(int);
-long __ovld __cnfn convert_long_rtp(int);
-long __ovld __cnfn convert_long_sat_rtp(int);
-long __ovld __cnfn convert_long_rtn(int);
-long __ovld __cnfn convert_long_sat_rtn(int);
-long __ovld __cnfn convert_long(int);
-long __ovld __cnfn convert_long_sat(int);
-long __ovld __cnfn convert_long_rte(uint);
-long __ovld __cnfn convert_long_sat_rte(uint);
-long __ovld __cnfn convert_long_rtz(uint);
-long __ovld __cnfn convert_long_sat_rtz(uint);
-long __ovld __cnfn convert_long_rtp(uint);
-long __ovld __cnfn convert_long_sat_rtp(uint);
-long __ovld __cnfn convert_long_rtn(uint);
-long __ovld __cnfn convert_long_sat_rtn(uint);
-long __ovld __cnfn convert_long(uint);
-long __ovld __cnfn convert_long_sat(uint);
-long __ovld __cnfn convert_long_rte(long);
-long __ovld __cnfn convert_long_sat_rte(long);
-long __ovld __cnfn convert_long_rtz(long);
-long __ovld __cnfn convert_long_sat_rtz(long);
-long __ovld __cnfn convert_long_rtp(long);
-long __ovld __cnfn convert_long_sat_rtp(long);
-long __ovld __cnfn convert_long_rtn(long);
-long __ovld __cnfn convert_long_sat_rtn(long);
-long __ovld __cnfn convert_long(long);
-long __ovld __cnfn convert_long_sat(long);
-long __ovld __cnfn convert_long_rte(ulong);
-long __ovld __cnfn convert_long_sat_rte(ulong);
-long __ovld __cnfn convert_long_rtz(ulong);
-long __ovld __cnfn convert_long_sat_rtz(ulong);
-long __ovld __cnfn convert_long_rtp(ulong);
-long __ovld __cnfn convert_long_sat_rtp(ulong);
-long __ovld __cnfn convert_long_rtn(ulong);
-long __ovld __cnfn convert_long_sat_rtn(ulong);
-long __ovld __cnfn convert_long(ulong);
-long __ovld __cnfn convert_long_sat(ulong);
-long __ovld __cnfn convert_long_rte(float);
-long __ovld __cnfn convert_long_sat_rte(float);
-long __ovld __cnfn convert_long_rtz(float);
-long __ovld __cnfn convert_long_sat_rtz(float);
-long __ovld __cnfn convert_long_rtp(float);
-long __ovld __cnfn convert_long_sat_rtp(float);
-long __ovld __cnfn convert_long_rtn(float);
-long __ovld __cnfn convert_long_sat_rtn(float);
-long __ovld __cnfn convert_long(float);
-long __ovld __cnfn convert_long_sat(float);
-ulong __ovld __cnfn convert_ulong_rte(char);
-ulong __ovld __cnfn convert_ulong_sat_rte(char);
-ulong __ovld __cnfn convert_ulong_rtz(char);
-ulong __ovld __cnfn convert_ulong_sat_rtz(char);
-ulong __ovld __cnfn convert_ulong_rtp(char);
-ulong __ovld __cnfn convert_ulong_sat_rtp(char);
-ulong __ovld __cnfn convert_ulong_rtn(char);
-ulong __ovld __cnfn convert_ulong_sat_rtn(char);
-ulong __ovld __cnfn convert_ulong(char);
-ulong __ovld __cnfn convert_ulong_sat(char);
-ulong __ovld __cnfn convert_ulong_rte(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
-ulong __ovld __cnfn convert_ulong_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_rtn(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
-ulong __ovld __cnfn convert_ulong(uchar);
-ulong __ovld __cnfn convert_ulong_sat(uchar);
-ulong __ovld __cnfn convert_ulong_rte(short);
-ulong __ovld __cnfn convert_ulong_sat_rte(short);
-ulong __ovld __cnfn convert_ulong_rtz(short);
-ulong __ovld __cnfn convert_ulong_sat_rtz(short);
-ulong __ovld __cnfn convert_ulong_rtp(short);
-ulong __ovld __cnfn convert_ulong_sat_rtp(short);
-ulong __ovld __cnfn convert_ulong_rtn(short);
-ulong __ovld __cnfn convert_ulong_sat_rtn(short);
-ulong __ovld __cnfn convert_ulong(short);
-ulong __ovld __cnfn convert_ulong_sat(short);
-ulong __ovld __cnfn convert_ulong_rte(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
-ulong __ovld __cnfn convert_ulong_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_rtn(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
-ulong __ovld __cnfn convert_ulong(ushort);
-ulong __ovld __cnfn convert_ulong_sat(ushort);
-ulong __ovld __cnfn convert_ulong_rte(int);
-ulong __ovld __cnfn convert_ulong_sat_rte(int);
-ulong __ovld __cnfn convert_ulong_rtz(int);
-ulong __ovld __cnfn convert_ulong_sat_rtz(int);
-ulong __ovld __cnfn convert_ulong_rtp(int);
-ulong __ovld __cnfn convert_ulong_sat_rtp(int);
-ulong __ovld __cnfn convert_ulong_rtn(int);
-ulong __ovld __cnfn convert_ulong_sat_rtn(int);
-ulong __ovld __cnfn convert_ulong(int);
-ulong __ovld __cnfn convert_ulong_sat(int);
-ulong __ovld __cnfn convert_ulong_rte(uint);
-ulong __ovld __cnfn convert_ulong_sat_rte(uint);
-ulong __ovld __cnfn convert_ulong_rtz(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
-ulong __ovld __cnfn convert_ulong_rtp(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
-ulong __ovld __cnfn convert_ulong_rtn(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
-ulong __ovld __cnfn convert_ulong(uint);
-ulong __ovld __cnfn convert_ulong_sat(uint);
-ulong __ovld __cnfn convert_ulong_rte(long);
-ulong __ovld __cnfn convert_ulong_sat_rte(long);
-ulong __ovld __cnfn convert_ulong_rtz(long);
-ulong __ovld __cnfn convert_ulong_sat_rtz(long);
-ulong __ovld __cnfn convert_ulong_rtp(long);
-ulong __ovld __cnfn convert_ulong_sat_rtp(long);
-ulong __ovld __cnfn convert_ulong_rtn(long);
-ulong __ovld __cnfn convert_ulong_sat_rtn(long);
-ulong __ovld __cnfn convert_ulong(long);
-ulong __ovld __cnfn convert_ulong_sat(long);
-ulong __ovld __cnfn convert_ulong_rte(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
-ulong __ovld __cnfn convert_ulong_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_rtn(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
-ulong __ovld __cnfn convert_ulong(ulong);
-ulong __ovld __cnfn convert_ulong_sat(ulong);
-ulong __ovld __cnfn convert_ulong_rte(float);
-ulong __ovld __cnfn convert_ulong_sat_rte(float);
-ulong __ovld __cnfn convert_ulong_rtz(float);
-ulong __ovld __cnfn convert_ulong_sat_rtz(float);
-ulong __ovld __cnfn convert_ulong_rtp(float);
-ulong __ovld __cnfn convert_ulong_sat_rtp(float);
-ulong __ovld __cnfn convert_ulong_rtn(float);
-ulong __ovld __cnfn convert_ulong_sat_rtn(float);
-ulong __ovld __cnfn convert_ulong(float);
-ulong __ovld __cnfn convert_ulong_sat(float);
-float __ovld __cnfn convert_float_rte(char);
-float __ovld __cnfn convert_float_rtz(char);
-float __ovld __cnfn convert_float_rtp(char);
-float __ovld __cnfn convert_float_rtn(char);
-float __ovld __cnfn convert_float(char);
-float __ovld __cnfn convert_float_rte(uchar);
-float __ovld __cnfn convert_float_rtz(uchar);
-float __ovld __cnfn convert_float_rtp(uchar);
-float __ovld __cnfn convert_float_rtn(uchar);
-float __ovld __cnfn convert_float(uchar);
-float __ovld __cnfn convert_float_rte(short);
-float __ovld __cnfn convert_float_rtz(short);
-float __ovld __cnfn convert_float_rtp(short);
-float __ovld __cnfn convert_float_rtn(short);
-float __ovld __cnfn convert_float(short);
-float __ovld __cnfn convert_float_rte(ushort);
-float __ovld __cnfn convert_float_rtz(ushort);
-float __ovld __cnfn convert_float_rtp(ushort);
-float __ovld __cnfn convert_float_rtn(ushort);
-float __ovld __cnfn convert_float(ushort);
-float __ovld __cnfn convert_float_rte(int);
-float __ovld __cnfn convert_float_rtz(int);
-float __ovld __cnfn convert_float_rtp(int);
-float __ovld __cnfn convert_float_rtn(int);
-float __ovld __cnfn convert_float(int);
-float __ovld __cnfn convert_float_rte(uint);
-float __ovld __cnfn convert_float_rtz(uint);
-float __ovld __cnfn convert_float_rtp(uint);
-float __ovld __cnfn convert_float_rtn(uint);
-float __ovld __cnfn convert_float(uint);
-float __ovld __cnfn convert_float_rte(long);
-float __ovld __cnfn convert_float_rtz(long);
-float __ovld __cnfn convert_float_rtp(long);
-float __ovld __cnfn convert_float_rtn(long);
-float __ovld __cnfn convert_float(long);
-float __ovld __cnfn convert_float_rte(ulong);
-float __ovld __cnfn convert_float_rtz(ulong);
-float __ovld __cnfn convert_float_rtp(ulong);
-float __ovld __cnfn convert_float_rtn(ulong);
-float __ovld __cnfn convert_float(ulong);
-float __ovld __cnfn convert_float_rte(float);
-float __ovld __cnfn convert_float_rtz(float);
-float __ovld __cnfn convert_float_rtp(float);
-float __ovld __cnfn convert_float_rtn(float);
-float __ovld __cnfn convert_float(float);
-char2 __ovld __cnfn convert_char2_rte(char2);
-char2 __ovld __cnfn convert_char2_sat_rte(char2);
-char2 __ovld __cnfn convert_char2_rtz(char2);
-char2 __ovld __cnfn convert_char2_sat_rtz(char2);
-char2 __ovld __cnfn convert_char2_rtp(char2);
-char2 __ovld __cnfn convert_char2_sat_rtp(char2);
-char2 __ovld __cnfn convert_char2_rtn(char2);
-char2 __ovld __cnfn convert_char2_sat_rtn(char2);
-char2 __ovld __cnfn convert_char2(char2);
-char2 __ovld __cnfn convert_char2_sat(char2);
-char2 __ovld __cnfn convert_char2_rte(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
-char2 __ovld __cnfn convert_char2_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_rtn(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
-char2 __ovld __cnfn convert_char2(uchar2);
-char2 __ovld __cnfn convert_char2_sat(uchar2);
-char2 __ovld __cnfn convert_char2_rte(short2);
-char2 __ovld __cnfn convert_char2_sat_rte(short2);
-char2 __ovld __cnfn convert_char2_rtz(short2);
-char2 __ovld __cnfn convert_char2_sat_rtz(short2);
-char2 __ovld __cnfn convert_char2_rtp(short2);
-char2 __ovld __cnfn convert_char2_sat_rtp(short2);
-char2 __ovld __cnfn convert_char2_rtn(short2);
-char2 __ovld __cnfn convert_char2_sat_rtn(short2);
-char2 __ovld __cnfn convert_char2(short2);
-char2 __ovld __cnfn convert_char2_sat(short2);
-char2 __ovld __cnfn convert_char2_rte(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
-char2 __ovld __cnfn convert_char2_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_rtn(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
-char2 __ovld __cnfn convert_char2(ushort2);
-char2 __ovld __cnfn convert_char2_sat(ushort2);
-char2 __ovld __cnfn convert_char2_rte(int2);
-char2 __ovld __cnfn convert_char2_sat_rte(int2);
-char2 __ovld __cnfn convert_char2_rtz(int2);
-char2 __ovld __cnfn convert_char2_sat_rtz(int2);
-char2 __ovld __cnfn convert_char2_rtp(int2);
-char2 __ovld __cnfn convert_char2_sat_rtp(int2);
-char2 __ovld __cnfn convert_char2_rtn(int2);
-char2 __ovld __cnfn convert_char2_sat_rtn(int2);
-char2 __ovld __cnfn convert_char2(int2);
-char2 __ovld __cnfn convert_char2_sat(int2);
-char2 __ovld __cnfn convert_char2_rte(uint2);
-char2 __ovld __cnfn convert_char2_sat_rte(uint2);
-char2 __ovld __cnfn convert_char2_rtz(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
-char2 __ovld __cnfn convert_char2_rtp(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
-char2 __ovld __cnfn convert_char2_rtn(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
-char2 __ovld __cnfn convert_char2(uint2);
-char2 __ovld __cnfn convert_char2_sat(uint2);
-char2 __ovld __cnfn convert_char2_rte(long2);
-char2 __ovld __cnfn convert_char2_sat_rte(long2);
-char2 __ovld __cnfn convert_char2_rtz(long2);
-char2 __ovld __cnfn convert_char2_sat_rtz(long2);
-char2 __ovld __cnfn convert_char2_rtp(long2);
-char2 __ovld __cnfn convert_char2_sat_rtp(long2);
-char2 __ovld __cnfn convert_char2_rtn(long2);
-char2 __ovld __cnfn convert_char2_sat_rtn(long2);
-char2 __ovld __cnfn convert_char2(long2);
-char2 __ovld __cnfn convert_char2_sat(long2);
-char2 __ovld __cnfn convert_char2_rte(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
-char2 __ovld __cnfn convert_char2_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_rtn(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
-char2 __ovld __cnfn convert_char2(ulong2);
-char2 __ovld __cnfn convert_char2_sat(ulong2);
-char2 __ovld __cnfn convert_char2_rte(float2);
-char2 __ovld __cnfn convert_char2_sat_rte(float2);
-char2 __ovld __cnfn convert_char2_rtz(float2);
-char2 __ovld __cnfn convert_char2_sat_rtz(float2);
-char2 __ovld __cnfn convert_char2_rtp(float2);
-char2 __ovld __cnfn convert_char2_sat_rtp(float2);
-char2 __ovld __cnfn convert_char2_rtn(float2);
-char2 __ovld __cnfn convert_char2_sat_rtn(float2);
-char2 __ovld __cnfn convert_char2(float2);
-char2 __ovld __cnfn convert_char2_sat(float2);
-uchar2 __ovld __cnfn convert_uchar2_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat(char2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat(short2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat(int2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat(long2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat(float2);
-short2 __ovld __cnfn convert_short2_rte(char2);
-short2 __ovld __cnfn convert_short2_sat_rte(char2);
-short2 __ovld __cnfn convert_short2_rtz(char2);
-short2 __ovld __cnfn convert_short2_sat_rtz(char2);
-short2 __ovld __cnfn convert_short2_rtp(char2);
-short2 __ovld __cnfn convert_short2_sat_rtp(char2);
-short2 __ovld __cnfn convert_short2_rtn(char2);
-short2 __ovld __cnfn convert_short2_sat_rtn(char2);
-short2 __ovld __cnfn convert_short2(char2);
-short2 __ovld __cnfn convert_short2_sat(char2);
-short2 __ovld __cnfn convert_short2_rte(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
-short2 __ovld __cnfn convert_short2_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_rtn(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
-short2 __ovld __cnfn convert_short2(uchar2);
-short2 __ovld __cnfn convert_short2_sat(uchar2);
-short2 __ovld __cnfn convert_short2_rte(short2);
-short2 __ovld __cnfn convert_short2_sat_rte(short2);
-short2 __ovld __cnfn convert_short2_rtz(short2);
-short2 __ovld __cnfn convert_short2_sat_rtz(short2);
-short2 __ovld __cnfn convert_short2_rtp(short2);
-short2 __ovld __cnfn convert_short2_sat_rtp(short2);
-short2 __ovld __cnfn convert_short2_rtn(short2);
-short2 __ovld __cnfn convert_short2_sat_rtn(short2);
-short2 __ovld __cnfn convert_short2(short2);
-short2 __ovld __cnfn convert_short2_sat(short2);
-short2 __ovld __cnfn convert_short2_rte(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
-short2 __ovld __cnfn convert_short2_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_rtn(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
-short2 __ovld __cnfn convert_short2(ushort2);
-short2 __ovld __cnfn convert_short2_sat(ushort2);
-short2 __ovld __cnfn convert_short2_rte(int2);
-short2 __ovld __cnfn convert_short2_sat_rte(int2);
-short2 __ovld __cnfn convert_short2_rtz(int2);
-short2 __ovld __cnfn convert_short2_sat_rtz(int2);
-short2 __ovld __cnfn convert_short2_rtp(int2);
-short2 __ovld __cnfn convert_short2_sat_rtp(int2);
-short2 __ovld __cnfn convert_short2_rtn(int2);
-short2 __ovld __cnfn convert_short2_sat_rtn(int2);
-short2 __ovld __cnfn convert_short2(int2);
-short2 __ovld __cnfn convert_short2_sat(int2);
-short2 __ovld __cnfn convert_short2_rte(uint2);
-short2 __ovld __cnfn convert_short2_sat_rte(uint2);
-short2 __ovld __cnfn convert_short2_rtz(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
-short2 __ovld __cnfn convert_short2_rtp(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
-short2 __ovld __cnfn convert_short2_rtn(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
-short2 __ovld __cnfn convert_short2(uint2);
-short2 __ovld __cnfn convert_short2_sat(uint2);
-short2 __ovld __cnfn convert_short2_rte(long2);
-short2 __ovld __cnfn convert_short2_sat_rte(long2);
-short2 __ovld __cnfn convert_short2_rtz(long2);
-short2 __ovld __cnfn convert_short2_sat_rtz(long2);
-short2 __ovld __cnfn convert_short2_rtp(long2);
-short2 __ovld __cnfn convert_short2_sat_rtp(long2);
-short2 __ovld __cnfn convert_short2_rtn(long2);
-short2 __ovld __cnfn convert_short2_sat_rtn(long2);
-short2 __ovld __cnfn convert_short2(long2);
-short2 __ovld __cnfn convert_short2_sat(long2);
-short2 __ovld __cnfn convert_short2_rte(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
-short2 __ovld __cnfn convert_short2_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_rtn(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
-short2 __ovld __cnfn convert_short2(ulong2);
-short2 __ovld __cnfn convert_short2_sat(ulong2);
-short2 __ovld __cnfn convert_short2_rte(float2);
-short2 __ovld __cnfn convert_short2_sat_rte(float2);
-short2 __ovld __cnfn convert_short2_rtz(float2);
-short2 __ovld __cnfn convert_short2_sat_rtz(float2);
-short2 __ovld __cnfn convert_short2_rtp(float2);
-short2 __ovld __cnfn convert_short2_sat_rtp(float2);
-short2 __ovld __cnfn convert_short2_rtn(float2);
-short2 __ovld __cnfn convert_short2_sat_rtn(float2);
-short2 __ovld __cnfn convert_short2(float2);
-short2 __ovld __cnfn convert_short2_sat(float2);
-ushort2 __ovld __cnfn convert_ushort2_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat(char2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat(short2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat(int2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat(long2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat(float2);
-int2 __ovld __cnfn convert_int2_rte(char2);
-int2 __ovld __cnfn convert_int2_sat_rte(char2);
-int2 __ovld __cnfn convert_int2_rtz(char2);
-int2 __ovld __cnfn convert_int2_sat_rtz(char2);
-int2 __ovld __cnfn convert_int2_rtp(char2);
-int2 __ovld __cnfn convert_int2_sat_rtp(char2);
-int2 __ovld __cnfn convert_int2_rtn(char2);
-int2 __ovld __cnfn convert_int2_sat_rtn(char2);
-int2 __ovld __cnfn convert_int2(char2);
-int2 __ovld __cnfn convert_int2_sat(char2);
-int2 __ovld __cnfn convert_int2_rte(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
-int2 __ovld __cnfn convert_int2_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_rtn(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
-int2 __ovld __cnfn convert_int2(uchar2);
-int2 __ovld __cnfn convert_int2_sat(uchar2);
-int2 __ovld __cnfn convert_int2_rte(short2);
-int2 __ovld __cnfn convert_int2_sat_rte(short2);
-int2 __ovld __cnfn convert_int2_rtz(short2);
-int2 __ovld __cnfn convert_int2_sat_rtz(short2);
-int2 __ovld __cnfn convert_int2_rtp(short2);
-int2 __ovld __cnfn convert_int2_sat_rtp(short2);
-int2 __ovld __cnfn convert_int2_rtn(short2);
-int2 __ovld __cnfn convert_int2_sat_rtn(short2);
-int2 __ovld __cnfn convert_int2(short2);
-int2 __ovld __cnfn convert_int2_sat(short2);
-int2 __ovld __cnfn convert_int2_rte(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
-int2 __ovld __cnfn convert_int2_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_rtn(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
-int2 __ovld __cnfn convert_int2(ushort2);
-int2 __ovld __cnfn convert_int2_sat(ushort2);
-int2 __ovld __cnfn convert_int2_rte(int2);
-int2 __ovld __cnfn convert_int2_sat_rte(int2);
-int2 __ovld __cnfn convert_int2_rtz(int2);
-int2 __ovld __cnfn convert_int2_sat_rtz(int2);
-int2 __ovld __cnfn convert_int2_rtp(int2);
-int2 __ovld __cnfn convert_int2_sat_rtp(int2);
-int2 __ovld __cnfn convert_int2_rtn(int2);
-int2 __ovld __cnfn convert_int2_sat_rtn(int2);
-int2 __ovld __cnfn convert_int2(int2);
-int2 __ovld __cnfn convert_int2_sat(int2);
-int2 __ovld __cnfn convert_int2_rte(uint2);
-int2 __ovld __cnfn convert_int2_sat_rte(uint2);
-int2 __ovld __cnfn convert_int2_rtz(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
-int2 __ovld __cnfn convert_int2_rtp(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
-int2 __ovld __cnfn convert_int2_rtn(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
-int2 __ovld __cnfn convert_int2(uint2);
-int2 __ovld __cnfn convert_int2_sat(uint2);
-int2 __ovld __cnfn convert_int2_rte(long2);
-int2 __ovld __cnfn convert_int2_sat_rte(long2);
-int2 __ovld __cnfn convert_int2_rtz(long2);
-int2 __ovld __cnfn convert_int2_sat_rtz(long2);
-int2 __ovld __cnfn convert_int2_rtp(long2);
-int2 __ovld __cnfn convert_int2_sat_rtp(long2);
-int2 __ovld __cnfn convert_int2_rtn(long2);
-int2 __ovld __cnfn convert_int2_sat_rtn(long2);
-int2 __ovld __cnfn convert_int2(long2);
-int2 __ovld __cnfn convert_int2_sat(long2);
-int2 __ovld __cnfn convert_int2_rte(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
-int2 __ovld __cnfn convert_int2_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_rtn(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
-int2 __ovld __cnfn convert_int2(ulong2);
-int2 __ovld __cnfn convert_int2_sat(ulong2);
-int2 __ovld __cnfn convert_int2_rte(float2);
-int2 __ovld __cnfn convert_int2_sat_rte(float2);
-int2 __ovld __cnfn convert_int2_rtz(float2);
-int2 __ovld __cnfn convert_int2_sat_rtz(float2);
-int2 __ovld __cnfn convert_int2_rtp(float2);
-int2 __ovld __cnfn convert_int2_sat_rtp(float2);
-int2 __ovld __cnfn convert_int2_rtn(float2);
-int2 __ovld __cnfn convert_int2_sat_rtn(float2);
-int2 __ovld __cnfn convert_int2(float2);
-int2 __ovld __cnfn convert_int2_sat(float2);
-uint2 __ovld __cnfn convert_uint2_rte(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
-uint2 __ovld __cnfn convert_uint2_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_rtn(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
-uint2 __ovld __cnfn convert_uint2(char2);
-uint2 __ovld __cnfn convert_uint2_sat(char2);
-uint2 __ovld __cnfn convert_uint2_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat(uchar2);
-uint2 __ovld __cnfn convert_uint2_rte(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
-uint2 __ovld __cnfn convert_uint2_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_rtn(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
-uint2 __ovld __cnfn convert_uint2(short2);
-uint2 __ovld __cnfn convert_uint2_sat(short2);
-uint2 __ovld __cnfn convert_uint2_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat(ushort2);
-uint2 __ovld __cnfn convert_uint2_rte(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
-uint2 __ovld __cnfn convert_uint2_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_rtn(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
-uint2 __ovld __cnfn convert_uint2(int2);
-uint2 __ovld __cnfn convert_uint2_sat(int2);
-uint2 __ovld __cnfn convert_uint2_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2(uint2);
-uint2 __ovld __cnfn convert_uint2_sat(uint2);
-uint2 __ovld __cnfn convert_uint2_rte(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
-uint2 __ovld __cnfn convert_uint2_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_rtn(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
-uint2 __ovld __cnfn convert_uint2(long2);
-uint2 __ovld __cnfn convert_uint2_sat(long2);
-uint2 __ovld __cnfn convert_uint2_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat(ulong2);
-uint2 __ovld __cnfn convert_uint2_rte(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
-uint2 __ovld __cnfn convert_uint2_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_rtn(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
-uint2 __ovld __cnfn convert_uint2(float2);
-uint2 __ovld __cnfn convert_uint2_sat(float2);
-long2 __ovld __cnfn convert_long2_rte(char2);
-long2 __ovld __cnfn convert_long2_sat_rte(char2);
-long2 __ovld __cnfn convert_long2_rtz(char2);
-long2 __ovld __cnfn convert_long2_sat_rtz(char2);
-long2 __ovld __cnfn convert_long2_rtp(char2);
-long2 __ovld __cnfn convert_long2_sat_rtp(char2);
-long2 __ovld __cnfn convert_long2_rtn(char2);
-long2 __ovld __cnfn convert_long2_sat_rtn(char2);
-long2 __ovld __cnfn convert_long2(char2);
-long2 __ovld __cnfn convert_long2_sat(char2);
-long2 __ovld __cnfn convert_long2_rte(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
-long2 __ovld __cnfn convert_long2_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_rtn(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
-long2 __ovld __cnfn convert_long2(uchar2);
-long2 __ovld __cnfn convert_long2_sat(uchar2);
-long2 __ovld __cnfn convert_long2_rte(short2);
-long2 __ovld __cnfn convert_long2_sat_rte(short2);
-long2 __ovld __cnfn convert_long2_rtz(short2);
-long2 __ovld __cnfn convert_long2_sat_rtz(short2);
-long2 __ovld __cnfn convert_long2_rtp(short2);
-long2 __ovld __cnfn convert_long2_sat_rtp(short2);
-long2 __ovld __cnfn convert_long2_rtn(short2);
-long2 __ovld __cnfn convert_long2_sat_rtn(short2);
-long2 __ovld __cnfn convert_long2(short2);
-long2 __ovld __cnfn convert_long2_sat(short2);
-long2 __ovld __cnfn convert_long2_rte(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
-long2 __ovld __cnfn convert_long2_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_rtn(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
-long2 __ovld __cnfn convert_long2(ushort2);
-long2 __ovld __cnfn convert_long2_sat(ushort2);
-long2 __ovld __cnfn convert_long2_rte(int2);
-long2 __ovld __cnfn convert_long2_sat_rte(int2);
-long2 __ovld __cnfn convert_long2_rtz(int2);
-long2 __ovld __cnfn convert_long2_sat_rtz(int2);
-long2 __ovld __cnfn convert_long2_rtp(int2);
-long2 __ovld __cnfn convert_long2_sat_rtp(int2);
-long2 __ovld __cnfn convert_long2_rtn(int2);
-long2 __ovld __cnfn convert_long2_sat_rtn(int2);
-long2 __ovld __cnfn convert_long2(int2);
-long2 __ovld __cnfn convert_long2_sat(int2);
-long2 __ovld __cnfn convert_long2_rte(uint2);
-long2 __ovld __cnfn convert_long2_sat_rte(uint2);
-long2 __ovld __cnfn convert_long2_rtz(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
-long2 __ovld __cnfn convert_long2_rtp(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
-long2 __ovld __cnfn convert_long2_rtn(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
-long2 __ovld __cnfn convert_long2(uint2);
-long2 __ovld __cnfn convert_long2_sat(uint2);
-long2 __ovld __cnfn convert_long2_rte(long2);
-long2 __ovld __cnfn convert_long2_sat_rte(long2);
-long2 __ovld __cnfn convert_long2_rtz(long2);
-long2 __ovld __cnfn convert_long2_sat_rtz(long2);
-long2 __ovld __cnfn convert_long2_rtp(long2);
-long2 __ovld __cnfn convert_long2_sat_rtp(long2);
-long2 __ovld __cnfn convert_long2_rtn(long2);
-long2 __ovld __cnfn convert_long2_sat_rtn(long2);
-long2 __ovld __cnfn convert_long2(long2);
-long2 __ovld __cnfn convert_long2_sat(long2);
-long2 __ovld __cnfn convert_long2_rte(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
-long2 __ovld __cnfn convert_long2_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_rtn(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
-long2 __ovld __cnfn convert_long2(ulong2);
-long2 __ovld __cnfn convert_long2_sat(ulong2);
-long2 __ovld __cnfn convert_long2_rte(float2);
-long2 __ovld __cnfn convert_long2_sat_rte(float2);
-long2 __ovld __cnfn convert_long2_rtz(float2);
-long2 __ovld __cnfn convert_long2_sat_rtz(float2);
-long2 __ovld __cnfn convert_long2_rtp(float2);
-long2 __ovld __cnfn convert_long2_sat_rtp(float2);
-long2 __ovld __cnfn convert_long2_rtn(float2);
-long2 __ovld __cnfn convert_long2_sat_rtn(float2);
-long2 __ovld __cnfn convert_long2(float2);
-long2 __ovld __cnfn convert_long2_sat(float2);
-ulong2 __ovld __cnfn convert_ulong2_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat(char2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat(short2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat(int2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat(long2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat(float2);
-float2 __ovld __cnfn convert_float2_rte(char2);
-float2 __ovld __cnfn convert_float2_rtz(char2);
-float2 __ovld __cnfn convert_float2_rtp(char2);
-float2 __ovld __cnfn convert_float2_rtn(char2);
-float2 __ovld __cnfn convert_float2(char2);
-float2 __ovld __cnfn convert_float2_rte(uchar2);
-float2 __ovld __cnfn convert_float2_rtz(uchar2);
-float2 __ovld __cnfn convert_float2_rtp(uchar2);
-float2 __ovld __cnfn convert_float2_rtn(uchar2);
-float2 __ovld __cnfn convert_float2(uchar2);
-float2 __ovld __cnfn convert_float2_rte(short2);
-float2 __ovld __cnfn convert_float2_rtz(short2);
-float2 __ovld __cnfn convert_float2_rtp(short2);
-float2 __ovld __cnfn convert_float2_rtn(short2);
-float2 __ovld __cnfn convert_float2(short2);
-float2 __ovld __cnfn convert_float2_rte(ushort2);
-float2 __ovld __cnfn convert_float2_rtz(ushort2);
-float2 __ovld __cnfn convert_float2_rtp(ushort2);
-float2 __ovld __cnfn convert_float2_rtn(ushort2);
-float2 __ovld __cnfn convert_float2(ushort2);
-float2 __ovld __cnfn convert_float2_rte(int2);
-float2 __ovld __cnfn convert_float2_rtz(int2);
-float2 __ovld __cnfn convert_float2_rtp(int2);
-float2 __ovld __cnfn convert_float2_rtn(int2);
-float2 __ovld __cnfn convert_float2(int2);
-float2 __ovld __cnfn convert_float2_rte(uint2);
-float2 __ovld __cnfn convert_float2_rtz(uint2);
-float2 __ovld __cnfn convert_float2_rtp(uint2);
-float2 __ovld __cnfn convert_float2_rtn(uint2);
-float2 __ovld __cnfn convert_float2(uint2);
-float2 __ovld __cnfn convert_float2_rte(long2);
-float2 __ovld __cnfn convert_float2_rtz(long2);
-float2 __ovld __cnfn convert_float2_rtp(long2);
-float2 __ovld __cnfn convert_float2_rtn(long2);
-float2 __ovld __cnfn convert_float2(long2);
-float2 __ovld __cnfn convert_float2_rte(ulong2);
-float2 __ovld __cnfn convert_float2_rtz(ulong2);
-float2 __ovld __cnfn convert_float2_rtp(ulong2);
-float2 __ovld __cnfn convert_float2_rtn(ulong2);
-float2 __ovld __cnfn convert_float2(ulong2);
-float2 __ovld __cnfn convert_float2_rte(float2);
-float2 __ovld __cnfn convert_float2_rtz(float2);
-float2 __ovld __cnfn convert_float2_rtp(float2);
-float2 __ovld __cnfn convert_float2_rtn(float2);
-float2 __ovld __cnfn convert_float2(float2);
-char3 __ovld __cnfn convert_char3_rte(char3);
-char3 __ovld __cnfn convert_char3_sat_rte(char3);
-char3 __ovld __cnfn convert_char3_rtz(char3);
-char3 __ovld __cnfn convert_char3_sat_rtz(char3);
-char3 __ovld __cnfn convert_char3_rtp(char3);
-char3 __ovld __cnfn convert_char3_sat_rtp(char3);
-char3 __ovld __cnfn convert_char3_rtn(char3);
-char3 __ovld __cnfn convert_char3_sat_rtn(char3);
-char3 __ovld __cnfn convert_char3(char3);
-char3 __ovld __cnfn convert_char3_sat(char3);
-char3 __ovld __cnfn convert_char3_rte(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
-char3 __ovld __cnfn convert_char3_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_rtn(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
-char3 __ovld __cnfn convert_char3(uchar3);
-char3 __ovld __cnfn convert_char3_sat(uchar3);
-char3 __ovld __cnfn convert_char3_rte(short3);
-char3 __ovld __cnfn convert_char3_sat_rte(short3);
-char3 __ovld __cnfn convert_char3_rtz(short3);
-char3 __ovld __cnfn convert_char3_sat_rtz(short3);
-char3 __ovld __cnfn convert_char3_rtp(short3);
-char3 __ovld __cnfn convert_char3_sat_rtp(short3);
-char3 __ovld __cnfn convert_char3_rtn(short3);
-char3 __ovld __cnfn convert_char3_sat_rtn(short3);
-char3 __ovld __cnfn convert_char3(short3);
-char3 __ovld __cnfn convert_char3_sat(short3);
-char3 __ovld __cnfn convert_char3_rte(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
-char3 __ovld __cnfn convert_char3_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_rtn(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
-char3 __ovld __cnfn convert_char3(ushort3);
-char3 __ovld __cnfn convert_char3_sat(ushort3);
-char3 __ovld __cnfn convert_char3_rte(int3);
-char3 __ovld __cnfn convert_char3_sat_rte(int3);
-char3 __ovld __cnfn convert_char3_rtz(int3);
-char3 __ovld __cnfn convert_char3_sat_rtz(int3);
-char3 __ovld __cnfn convert_char3_rtp(int3);
-char3 __ovld __cnfn convert_char3_sat_rtp(int3);
-char3 __ovld __cnfn convert_char3_rtn(int3);
-char3 __ovld __cnfn convert_char3_sat_rtn(int3);
-char3 __ovld __cnfn convert_char3(int3);
-char3 __ovld __cnfn convert_char3_sat(int3);
-char3 __ovld __cnfn convert_char3_rte(uint3);
-char3 __ovld __cnfn convert_char3_sat_rte(uint3);
-char3 __ovld __cnfn convert_char3_rtz(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
-char3 __ovld __cnfn convert_char3_rtp(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
-char3 __ovld __cnfn convert_char3_rtn(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
-char3 __ovld __cnfn convert_char3(uint3);
-char3 __ovld __cnfn convert_char3_sat(uint3);
-char3 __ovld __cnfn convert_char3_rte(long3);
-char3 __ovld __cnfn convert_char3_sat_rte(long3);
-char3 __ovld __cnfn convert_char3_rtz(long3);
-char3 __ovld __cnfn convert_char3_sat_rtz(long3);
-char3 __ovld __cnfn convert_char3_rtp(long3);
-char3 __ovld __cnfn convert_char3_sat_rtp(long3);
-char3 __ovld __cnfn convert_char3_rtn(long3);
-char3 __ovld __cnfn convert_char3_sat_rtn(long3);
-char3 __ovld __cnfn convert_char3(long3);
-char3 __ovld __cnfn convert_char3_sat(long3);
-char3 __ovld __cnfn convert_char3_rte(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
-char3 __ovld __cnfn convert_char3_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_rtn(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
-char3 __ovld __cnfn convert_char3(ulong3);
-char3 __ovld __cnfn convert_char3_sat(ulong3);
-char3 __ovld __cnfn convert_char3_rte(float3);
-char3 __ovld __cnfn convert_char3_sat_rte(float3);
-char3 __ovld __cnfn convert_char3_rtz(float3);
-char3 __ovld __cnfn convert_char3_sat_rtz(float3);
-char3 __ovld __cnfn convert_char3_rtp(float3);
-char3 __ovld __cnfn convert_char3_sat_rtp(float3);
-char3 __ovld __cnfn convert_char3_rtn(float3);
-char3 __ovld __cnfn convert_char3_sat_rtn(float3);
-char3 __ovld __cnfn convert_char3(float3);
-char3 __ovld __cnfn convert_char3_sat(float3);
-uchar3 __ovld __cnfn convert_uchar3_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat(char3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat(short3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat(int3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat(long3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat(float3);
-short3 __ovld __cnfn convert_short3_rte(char3);
-short3 __ovld __cnfn convert_short3_sat_rte(char3);
-short3 __ovld __cnfn convert_short3_rtz(char3);
-short3 __ovld __cnfn convert_short3_sat_rtz(char3);
-short3 __ovld __cnfn convert_short3_rtp(char3);
-short3 __ovld __cnfn convert_short3_sat_rtp(char3);
-short3 __ovld __cnfn convert_short3_rtn(char3);
-short3 __ovld __cnfn convert_short3_sat_rtn(char3);
-short3 __ovld __cnfn convert_short3(char3);
-short3 __ovld __cnfn convert_short3_sat(char3);
-short3 __ovld __cnfn convert_short3_rte(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
-short3 __ovld __cnfn convert_short3_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_rtn(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
-short3 __ovld __cnfn convert_short3(uchar3);
-short3 __ovld __cnfn convert_short3_sat(uchar3);
-short3 __ovld __cnfn convert_short3_rte(short3);
-short3 __ovld __cnfn convert_short3_sat_rte(short3);
-short3 __ovld __cnfn convert_short3_rtz(short3);
-short3 __ovld __cnfn convert_short3_sat_rtz(short3);
-short3 __ovld __cnfn convert_short3_rtp(short3);
-short3 __ovld __cnfn convert_short3_sat_rtp(short3);
-short3 __ovld __cnfn convert_short3_rtn(short3);
-short3 __ovld __cnfn convert_short3_sat_rtn(short3);
-short3 __ovld __cnfn convert_short3(short3);
-short3 __ovld __cnfn convert_short3_sat(short3);
-short3 __ovld __cnfn convert_short3_rte(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
-short3 __ovld __cnfn convert_short3_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_rtn(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
-short3 __ovld __cnfn convert_short3(ushort3);
-short3 __ovld __cnfn convert_short3_sat(ushort3);
-short3 __ovld __cnfn convert_short3_rte(int3);
-short3 __ovld __cnfn convert_short3_sat_rte(int3);
-short3 __ovld __cnfn convert_short3_rtz(int3);
-short3 __ovld __cnfn convert_short3_sat_rtz(int3);
-short3 __ovld __cnfn convert_short3_rtp(int3);
-short3 __ovld __cnfn convert_short3_sat_rtp(int3);
-short3 __ovld __cnfn convert_short3_rtn(int3);
-short3 __ovld __cnfn convert_short3_sat_rtn(int3);
-short3 __ovld __cnfn convert_short3(int3);
-short3 __ovld __cnfn convert_short3_sat(int3);
-short3 __ovld __cnfn convert_short3_rte(uint3);
-short3 __ovld __cnfn convert_short3_sat_rte(uint3);
-short3 __ovld __cnfn convert_short3_rtz(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
-short3 __ovld __cnfn convert_short3_rtp(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
-short3 __ovld __cnfn convert_short3_rtn(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
-short3 __ovld __cnfn convert_short3(uint3);
-short3 __ovld __cnfn convert_short3_sat(uint3);
-short3 __ovld __cnfn convert_short3_rte(long3);
-short3 __ovld __cnfn convert_short3_sat_rte(long3);
-short3 __ovld __cnfn convert_short3_rtz(long3);
-short3 __ovld __cnfn convert_short3_sat_rtz(long3);
-short3 __ovld __cnfn convert_short3_rtp(long3);
-short3 __ovld __cnfn convert_short3_sat_rtp(long3);
-short3 __ovld __cnfn convert_short3_rtn(long3);
-short3 __ovld __cnfn convert_short3_sat_rtn(long3);
-short3 __ovld __cnfn convert_short3(long3);
-short3 __ovld __cnfn convert_short3_sat(long3);
-short3 __ovld __cnfn convert_short3_rte(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
-short3 __ovld __cnfn convert_short3_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_rtn(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
-short3 __ovld __cnfn convert_short3(ulong3);
-short3 __ovld __cnfn convert_short3_sat(ulong3);
-short3 __ovld __cnfn convert_short3_rte(float3);
-short3 __ovld __cnfn convert_short3_sat_rte(float3);
-short3 __ovld __cnfn convert_short3_rtz(float3);
-short3 __ovld __cnfn convert_short3_sat_rtz(float3);
-short3 __ovld __cnfn convert_short3_rtp(float3);
-short3 __ovld __cnfn convert_short3_sat_rtp(float3);
-short3 __ovld __cnfn convert_short3_rtn(float3);
-short3 __ovld __cnfn convert_short3_sat_rtn(float3);
-short3 __ovld __cnfn convert_short3(float3);
-short3 __ovld __cnfn convert_short3_sat(float3);
-ushort3 __ovld __cnfn convert_ushort3_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat(char3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat(short3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat(int3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat(long3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat(float3);
-int3 __ovld __cnfn convert_int3_rte(char3);
-int3 __ovld __cnfn convert_int3_sat_rte(char3);
-int3 __ovld __cnfn convert_int3_rtz(char3);
-int3 __ovld __cnfn convert_int3_sat_rtz(char3);
-int3 __ovld __cnfn convert_int3_rtp(char3);
-int3 __ovld __cnfn convert_int3_sat_rtp(char3);
-int3 __ovld __cnfn convert_int3_rtn(char3);
-int3 __ovld __cnfn convert_int3_sat_rtn(char3);
-int3 __ovld __cnfn convert_int3(char3);
-int3 __ovld __cnfn convert_int3_sat(char3);
-int3 __ovld __cnfn convert_int3_rte(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
-int3 __ovld __cnfn convert_int3_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_rtn(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
-int3 __ovld __cnfn convert_int3(uchar3);
-int3 __ovld __cnfn convert_int3_sat(uchar3);
-int3 __ovld __cnfn convert_int3_rte(short3);
-int3 __ovld __cnfn convert_int3_sat_rte(short3);
-int3 __ovld __cnfn convert_int3_rtz(short3);
-int3 __ovld __cnfn convert_int3_sat_rtz(short3);
-int3 __ovld __cnfn convert_int3_rtp(short3);
-int3 __ovld __cnfn convert_int3_sat_rtp(short3);
-int3 __ovld __cnfn convert_int3_rtn(short3);
-int3 __ovld __cnfn convert_int3_sat_rtn(short3);
-int3 __ovld __cnfn convert_int3(short3);
-int3 __ovld __cnfn convert_int3_sat(short3);
-int3 __ovld __cnfn convert_int3_rte(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
-int3 __ovld __cnfn convert_int3_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_rtn(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
-int3 __ovld __cnfn convert_int3(ushort3);
-int3 __ovld __cnfn convert_int3_sat(ushort3);
-int3 __ovld __cnfn convert_int3_rte(int3);
-int3 __ovld __cnfn convert_int3_sat_rte(int3);
-int3 __ovld __cnfn convert_int3_rtz(int3);
-int3 __ovld __cnfn convert_int3_sat_rtz(int3);
-int3 __ovld __cnfn convert_int3_rtp(int3);
-int3 __ovld __cnfn convert_int3_sat_rtp(int3);
-int3 __ovld __cnfn convert_int3_rtn(int3);
-int3 __ovld __cnfn convert_int3_sat_rtn(int3);
-int3 __ovld __cnfn convert_int3(int3);
-int3 __ovld __cnfn convert_int3_sat(int3);
-int3 __ovld __cnfn convert_int3_rte(uint3);
-int3 __ovld __cnfn convert_int3_sat_rte(uint3);
-int3 __ovld __cnfn convert_int3_rtz(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
-int3 __ovld __cnfn convert_int3_rtp(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
-int3 __ovld __cnfn convert_int3_rtn(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
-int3 __ovld __cnfn convert_int3(uint3);
-int3 __ovld __cnfn convert_int3_sat(uint3);
-int3 __ovld __cnfn convert_int3_rte(long3);
-int3 __ovld __cnfn convert_int3_sat_rte(long3);
-int3 __ovld __cnfn convert_int3_rtz(long3);
-int3 __ovld __cnfn convert_int3_sat_rtz(long3);
-int3 __ovld __cnfn convert_int3_rtp(long3);
-int3 __ovld __cnfn convert_int3_sat_rtp(long3);
-int3 __ovld __cnfn convert_int3_rtn(long3);
-int3 __ovld __cnfn convert_int3_sat_rtn(long3);
-int3 __ovld __cnfn convert_int3(long3);
-int3 __ovld __cnfn convert_int3_sat(long3);
-int3 __ovld __cnfn convert_int3_rte(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
-int3 __ovld __cnfn convert_int3_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_rtn(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
-int3 __ovld __cnfn convert_int3(ulong3);
-int3 __ovld __cnfn convert_int3_sat(ulong3);
-int3 __ovld __cnfn convert_int3_rte(float3);
-int3 __ovld __cnfn convert_int3_sat_rte(float3);
-int3 __ovld __cnfn convert_int3_rtz(float3);
-int3 __ovld __cnfn convert_int3_sat_rtz(float3);
-int3 __ovld __cnfn convert_int3_rtp(float3);
-int3 __ovld __cnfn convert_int3_sat_rtp(float3);
-int3 __ovld __cnfn convert_int3_rtn(float3);
-int3 __ovld __cnfn convert_int3_sat_rtn(float3);
-int3 __ovld __cnfn convert_int3(float3);
-int3 __ovld __cnfn convert_int3_sat(float3);
-uint3 __ovld __cnfn convert_uint3_rte(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
-uint3 __ovld __cnfn convert_uint3_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_rtn(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
-uint3 __ovld __cnfn convert_uint3(char3);
-uint3 __ovld __cnfn convert_uint3_sat(char3);
-uint3 __ovld __cnfn convert_uint3_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat(uchar3);
-uint3 __ovld __cnfn convert_uint3_rte(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
-uint3 __ovld __cnfn convert_uint3_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_rtn(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
-uint3 __ovld __cnfn convert_uint3(short3);
-uint3 __ovld __cnfn convert_uint3_sat(short3);
-uint3 __ovld __cnfn convert_uint3_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat(ushort3);
-uint3 __ovld __cnfn convert_uint3_rte(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
-uint3 __ovld __cnfn convert_uint3_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_rtn(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
-uint3 __ovld __cnfn convert_uint3(int3);
-uint3 __ovld __cnfn convert_uint3_sat(int3);
-uint3 __ovld __cnfn convert_uint3_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3(uint3);
-uint3 __ovld __cnfn convert_uint3_sat(uint3);
-uint3 __ovld __cnfn convert_uint3_rte(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
-uint3 __ovld __cnfn convert_uint3_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_rtn(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
-uint3 __ovld __cnfn convert_uint3(long3);
-uint3 __ovld __cnfn convert_uint3_sat(long3);
-uint3 __ovld __cnfn convert_uint3_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat(ulong3);
-uint3 __ovld __cnfn convert_uint3_rte(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
-uint3 __ovld __cnfn convert_uint3_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_rtn(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
-uint3 __ovld __cnfn convert_uint3(float3);
-uint3 __ovld __cnfn convert_uint3_sat(float3);
-long3 __ovld __cnfn convert_long3_rte(char3);
-long3 __ovld __cnfn convert_long3_sat_rte(char3);
-long3 __ovld __cnfn convert_long3_rtz(char3);
-long3 __ovld __cnfn convert_long3_sat_rtz(char3);
-long3 __ovld __cnfn convert_long3_rtp(char3);
-long3 __ovld __cnfn convert_long3_sat_rtp(char3);
-long3 __ovld __cnfn convert_long3_rtn(char3);
-long3 __ovld __cnfn convert_long3_sat_rtn(char3);
-long3 __ovld __cnfn convert_long3(char3);
-long3 __ovld __cnfn convert_long3_sat(char3);
-long3 __ovld __cnfn convert_long3_rte(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
-long3 __ovld __cnfn convert_long3_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_rtn(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
-long3 __ovld __cnfn convert_long3(uchar3);
-long3 __ovld __cnfn convert_long3_sat(uchar3);
-long3 __ovld __cnfn convert_long3_rte(short3);
-long3 __ovld __cnfn convert_long3_sat_rte(short3);
-long3 __ovld __cnfn convert_long3_rtz(short3);
-long3 __ovld __cnfn convert_long3_sat_rtz(short3);
-long3 __ovld __cnfn convert_long3_rtp(short3);
-long3 __ovld __cnfn convert_long3_sat_rtp(short3);
-long3 __ovld __cnfn convert_long3_rtn(short3);
-long3 __ovld __cnfn convert_long3_sat_rtn(short3);
-long3 __ovld __cnfn convert_long3(short3);
-long3 __ovld __cnfn convert_long3_sat(short3);
-long3 __ovld __cnfn convert_long3_rte(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
-long3 __ovld __cnfn convert_long3_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_rtn(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
-long3 __ovld __cnfn convert_long3(ushort3);
-long3 __ovld __cnfn convert_long3_sat(ushort3);
-long3 __ovld __cnfn convert_long3_rte(int3);
-long3 __ovld __cnfn convert_long3_sat_rte(int3);
-long3 __ovld __cnfn convert_long3_rtz(int3);
-long3 __ovld __cnfn convert_long3_sat_rtz(int3);
-long3 __ovld __cnfn convert_long3_rtp(int3);
-long3 __ovld __cnfn convert_long3_sat_rtp(int3);
-long3 __ovld __cnfn convert_long3_rtn(int3);
-long3 __ovld __cnfn convert_long3_sat_rtn(int3);
-long3 __ovld __cnfn convert_long3(int3);
-long3 __ovld __cnfn convert_long3_sat(int3);
-long3 __ovld __cnfn convert_long3_rte(uint3);
-long3 __ovld __cnfn convert_long3_sat_rte(uint3);
-long3 __ovld __cnfn convert_long3_rtz(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
-long3 __ovld __cnfn convert_long3_rtp(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
-long3 __ovld __cnfn convert_long3_rtn(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
-long3 __ovld __cnfn convert_long3(uint3);
-long3 __ovld __cnfn convert_long3_sat(uint3);
-long3 __ovld __cnfn convert_long3_rte(long3);
-long3 __ovld __cnfn convert_long3_sat_rte(long3);
-long3 __ovld __cnfn convert_long3_rtz(long3);
-long3 __ovld __cnfn convert_long3_sat_rtz(long3);
-long3 __ovld __cnfn convert_long3_rtp(long3);
-long3 __ovld __cnfn convert_long3_sat_rtp(long3);
-long3 __ovld __cnfn convert_long3_rtn(long3);
-long3 __ovld __cnfn convert_long3_sat_rtn(long3);
-long3 __ovld __cnfn convert_long3(long3);
-long3 __ovld __cnfn convert_long3_sat(long3);
-long3 __ovld __cnfn convert_long3_rte(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
-long3 __ovld __cnfn convert_long3_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_rtn(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
-long3 __ovld __cnfn convert_long3(ulong3);
-long3 __ovld __cnfn convert_long3_sat(ulong3);
-long3 __ovld __cnfn convert_long3_rte(float3);
-long3 __ovld __cnfn convert_long3_sat_rte(float3);
-long3 __ovld __cnfn convert_long3_rtz(float3);
-long3 __ovld __cnfn convert_long3_sat_rtz(float3);
-long3 __ovld __cnfn convert_long3_rtp(float3);
-long3 __ovld __cnfn convert_long3_sat_rtp(float3);
-long3 __ovld __cnfn convert_long3_rtn(float3);
-long3 __ovld __cnfn convert_long3_sat_rtn(float3);
-long3 __ovld __cnfn convert_long3(float3);
-long3 __ovld __cnfn convert_long3_sat(float3);
-ulong3 __ovld __cnfn convert_ulong3_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat(char3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat(short3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat(int3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat(long3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat(float3);
-float3 __ovld __cnfn convert_float3_rte(char3);
-float3 __ovld __cnfn convert_float3_rtz(char3);
-float3 __ovld __cnfn convert_float3_rtp(char3);
-float3 __ovld __cnfn convert_float3_rtn(char3);
-float3 __ovld __cnfn convert_float3(char3);
-float3 __ovld __cnfn convert_float3_rte(uchar3);
-float3 __ovld __cnfn convert_float3_rtz(uchar3);
-float3 __ovld __cnfn convert_float3_rtp(uchar3);
-float3 __ovld __cnfn convert_float3_rtn(uchar3);
-float3 __ovld __cnfn convert_float3(uchar3);
-float3 __ovld __cnfn convert_float3_rte(short3);
-float3 __ovld __cnfn convert_float3_rtz(short3);
-float3 __ovld __cnfn convert_float3_rtp(short3);
-float3 __ovld __cnfn convert_float3_rtn(short3);
-float3 __ovld __cnfn convert_float3(short3);
-float3 __ovld __cnfn convert_float3_rte(ushort3);
-float3 __ovld __cnfn convert_float3_rtz(ushort3);
-float3 __ovld __cnfn convert_float3_rtp(ushort3);
-float3 __ovld __cnfn convert_float3_rtn(ushort3);
-float3 __ovld __cnfn convert_float3(ushort3);
-float3 __ovld __cnfn convert_float3_rte(int3);
-float3 __ovld __cnfn convert_float3_rtz(int3);
-float3 __ovld __cnfn convert_float3_rtp(int3);
-float3 __ovld __cnfn convert_float3_rtn(int3);
-float3 __ovld __cnfn convert_float3(int3);
-float3 __ovld __cnfn convert_float3_rte(uint3);
-float3 __ovld __cnfn convert_float3_rtz(uint3);
-float3 __ovld __cnfn convert_float3_rtp(uint3);
-float3 __ovld __cnfn convert_float3_rtn(uint3);
-float3 __ovld __cnfn convert_float3(uint3);
-float3 __ovld __cnfn convert_float3_rte(long3);
-float3 __ovld __cnfn convert_float3_rtz(long3);
-float3 __ovld __cnfn convert_float3_rtp(long3);
-float3 __ovld __cnfn convert_float3_rtn(long3);
-float3 __ovld __cnfn convert_float3(long3);
-float3 __ovld __cnfn convert_float3_rte(ulong3);
-float3 __ovld __cnfn convert_float3_rtz(ulong3);
-float3 __ovld __cnfn convert_float3_rtp(ulong3);
-float3 __ovld __cnfn convert_float3_rtn(ulong3);
-float3 __ovld __cnfn convert_float3(ulong3);
-float3 __ovld __cnfn convert_float3_rte(float3);
-float3 __ovld __cnfn convert_float3_rtz(float3);
-float3 __ovld __cnfn convert_float3_rtp(float3);
-float3 __ovld __cnfn convert_float3_rtn(float3);
-float3 __ovld __cnfn convert_float3(float3);
-char4 __ovld __cnfn convert_char4_rte(char4);
-char4 __ovld __cnfn convert_char4_sat_rte(char4);
-char4 __ovld __cnfn convert_char4_rtz(char4);
-char4 __ovld __cnfn convert_char4_sat_rtz(char4);
-char4 __ovld __cnfn convert_char4_rtp(char4);
-char4 __ovld __cnfn convert_char4_sat_rtp(char4);
-char4 __ovld __cnfn convert_char4_rtn(char4);
-char4 __ovld __cnfn convert_char4_sat_rtn(char4);
-char4 __ovld __cnfn convert_char4(char4);
-char4 __ovld __cnfn convert_char4_sat(char4);
-char4 __ovld __cnfn convert_char4_rte(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
-char4 __ovld __cnfn convert_char4_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_rtn(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
-char4 __ovld __cnfn convert_char4(uchar4);
-char4 __ovld __cnfn convert_char4_sat(uchar4);
-char4 __ovld __cnfn convert_char4_rte(short4);
-char4 __ovld __cnfn convert_char4_sat_rte(short4);
-char4 __ovld __cnfn convert_char4_rtz(short4);
-char4 __ovld __cnfn convert_char4_sat_rtz(short4);
-char4 __ovld __cnfn convert_char4_rtp(short4);
-char4 __ovld __cnfn convert_char4_sat_rtp(short4);
-char4 __ovld __cnfn convert_char4_rtn(short4);
-char4 __ovld __cnfn convert_char4_sat_rtn(short4);
-char4 __ovld __cnfn convert_char4(short4);
-char4 __ovld __cnfn convert_char4_sat(short4);
-char4 __ovld __cnfn convert_char4_rte(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
-char4 __ovld __cnfn convert_char4_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_rtn(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
-char4 __ovld __cnfn convert_char4(ushort4);
-char4 __ovld __cnfn convert_char4_sat(ushort4);
-char4 __ovld __cnfn convert_char4_rte(int4);
-char4 __ovld __cnfn convert_char4_sat_rte(int4);
-char4 __ovld __cnfn convert_char4_rtz(int4);
-char4 __ovld __cnfn convert_char4_sat_rtz(int4);
-char4 __ovld __cnfn convert_char4_rtp(int4);
-char4 __ovld __cnfn convert_char4_sat_rtp(int4);
-char4 __ovld __cnfn convert_char4_rtn(int4);
-char4 __ovld __cnfn convert_char4_sat_rtn(int4);
-char4 __ovld __cnfn convert_char4(int4);
-char4 __ovld __cnfn convert_char4_sat(int4);
-char4 __ovld __cnfn convert_char4_rte(uint4);
-char4 __ovld __cnfn convert_char4_sat_rte(uint4);
-char4 __ovld __cnfn convert_char4_rtz(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
-char4 __ovld __cnfn convert_char4_rtp(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
-char4 __ovld __cnfn convert_char4_rtn(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
-char4 __ovld __cnfn convert_char4(uint4);
-char4 __ovld __cnfn convert_char4_sat(uint4);
-char4 __ovld __cnfn convert_char4_rte(long4);
-char4 __ovld __cnfn convert_char4_sat_rte(long4);
-char4 __ovld __cnfn convert_char4_rtz(long4);
-char4 __ovld __cnfn convert_char4_sat_rtz(long4);
-char4 __ovld __cnfn convert_char4_rtp(long4);
-char4 __ovld __cnfn convert_char4_sat_rtp(long4);
-char4 __ovld __cnfn convert_char4_rtn(long4);
-char4 __ovld __cnfn convert_char4_sat_rtn(long4);
-char4 __ovld __cnfn convert_char4(long4);
-char4 __ovld __cnfn convert_char4_sat(long4);
-char4 __ovld __cnfn convert_char4_rte(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
-char4 __ovld __cnfn convert_char4_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_rtn(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
-char4 __ovld __cnfn convert_char4(ulong4);
-char4 __ovld __cnfn convert_char4_sat(ulong4);
-char4 __ovld __cnfn convert_char4_rte(float4);
-char4 __ovld __cnfn convert_char4_sat_rte(float4);
-char4 __ovld __cnfn convert_char4_rtz(float4);
-char4 __ovld __cnfn convert_char4_sat_rtz(float4);
-char4 __ovld __cnfn convert_char4_rtp(float4);
-char4 __ovld __cnfn convert_char4_sat_rtp(float4);
-char4 __ovld __cnfn convert_char4_rtn(float4);
-char4 __ovld __cnfn convert_char4_sat_rtn(float4);
-char4 __ovld __cnfn convert_char4(float4);
-char4 __ovld __cnfn convert_char4_sat(float4);
-uchar4 __ovld __cnfn convert_uchar4_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat(char4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat(short4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat(int4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat(long4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat(float4);
-short4 __ovld __cnfn convert_short4_rte(char4);
-short4 __ovld __cnfn convert_short4_sat_rte(char4);
-short4 __ovld __cnfn convert_short4_rtz(char4);
-short4 __ovld __cnfn convert_short4_sat_rtz(char4);
-short4 __ovld __cnfn convert_short4_rtp(char4);
-short4 __ovld __cnfn convert_short4_sat_rtp(char4);
-short4 __ovld __cnfn convert_short4_rtn(char4);
-short4 __ovld __cnfn convert_short4_sat_rtn(char4);
-short4 __ovld __cnfn convert_short4(char4);
-short4 __ovld __cnfn convert_short4_sat(char4);
-short4 __ovld __cnfn convert_short4_rte(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
-short4 __ovld __cnfn convert_short4_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_rtn(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
-short4 __ovld __cnfn convert_short4(uchar4);
-short4 __ovld __cnfn convert_short4_sat(uchar4);
-short4 __ovld __cnfn convert_short4_rte(short4);
-short4 __ovld __cnfn convert_short4_sat_rte(short4);
-short4 __ovld __cnfn convert_short4_rtz(short4);
-short4 __ovld __cnfn convert_short4_sat_rtz(short4);
-short4 __ovld __cnfn convert_short4_rtp(short4);
-short4 __ovld __cnfn convert_short4_sat_rtp(short4);
-short4 __ovld __cnfn convert_short4_rtn(short4);
-short4 __ovld __cnfn convert_short4_sat_rtn(short4);
-short4 __ovld __cnfn convert_short4(short4);
-short4 __ovld __cnfn convert_short4_sat(short4);
-short4 __ovld __cnfn convert_short4_rte(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
-short4 __ovld __cnfn convert_short4_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_rtn(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
-short4 __ovld __cnfn convert_short4(ushort4);
-short4 __ovld __cnfn convert_short4_sat(ushort4);
-short4 __ovld __cnfn convert_short4_rte(int4);
-short4 __ovld __cnfn convert_short4_sat_rte(int4);
-short4 __ovld __cnfn convert_short4_rtz(int4);
-short4 __ovld __cnfn convert_short4_sat_rtz(int4);
-short4 __ovld __cnfn convert_short4_rtp(int4);
-short4 __ovld __cnfn convert_short4_sat_rtp(int4);
-short4 __ovld __cnfn convert_short4_rtn(int4);
-short4 __ovld __cnfn convert_short4_sat_rtn(int4);
-short4 __ovld __cnfn convert_short4(int4);
-short4 __ovld __cnfn convert_short4_sat(int4);
-short4 __ovld __cnfn convert_short4_rte(uint4);
-short4 __ovld __cnfn convert_short4_sat_rte(uint4);
-short4 __ovld __cnfn convert_short4_rtz(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
-short4 __ovld __cnfn convert_short4_rtp(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
-short4 __ovld __cnfn convert_short4_rtn(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
-short4 __ovld __cnfn convert_short4(uint4);
-short4 __ovld __cnfn convert_short4_sat(uint4);
-short4 __ovld __cnfn convert_short4_rte(long4);
-short4 __ovld __cnfn convert_short4_sat_rte(long4);
-short4 __ovld __cnfn convert_short4_rtz(long4);
-short4 __ovld __cnfn convert_short4_sat_rtz(long4);
-short4 __ovld __cnfn convert_short4_rtp(long4);
-short4 __ovld __cnfn convert_short4_sat_rtp(long4);
-short4 __ovld __cnfn convert_short4_rtn(long4);
-short4 __ovld __cnfn convert_short4_sat_rtn(long4);
-short4 __ovld __cnfn convert_short4(long4);
-short4 __ovld __cnfn convert_short4_sat(long4);
-short4 __ovld __cnfn convert_short4_rte(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
-short4 __ovld __cnfn convert_short4_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_rtn(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
-short4 __ovld __cnfn convert_short4(ulong4);
-short4 __ovld __cnfn convert_short4_sat(ulong4);
-short4 __ovld __cnfn convert_short4_rte(float4);
-short4 __ovld __cnfn convert_short4_sat_rte(float4);
-short4 __ovld __cnfn convert_short4_rtz(float4);
-short4 __ovld __cnfn convert_short4_sat_rtz(float4);
-short4 __ovld __cnfn convert_short4_rtp(float4);
-short4 __ovld __cnfn convert_short4_sat_rtp(float4);
-short4 __ovld __cnfn convert_short4_rtn(float4);
-short4 __ovld __cnfn convert_short4_sat_rtn(float4);
-short4 __ovld __cnfn convert_short4(float4);
-short4 __ovld __cnfn convert_short4_sat(float4);
-ushort4 __ovld __cnfn convert_ushort4_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat(char4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat(short4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat(int4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat(long4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat(float4);
-int4 __ovld __cnfn convert_int4_rte(char4);
-int4 __ovld __cnfn convert_int4_sat_rte(char4);
-int4 __ovld __cnfn convert_int4_rtz(char4);
-int4 __ovld __cnfn convert_int4_sat_rtz(char4);
-int4 __ovld __cnfn convert_int4_rtp(char4);
-int4 __ovld __cnfn convert_int4_sat_rtp(char4);
-int4 __ovld __cnfn convert_int4_rtn(char4);
-int4 __ovld __cnfn convert_int4_sat_rtn(char4);
-int4 __ovld __cnfn convert_int4(char4);
-int4 __ovld __cnfn convert_int4_sat(char4);
-int4 __ovld __cnfn convert_int4_rte(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
-int4 __ovld __cnfn convert_int4_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_rtn(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
-int4 __ovld __cnfn convert_int4(uchar4);
-int4 __ovld __cnfn convert_int4_sat(uchar4);
-int4 __ovld __cnfn convert_int4_rte(short4);
-int4 __ovld __cnfn convert_int4_sat_rte(short4);
-int4 __ovld __cnfn convert_int4_rtz(short4);
-int4 __ovld __cnfn convert_int4_sat_rtz(short4);
-int4 __ovld __cnfn convert_int4_rtp(short4);
-int4 __ovld __cnfn convert_int4_sat_rtp(short4);
-int4 __ovld __cnfn convert_int4_rtn(short4);
-int4 __ovld __cnfn convert_int4_sat_rtn(short4);
-int4 __ovld __cnfn convert_int4(short4);
-int4 __ovld __cnfn convert_int4_sat(short4);
-int4 __ovld __cnfn convert_int4_rte(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
-int4 __ovld __cnfn convert_int4_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_rtn(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
-int4 __ovld __cnfn convert_int4(ushort4);
-int4 __ovld __cnfn convert_int4_sat(ushort4);
-int4 __ovld __cnfn convert_int4_rte(int4);
-int4 __ovld __cnfn convert_int4_sat_rte(int4);
-int4 __ovld __cnfn convert_int4_rtz(int4);
-int4 __ovld __cnfn convert_int4_sat_rtz(int4);
-int4 __ovld __cnfn convert_int4_rtp(int4);
-int4 __ovld __cnfn convert_int4_sat_rtp(int4);
-int4 __ovld __cnfn convert_int4_rtn(int4);
-int4 __ovld __cnfn convert_int4_sat_rtn(int4);
-int4 __ovld __cnfn convert_int4(int4);
-int4 __ovld __cnfn convert_int4_sat(int4);
-int4 __ovld __cnfn convert_int4_rte(uint4);
-int4 __ovld __cnfn convert_int4_sat_rte(uint4);
-int4 __ovld __cnfn convert_int4_rtz(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
-int4 __ovld __cnfn convert_int4_rtp(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
-int4 __ovld __cnfn convert_int4_rtn(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
-int4 __ovld __cnfn convert_int4(uint4);
-int4 __ovld __cnfn convert_int4_sat(uint4);
-int4 __ovld __cnfn convert_int4_rte(long4);
-int4 __ovld __cnfn convert_int4_sat_rte(long4);
-int4 __ovld __cnfn convert_int4_rtz(long4);
-int4 __ovld __cnfn convert_int4_sat_rtz(long4);
-int4 __ovld __cnfn convert_int4_rtp(long4);
-int4 __ovld __cnfn convert_int4_sat_rtp(long4);
-int4 __ovld __cnfn convert_int4_rtn(long4);
-int4 __ovld __cnfn convert_int4_sat_rtn(long4);
-int4 __ovld __cnfn convert_int4(long4);
-int4 __ovld __cnfn convert_int4_sat(long4);
-int4 __ovld __cnfn convert_int4_rte(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
-int4 __ovld __cnfn convert_int4_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_rtn(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
-int4 __ovld __cnfn convert_int4(ulong4);
-int4 __ovld __cnfn convert_int4_sat(ulong4);
-int4 __ovld __cnfn convert_int4_rte(float4);
-int4 __ovld __cnfn convert_int4_sat_rte(float4);
-int4 __ovld __cnfn convert_int4_rtz(float4);
-int4 __ovld __cnfn convert_int4_sat_rtz(float4);
-int4 __ovld __cnfn convert_int4_rtp(float4);
-int4 __ovld __cnfn convert_int4_sat_rtp(float4);
-int4 __ovld __cnfn convert_int4_rtn(float4);
-int4 __ovld __cnfn convert_int4_sat_rtn(float4);
-int4 __ovld __cnfn convert_int4(float4);
-int4 __ovld __cnfn convert_int4_sat(float4);
-uint4 __ovld __cnfn convert_uint4_rte(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
-uint4 __ovld __cnfn convert_uint4_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_rtn(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
-uint4 __ovld __cnfn convert_uint4(char4);
-uint4 __ovld __cnfn convert_uint4_sat(char4);
-uint4 __ovld __cnfn convert_uint4_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat(uchar4);
-uint4 __ovld __cnfn convert_uint4_rte(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
-uint4 __ovld __cnfn convert_uint4_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_rtn(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
-uint4 __ovld __cnfn convert_uint4(short4);
-uint4 __ovld __cnfn convert_uint4_sat(short4);
-uint4 __ovld __cnfn convert_uint4_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat(ushort4);
-uint4 __ovld __cnfn convert_uint4_rte(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
-uint4 __ovld __cnfn convert_uint4_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_rtn(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
-uint4 __ovld __cnfn convert_uint4(int4);
-uint4 __ovld __cnfn convert_uint4_sat(int4);
-uint4 __ovld __cnfn convert_uint4_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4(uint4);
-uint4 __ovld __cnfn convert_uint4_sat(uint4);
-uint4 __ovld __cnfn convert_uint4_rte(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
-uint4 __ovld __cnfn convert_uint4_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_rtn(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
-uint4 __ovld __cnfn convert_uint4(long4);
-uint4 __ovld __cnfn convert_uint4_sat(long4);
-uint4 __ovld __cnfn convert_uint4_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat(ulong4);
-uint4 __ovld __cnfn convert_uint4_rte(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
-uint4 __ovld __cnfn convert_uint4_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_rtn(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
-uint4 __ovld __cnfn convert_uint4(float4);
-uint4 __ovld __cnfn convert_uint4_sat(float4);
-long4 __ovld __cnfn convert_long4_rte(char4);
-long4 __ovld __cnfn convert_long4_sat_rte(char4);
-long4 __ovld __cnfn convert_long4_rtz(char4);
-long4 __ovld __cnfn convert_long4_sat_rtz(char4);
-long4 __ovld __cnfn convert_long4_rtp(char4);
-long4 __ovld __cnfn convert_long4_sat_rtp(char4);
-long4 __ovld __cnfn convert_long4_rtn(char4);
-long4 __ovld __cnfn convert_long4_sat_rtn(char4);
-long4 __ovld __cnfn convert_long4(char4);
-long4 __ovld __cnfn convert_long4_sat(char4);
-long4 __ovld __cnfn convert_long4_rte(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
-long4 __ovld __cnfn convert_long4_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_rtn(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
-long4 __ovld __cnfn convert_long4(uchar4);
-long4 __ovld __cnfn convert_long4_sat(uchar4);
-long4 __ovld __cnfn convert_long4_rte(short4);
-long4 __ovld __cnfn convert_long4_sat_rte(short4);
-long4 __ovld __cnfn convert_long4_rtz(short4);
-long4 __ovld __cnfn convert_long4_sat_rtz(short4);
-long4 __ovld __cnfn convert_long4_rtp(short4);
-long4 __ovld __cnfn convert_long4_sat_rtp(short4);
-long4 __ovld __cnfn convert_long4_rtn(short4);
-long4 __ovld __cnfn convert_long4_sat_rtn(short4);
-long4 __ovld __cnfn convert_long4(short4);
-long4 __ovld __cnfn convert_long4_sat(short4);
-long4 __ovld __cnfn convert_long4_rte(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
-long4 __ovld __cnfn convert_long4_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_rtn(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
-long4 __ovld __cnfn convert_long4(ushort4);
-long4 __ovld __cnfn convert_long4_sat(ushort4);
-long4 __ovld __cnfn convert_long4_rte(int4);
-long4 __ovld __cnfn convert_long4_sat_rte(int4);
-long4 __ovld __cnfn convert_long4_rtz(int4);
-long4 __ovld __cnfn convert_long4_sat_rtz(int4);
-long4 __ovld __cnfn convert_long4_rtp(int4);
-long4 __ovld __cnfn convert_long4_sat_rtp(int4);
-long4 __ovld __cnfn convert_long4_rtn(int4);
-long4 __ovld __cnfn convert_long4_sat_rtn(int4);
-long4 __ovld __cnfn convert_long4(int4);
-long4 __ovld __cnfn convert_long4_sat(int4);
-long4 __ovld __cnfn convert_long4_rte(uint4);
-long4 __ovld __cnfn convert_long4_sat_rte(uint4);
-long4 __ovld __cnfn convert_long4_rtz(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
-long4 __ovld __cnfn convert_long4_rtp(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
-long4 __ovld __cnfn convert_long4_rtn(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
-long4 __ovld __cnfn convert_long4(uint4);
-long4 __ovld __cnfn convert_long4_sat(uint4);
-long4 __ovld __cnfn convert_long4_rte(long4);
-long4 __ovld __cnfn convert_long4_sat_rte(long4);
-long4 __ovld __cnfn convert_long4_rtz(long4);
-long4 __ovld __cnfn convert_long4_sat_rtz(long4);
-long4 __ovld __cnfn convert_long4_rtp(long4);
-long4 __ovld __cnfn convert_long4_sat_rtp(long4);
-long4 __ovld __cnfn convert_long4_rtn(long4);
-long4 __ovld __cnfn convert_long4_sat_rtn(long4);
-long4 __ovld __cnfn convert_long4(long4);
-long4 __ovld __cnfn convert_long4_sat(long4);
-long4 __ovld __cnfn convert_long4_rte(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
-long4 __ovld __cnfn convert_long4_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_rtn(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
-long4 __ovld __cnfn convert_long4(ulong4);
-long4 __ovld __cnfn convert_long4_sat(ulong4);
-long4 __ovld __cnfn convert_long4_rte(float4);
-long4 __ovld __cnfn convert_long4_sat_rte(float4);
-long4 __ovld __cnfn convert_long4_rtz(float4);
-long4 __ovld __cnfn convert_long4_sat_rtz(float4);
-long4 __ovld __cnfn convert_long4_rtp(float4);
-long4 __ovld __cnfn convert_long4_sat_rtp(float4);
-long4 __ovld __cnfn convert_long4_rtn(float4);
-long4 __ovld __cnfn convert_long4_sat_rtn(float4);
-long4 __ovld __cnfn convert_long4(float4);
-long4 __ovld __cnfn convert_long4_sat(float4);
-ulong4 __ovld __cnfn convert_ulong4_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat(char4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat(short4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat(int4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat(long4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat(float4);
-float4 __ovld __cnfn convert_float4_rte(char4);
-float4 __ovld __cnfn convert_float4_rtz(char4);
-float4 __ovld __cnfn convert_float4_rtp(char4);
-float4 __ovld __cnfn convert_float4_rtn(char4);
-float4 __ovld __cnfn convert_float4(char4);
-float4 __ovld __cnfn convert_float4_rte(uchar4);
-float4 __ovld __cnfn convert_float4_rtz(uchar4);
-float4 __ovld __cnfn convert_float4_rtp(uchar4);
-float4 __ovld __cnfn convert_float4_rtn(uchar4);
-float4 __ovld __cnfn convert_float4(uchar4);
-float4 __ovld __cnfn convert_float4_rte(short4);
-float4 __ovld __cnfn convert_float4_rtz(short4);
-float4 __ovld __cnfn convert_float4_rtp(short4);
-float4 __ovld __cnfn convert_float4_rtn(short4);
-float4 __ovld __cnfn convert_float4(short4);
-float4 __ovld __cnfn convert_float4_rte(ushort4);
-float4 __ovld __cnfn convert_float4_rtz(ushort4);
-float4 __ovld __cnfn convert_float4_rtp(ushort4);
-float4 __ovld __cnfn convert_float4_rtn(ushort4);
-float4 __ovld __cnfn convert_float4(ushort4);
-float4 __ovld __cnfn convert_float4_rte(int4);
-float4 __ovld __cnfn convert_float4_rtz(int4);
-float4 __ovld __cnfn convert_float4_rtp(int4);
-float4 __ovld __cnfn convert_float4_rtn(int4);
-float4 __ovld __cnfn convert_float4(int4);
-float4 __ovld __cnfn convert_float4_rte(uint4);
-float4 __ovld __cnfn convert_float4_rtz(uint4);
-float4 __ovld __cnfn convert_float4_rtp(uint4);
-float4 __ovld __cnfn convert_float4_rtn(uint4);
-float4 __ovld __cnfn convert_float4(uint4);
-float4 __ovld __cnfn convert_float4_rte(long4);
-float4 __ovld __cnfn convert_float4_rtz(long4);
-float4 __ovld __cnfn convert_float4_rtp(long4);
-float4 __ovld __cnfn convert_float4_rtn(long4);
-float4 __ovld __cnfn convert_float4(long4);
-float4 __ovld __cnfn convert_float4_rte(ulong4);
-float4 __ovld __cnfn convert_float4_rtz(ulong4);
-float4 __ovld __cnfn convert_float4_rtp(ulong4);
-float4 __ovld __cnfn convert_float4_rtn(ulong4);
-float4 __ovld __cnfn convert_float4(ulong4);
-float4 __ovld __cnfn convert_float4_rte(float4);
-float4 __ovld __cnfn convert_float4_rtz(float4);
-float4 __ovld __cnfn convert_float4_rtp(float4);
-float4 __ovld __cnfn convert_float4_rtn(float4);
-float4 __ovld __cnfn convert_float4(float4);
-char8 __ovld __cnfn convert_char8_rte(char8);
-char8 __ovld __cnfn convert_char8_sat_rte(char8);
-char8 __ovld __cnfn convert_char8_rtz(char8);
-char8 __ovld __cnfn convert_char8_sat_rtz(char8);
-char8 __ovld __cnfn convert_char8_rtp(char8);
-char8 __ovld __cnfn convert_char8_sat_rtp(char8);
-char8 __ovld __cnfn convert_char8_rtn(char8);
-char8 __ovld __cnfn convert_char8_sat_rtn(char8);
-char8 __ovld __cnfn convert_char8(char8);
-char8 __ovld __cnfn convert_char8_sat(char8);
-char8 __ovld __cnfn convert_char8_rte(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
-char8 __ovld __cnfn convert_char8_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_rtn(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
-char8 __ovld __cnfn convert_char8(uchar8);
-char8 __ovld __cnfn convert_char8_sat(uchar8);
-char8 __ovld __cnfn convert_char8_rte(short8);
-char8 __ovld __cnfn convert_char8_sat_rte(short8);
-char8 __ovld __cnfn convert_char8_rtz(short8);
-char8 __ovld __cnfn convert_char8_sat_rtz(short8);
-char8 __ovld __cnfn convert_char8_rtp(short8);
-char8 __ovld __cnfn convert_char8_sat_rtp(short8);
-char8 __ovld __cnfn convert_char8_rtn(short8);
-char8 __ovld __cnfn convert_char8_sat_rtn(short8);
-char8 __ovld __cnfn convert_char8(short8);
-char8 __ovld __cnfn convert_char8_sat(short8);
-char8 __ovld __cnfn convert_char8_rte(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
-char8 __ovld __cnfn convert_char8_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_rtn(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
-char8 __ovld __cnfn convert_char8(ushort8);
-char8 __ovld __cnfn convert_char8_sat(ushort8);
-char8 __ovld __cnfn convert_char8_rte(int8);
-char8 __ovld __cnfn convert_char8_sat_rte(int8);
-char8 __ovld __cnfn convert_char8_rtz(int8);
-char8 __ovld __cnfn convert_char8_sat_rtz(int8);
-char8 __ovld __cnfn convert_char8_rtp(int8);
-char8 __ovld __cnfn convert_char8_sat_rtp(int8);
-char8 __ovld __cnfn convert_char8_rtn(int8);
-char8 __ovld __cnfn convert_char8_sat_rtn(int8);
-char8 __ovld __cnfn convert_char8(int8);
-char8 __ovld __cnfn convert_char8_sat(int8);
-char8 __ovld __cnfn convert_char8_rte(uint8);
-char8 __ovld __cnfn convert_char8_sat_rte(uint8);
-char8 __ovld __cnfn convert_char8_rtz(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
-char8 __ovld __cnfn convert_char8_rtp(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
-char8 __ovld __cnfn convert_char8_rtn(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
-char8 __ovld __cnfn convert_char8(uint8);
-char8 __ovld __cnfn convert_char8_sat(uint8);
-char8 __ovld __cnfn convert_char8_rte(long8);
-char8 __ovld __cnfn convert_char8_sat_rte(long8);
-char8 __ovld __cnfn convert_char8_rtz(long8);
-char8 __ovld __cnfn convert_char8_sat_rtz(long8);
-char8 __ovld __cnfn convert_char8_rtp(long8);
-char8 __ovld __cnfn convert_char8_sat_rtp(long8);
-char8 __ovld __cnfn convert_char8_rtn(long8);
-char8 __ovld __cnfn convert_char8_sat_rtn(long8);
-char8 __ovld __cnfn convert_char8(long8);
-char8 __ovld __cnfn convert_char8_sat(long8);
-char8 __ovld __cnfn convert_char8_rte(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
-char8 __ovld __cnfn convert_char8_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_rtn(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
-char8 __ovld __cnfn convert_char8(ulong8);
-char8 __ovld __cnfn convert_char8_sat(ulong8);
-char8 __ovld __cnfn convert_char8_rte(float8);
-char8 __ovld __cnfn convert_char8_sat_rte(float8);
-char8 __ovld __cnfn convert_char8_rtz(float8);
-char8 __ovld __cnfn convert_char8_sat_rtz(float8);
-char8 __ovld __cnfn convert_char8_rtp(float8);
-char8 __ovld __cnfn convert_char8_sat_rtp(float8);
-char8 __ovld __cnfn convert_char8_rtn(float8);
-char8 __ovld __cnfn convert_char8_sat_rtn(float8);
-char8 __ovld __cnfn convert_char8(float8);
-char8 __ovld __cnfn convert_char8_sat(float8);
-uchar8 __ovld __cnfn convert_uchar8_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat(char8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat(short8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat(int8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat(long8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat(float8);
-short8 __ovld __cnfn convert_short8_rte(char8);
-short8 __ovld __cnfn convert_short8_sat_rte(char8);
-short8 __ovld __cnfn convert_short8_rtz(char8);
-short8 __ovld __cnfn convert_short8_sat_rtz(char8);
-short8 __ovld __cnfn convert_short8_rtp(char8);
-short8 __ovld __cnfn convert_short8_sat_rtp(char8);
-short8 __ovld __cnfn convert_short8_rtn(char8);
-short8 __ovld __cnfn convert_short8_sat_rtn(char8);
-short8 __ovld __cnfn convert_short8(char8);
-short8 __ovld __cnfn convert_short8_sat(char8);
-short8 __ovld __cnfn convert_short8_rte(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
-short8 __ovld __cnfn convert_short8_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_rtn(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
-short8 __ovld __cnfn convert_short8(uchar8);
-short8 __ovld __cnfn convert_short8_sat(uchar8);
-short8 __ovld __cnfn convert_short8_rte(short8);
-short8 __ovld __cnfn convert_short8_sat_rte(short8);
-short8 __ovld __cnfn convert_short8_rtz(short8);
-short8 __ovld __cnfn convert_short8_sat_rtz(short8);
-short8 __ovld __cnfn convert_short8_rtp(short8);
-short8 __ovld __cnfn convert_short8_sat_rtp(short8);
-short8 __ovld __cnfn convert_short8_rtn(short8);
-short8 __ovld __cnfn convert_short8_sat_rtn(short8);
-short8 __ovld __cnfn convert_short8(short8);
-short8 __ovld __cnfn convert_short8_sat(short8);
-short8 __ovld __cnfn convert_short8_rte(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
-short8 __ovld __cnfn convert_short8_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_rtn(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
-short8 __ovld __cnfn convert_short8(ushort8);
-short8 __ovld __cnfn convert_short8_sat(ushort8);
-short8 __ovld __cnfn convert_short8_rte(int8);
-short8 __ovld __cnfn convert_short8_sat_rte(int8);
-short8 __ovld __cnfn convert_short8_rtz(int8);
-short8 __ovld __cnfn convert_short8_sat_rtz(int8);
-short8 __ovld __cnfn convert_short8_rtp(int8);
-short8 __ovld __cnfn convert_short8_sat_rtp(int8);
-short8 __ovld __cnfn convert_short8_rtn(int8);
-short8 __ovld __cnfn convert_short8_sat_rtn(int8);
-short8 __ovld __cnfn convert_short8(int8);
-short8 __ovld __cnfn convert_short8_sat(int8);
-short8 __ovld __cnfn convert_short8_rte(uint8);
-short8 __ovld __cnfn convert_short8_sat_rte(uint8);
-short8 __ovld __cnfn convert_short8_rtz(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
-short8 __ovld __cnfn convert_short8_rtp(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
-short8 __ovld __cnfn convert_short8_rtn(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
-short8 __ovld __cnfn convert_short8(uint8);
-short8 __ovld __cnfn convert_short8_sat(uint8);
-short8 __ovld __cnfn convert_short8_rte(long8);
-short8 __ovld __cnfn convert_short8_sat_rte(long8);
-short8 __ovld __cnfn convert_short8_rtz(long8);
-short8 __ovld __cnfn convert_short8_sat_rtz(long8);
-short8 __ovld __cnfn convert_short8_rtp(long8);
-short8 __ovld __cnfn convert_short8_sat_rtp(long8);
-short8 __ovld __cnfn convert_short8_rtn(long8);
-short8 __ovld __cnfn convert_short8_sat_rtn(long8);
-short8 __ovld __cnfn convert_short8(long8);
-short8 __ovld __cnfn convert_short8_sat(long8);
-short8 __ovld __cnfn convert_short8_rte(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
-short8 __ovld __cnfn convert_short8_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_rtn(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
-short8 __ovld __cnfn convert_short8(ulong8);
-short8 __ovld __cnfn convert_short8_sat(ulong8);
-short8 __ovld __cnfn convert_short8_rte(float8);
-short8 __ovld __cnfn convert_short8_sat_rte(float8);
-short8 __ovld __cnfn convert_short8_rtz(float8);
-short8 __ovld __cnfn convert_short8_sat_rtz(float8);
-short8 __ovld __cnfn convert_short8_rtp(float8);
-short8 __ovld __cnfn convert_short8_sat_rtp(float8);
-short8 __ovld __cnfn convert_short8_rtn(float8);
-short8 __ovld __cnfn convert_short8_sat_rtn(float8);
-short8 __ovld __cnfn convert_short8(float8);
-short8 __ovld __cnfn convert_short8_sat(float8);
-ushort8 __ovld __cnfn convert_ushort8_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat(char8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat(short8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat(int8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat(long8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat(float8);
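-// The convert_<destType><N> families above and below follow the explicit
-// conversion rules of OpenCL C s6.2.3: the optional _sat modifier clamps
-// out-of-range values to the limits of the destination type, and the
-// optional rounding suffix selects the rounding mode -- _rte (to nearest
-// even), _rtz (toward zero), _rtp (toward +infinity), _rtn (toward
-// -infinity). With no suffix, conversions to integer types default to
-// _rtz and conversions to floating-point types default to _rte.
-// For example, convert_int8_sat_rte(x) with a float8 x rounds each lane
-// to the nearest integer (ties to even) and clamps it to [INT_MIN, INT_MAX].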
-int8 __ovld __cnfn convert_int8_rte(char8);
-int8 __ovld __cnfn convert_int8_sat_rte(char8);
-int8 __ovld __cnfn convert_int8_rtz(char8);
-int8 __ovld __cnfn convert_int8_sat_rtz(char8);
-int8 __ovld __cnfn convert_int8_rtp(char8);
-int8 __ovld __cnfn convert_int8_sat_rtp(char8);
-int8 __ovld __cnfn convert_int8_rtn(char8);
-int8 __ovld __cnfn convert_int8_sat_rtn(char8);
-int8 __ovld __cnfn convert_int8(char8);
-int8 __ovld __cnfn convert_int8_sat(char8);
-int8 __ovld __cnfn convert_int8_rte(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
-int8 __ovld __cnfn convert_int8_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_rtn(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
-int8 __ovld __cnfn convert_int8(uchar8);
-int8 __ovld __cnfn convert_int8_sat(uchar8);
-int8 __ovld __cnfn convert_int8_rte(short8);
-int8 __ovld __cnfn convert_int8_sat_rte(short8);
-int8 __ovld __cnfn convert_int8_rtz(short8);
-int8 __ovld __cnfn convert_int8_sat_rtz(short8);
-int8 __ovld __cnfn convert_int8_rtp(short8);
-int8 __ovld __cnfn convert_int8_sat_rtp(short8);
-int8 __ovld __cnfn convert_int8_rtn(short8);
-int8 __ovld __cnfn convert_int8_sat_rtn(short8);
-int8 __ovld __cnfn convert_int8(short8);
-int8 __ovld __cnfn convert_int8_sat(short8);
-int8 __ovld __cnfn convert_int8_rte(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
-int8 __ovld __cnfn convert_int8_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_rtn(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
-int8 __ovld __cnfn convert_int8(ushort8);
-int8 __ovld __cnfn convert_int8_sat(ushort8);
-int8 __ovld __cnfn convert_int8_rte(int8);
-int8 __ovld __cnfn convert_int8_sat_rte(int8);
-int8 __ovld __cnfn convert_int8_rtz(int8);
-int8 __ovld __cnfn convert_int8_sat_rtz(int8);
-int8 __ovld __cnfn convert_int8_rtp(int8);
-int8 __ovld __cnfn convert_int8_sat_rtp(int8);
-int8 __ovld __cnfn convert_int8_rtn(int8);
-int8 __ovld __cnfn convert_int8_sat_rtn(int8);
-int8 __ovld __cnfn convert_int8(int8);
-int8 __ovld __cnfn convert_int8_sat(int8);
-int8 __ovld __cnfn convert_int8_rte(uint8);
-int8 __ovld __cnfn convert_int8_sat_rte(uint8);
-int8 __ovld __cnfn convert_int8_rtz(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
-int8 __ovld __cnfn convert_int8_rtp(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
-int8 __ovld __cnfn convert_int8_rtn(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
-int8 __ovld __cnfn convert_int8(uint8);
-int8 __ovld __cnfn convert_int8_sat(uint8);
-int8 __ovld __cnfn convert_int8_rte(long8);
-int8 __ovld __cnfn convert_int8_sat_rte(long8);
-int8 __ovld __cnfn convert_int8_rtz(long8);
-int8 __ovld __cnfn convert_int8_sat_rtz(long8);
-int8 __ovld __cnfn convert_int8_rtp(long8);
-int8 __ovld __cnfn convert_int8_sat_rtp(long8);
-int8 __ovld __cnfn convert_int8_rtn(long8);
-int8 __ovld __cnfn convert_int8_sat_rtn(long8);
-int8 __ovld __cnfn convert_int8(long8);
-int8 __ovld __cnfn convert_int8_sat(long8);
-int8 __ovld __cnfn convert_int8_rte(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
-int8 __ovld __cnfn convert_int8_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_rtn(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
-int8 __ovld __cnfn convert_int8(ulong8);
-int8 __ovld __cnfn convert_int8_sat(ulong8);
-int8 __ovld __cnfn convert_int8_rte(float8);
-int8 __ovld __cnfn convert_int8_sat_rte(float8);
-int8 __ovld __cnfn convert_int8_rtz(float8);
-int8 __ovld __cnfn convert_int8_sat_rtz(float8);
-int8 __ovld __cnfn convert_int8_rtp(float8);
-int8 __ovld __cnfn convert_int8_sat_rtp(float8);
-int8 __ovld __cnfn convert_int8_rtn(float8);
-int8 __ovld __cnfn convert_int8_sat_rtn(float8);
-int8 __ovld __cnfn convert_int8(float8);
-int8 __ovld __cnfn convert_int8_sat(float8);
-uint8 __ovld __cnfn convert_uint8_rte(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
-uint8 __ovld __cnfn convert_uint8_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_rtn(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
-uint8 __ovld __cnfn convert_uint8(char8);
-uint8 __ovld __cnfn convert_uint8_sat(char8);
-uint8 __ovld __cnfn convert_uint8_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat(uchar8);
-uint8 __ovld __cnfn convert_uint8_rte(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
-uint8 __ovld __cnfn convert_uint8_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_rtn(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
-uint8 __ovld __cnfn convert_uint8(short8);
-uint8 __ovld __cnfn convert_uint8_sat(short8);
-uint8 __ovld __cnfn convert_uint8_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat(ushort8);
-uint8 __ovld __cnfn convert_uint8_rte(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
-uint8 __ovld __cnfn convert_uint8_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_rtn(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
-uint8 __ovld __cnfn convert_uint8(int8);
-uint8 __ovld __cnfn convert_uint8_sat(int8);
-uint8 __ovld __cnfn convert_uint8_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8(uint8);
-uint8 __ovld __cnfn convert_uint8_sat(uint8);
-uint8 __ovld __cnfn convert_uint8_rte(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
-uint8 __ovld __cnfn convert_uint8_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_rtn(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
-uint8 __ovld __cnfn convert_uint8(long8);
-uint8 __ovld __cnfn convert_uint8_sat(long8);
-uint8 __ovld __cnfn convert_uint8_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat(ulong8);
-uint8 __ovld __cnfn convert_uint8_rte(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
-uint8 __ovld __cnfn convert_uint8_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_rtn(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
-uint8 __ovld __cnfn convert_uint8(float8);
-uint8 __ovld __cnfn convert_uint8_sat(float8);
-long8 __ovld __cnfn convert_long8_rte(char8);
-long8 __ovld __cnfn convert_long8_sat_rte(char8);
-long8 __ovld __cnfn convert_long8_rtz(char8);
-long8 __ovld __cnfn convert_long8_sat_rtz(char8);
-long8 __ovld __cnfn convert_long8_rtp(char8);
-long8 __ovld __cnfn convert_long8_sat_rtp(char8);
-long8 __ovld __cnfn convert_long8_rtn(char8);
-long8 __ovld __cnfn convert_long8_sat_rtn(char8);
-long8 __ovld __cnfn convert_long8(char8);
-long8 __ovld __cnfn convert_long8_sat(char8);
-long8 __ovld __cnfn convert_long8_rte(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
-long8 __ovld __cnfn convert_long8_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_rtn(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
-long8 __ovld __cnfn convert_long8(uchar8);
-long8 __ovld __cnfn convert_long8_sat(uchar8);
-long8 __ovld __cnfn convert_long8_rte(short8);
-long8 __ovld __cnfn convert_long8_sat_rte(short8);
-long8 __ovld __cnfn convert_long8_rtz(short8);
-long8 __ovld __cnfn convert_long8_sat_rtz(short8);
-long8 __ovld __cnfn convert_long8_rtp(short8);
-long8 __ovld __cnfn convert_long8_sat_rtp(short8);
-long8 __ovld __cnfn convert_long8_rtn(short8);
-long8 __ovld __cnfn convert_long8_sat_rtn(short8);
-long8 __ovld __cnfn convert_long8(short8);
-long8 __ovld __cnfn convert_long8_sat(short8);
-long8 __ovld __cnfn convert_long8_rte(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
-long8 __ovld __cnfn convert_long8_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_rtn(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
-long8 __ovld __cnfn convert_long8(ushort8);
-long8 __ovld __cnfn convert_long8_sat(ushort8);
-long8 __ovld __cnfn convert_long8_rte(int8);
-long8 __ovld __cnfn convert_long8_sat_rte(int8);
-long8 __ovld __cnfn convert_long8_rtz(int8);
-long8 __ovld __cnfn convert_long8_sat_rtz(int8);
-long8 __ovld __cnfn convert_long8_rtp(int8);
-long8 __ovld __cnfn convert_long8_sat_rtp(int8);
-long8 __ovld __cnfn convert_long8_rtn(int8);
-long8 __ovld __cnfn convert_long8_sat_rtn(int8);
-long8 __ovld __cnfn convert_long8(int8);
-long8 __ovld __cnfn convert_long8_sat(int8);
-long8 __ovld __cnfn convert_long8_rte(uint8);
-long8 __ovld __cnfn convert_long8_sat_rte(uint8);
-long8 __ovld __cnfn convert_long8_rtz(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
-long8 __ovld __cnfn convert_long8_rtp(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
-long8 __ovld __cnfn convert_long8_rtn(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
-long8 __ovld __cnfn convert_long8(uint8);
-long8 __ovld __cnfn convert_long8_sat(uint8);
-long8 __ovld __cnfn convert_long8_rte(long8);
-long8 __ovld __cnfn convert_long8_sat_rte(long8);
-long8 __ovld __cnfn convert_long8_rtz(long8);
-long8 __ovld __cnfn convert_long8_sat_rtz(long8);
-long8 __ovld __cnfn convert_long8_rtp(long8);
-long8 __ovld __cnfn convert_long8_sat_rtp(long8);
-long8 __ovld __cnfn convert_long8_rtn(long8);
-long8 __ovld __cnfn convert_long8_sat_rtn(long8);
-long8 __ovld __cnfn convert_long8(long8);
-long8 __ovld __cnfn convert_long8_sat(long8);
-long8 __ovld __cnfn convert_long8_rte(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
-long8 __ovld __cnfn convert_long8_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_rtn(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
-long8 __ovld __cnfn convert_long8(ulong8);
-long8 __ovld __cnfn convert_long8_sat(ulong8);
-long8 __ovld __cnfn convert_long8_rte(float8);
-long8 __ovld __cnfn convert_long8_sat_rte(float8);
-long8 __ovld __cnfn convert_long8_rtz(float8);
-long8 __ovld __cnfn convert_long8_sat_rtz(float8);
-long8 __ovld __cnfn convert_long8_rtp(float8);
-long8 __ovld __cnfn convert_long8_sat_rtp(float8);
-long8 __ovld __cnfn convert_long8_rtn(float8);
-long8 __ovld __cnfn convert_long8_sat_rtn(float8);
-long8 __ovld __cnfn convert_long8(float8);
-long8 __ovld __cnfn convert_long8_sat(float8);
-ulong8 __ovld __cnfn convert_ulong8_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat(char8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat(short8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat(int8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat(long8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat(float8);
-float8 __ovld __cnfn convert_float8_rte(char8);
-float8 __ovld __cnfn convert_float8_rtz(char8);
-float8 __ovld __cnfn convert_float8_rtp(char8);
-float8 __ovld __cnfn convert_float8_rtn(char8);
-float8 __ovld __cnfn convert_float8(char8);
-float8 __ovld __cnfn convert_float8_rte(uchar8);
-float8 __ovld __cnfn convert_float8_rtz(uchar8);
-float8 __ovld __cnfn convert_float8_rtp(uchar8);
-float8 __ovld __cnfn convert_float8_rtn(uchar8);
-float8 __ovld __cnfn convert_float8(uchar8);
-float8 __ovld __cnfn convert_float8_rte(short8);
-float8 __ovld __cnfn convert_float8_rtz(short8);
-float8 __ovld __cnfn convert_float8_rtp(short8);
-float8 __ovld __cnfn convert_float8_rtn(short8);
-float8 __ovld __cnfn convert_float8(short8);
-float8 __ovld __cnfn convert_float8_rte(ushort8);
-float8 __ovld __cnfn convert_float8_rtz(ushort8);
-float8 __ovld __cnfn convert_float8_rtp(ushort8);
-float8 __ovld __cnfn convert_float8_rtn(ushort8);
-float8 __ovld __cnfn convert_float8(ushort8);
-float8 __ovld __cnfn convert_float8_rte(int8);
-float8 __ovld __cnfn convert_float8_rtz(int8);
-float8 __ovld __cnfn convert_float8_rtp(int8);
-float8 __ovld __cnfn convert_float8_rtn(int8);
-float8 __ovld __cnfn convert_float8(int8);
-float8 __ovld __cnfn convert_float8_rte(uint8);
-float8 __ovld __cnfn convert_float8_rtz(uint8);
-float8 __ovld __cnfn convert_float8_rtp(uint8);
-float8 __ovld __cnfn convert_float8_rtn(uint8);
-float8 __ovld __cnfn convert_float8(uint8);
-float8 __ovld __cnfn convert_float8_rte(long8);
-float8 __ovld __cnfn convert_float8_rtz(long8);
-float8 __ovld __cnfn convert_float8_rtp(long8);
-float8 __ovld __cnfn convert_float8_rtn(long8);
-float8 __ovld __cnfn convert_float8(long8);
-float8 __ovld __cnfn convert_float8_rte(ulong8);
-float8 __ovld __cnfn convert_float8_rtz(ulong8);
-float8 __ovld __cnfn convert_float8_rtp(ulong8);
-float8 __ovld __cnfn convert_float8_rtn(ulong8);
-float8 __ovld __cnfn convert_float8(ulong8);
-float8 __ovld __cnfn convert_float8_rte(float8);
-float8 __ovld __cnfn convert_float8_rtz(float8);
-float8 __ovld __cnfn convert_float8_rtp(float8);
-float8 __ovld __cnfn convert_float8_rtn(float8);
-float8 __ovld __cnfn convert_float8(float8);
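-// Note that the convert_float<N> families carry no _sat variants:
-// saturation is defined only for conversions to integer destination
-// types, so a floating-point destination offers just the four explicit
-// rounding-mode forms plus the default (_rte) form. The 16-element
-// families below repeat the same pattern as the narrower vector
-// families above.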
-char16 __ovld __cnfn convert_char16_rte(char16);
-char16 __ovld __cnfn convert_char16_sat_rte(char16);
-char16 __ovld __cnfn convert_char16_rtz(char16);
-char16 __ovld __cnfn convert_char16_sat_rtz(char16);
-char16 __ovld __cnfn convert_char16_rtp(char16);
-char16 __ovld __cnfn convert_char16_sat_rtp(char16);
-char16 __ovld __cnfn convert_char16_rtn(char16);
-char16 __ovld __cnfn convert_char16_sat_rtn(char16);
-char16 __ovld __cnfn convert_char16(char16);
-char16 __ovld __cnfn convert_char16_sat(char16);
-char16 __ovld __cnfn convert_char16_rte(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
-char16 __ovld __cnfn convert_char16_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_rtn(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
-char16 __ovld __cnfn convert_char16(uchar16);
-char16 __ovld __cnfn convert_char16_sat(uchar16);
-char16 __ovld __cnfn convert_char16_rte(short16);
-char16 __ovld __cnfn convert_char16_sat_rte(short16);
-char16 __ovld __cnfn convert_char16_rtz(short16);
-char16 __ovld __cnfn convert_char16_sat_rtz(short16);
-char16 __ovld __cnfn convert_char16_rtp(short16);
-char16 __ovld __cnfn convert_char16_sat_rtp(short16);
-char16 __ovld __cnfn convert_char16_rtn(short16);
-char16 __ovld __cnfn convert_char16_sat_rtn(short16);
-char16 __ovld __cnfn convert_char16(short16);
-char16 __ovld __cnfn convert_char16_sat(short16);
-char16 __ovld __cnfn convert_char16_rte(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
-char16 __ovld __cnfn convert_char16_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_rtn(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
-char16 __ovld __cnfn convert_char16(ushort16);
-char16 __ovld __cnfn convert_char16_sat(ushort16);
-char16 __ovld __cnfn convert_char16_rte(int16);
-char16 __ovld __cnfn convert_char16_sat_rte(int16);
-char16 __ovld __cnfn convert_char16_rtz(int16);
-char16 __ovld __cnfn convert_char16_sat_rtz(int16);
-char16 __ovld __cnfn convert_char16_rtp(int16);
-char16 __ovld __cnfn convert_char16_sat_rtp(int16);
-char16 __ovld __cnfn convert_char16_rtn(int16);
-char16 __ovld __cnfn convert_char16_sat_rtn(int16);
-char16 __ovld __cnfn convert_char16(int16);
-char16 __ovld __cnfn convert_char16_sat(int16);
-char16 __ovld __cnfn convert_char16_rte(uint16);
-char16 __ovld __cnfn convert_char16_sat_rte(uint16);
-char16 __ovld __cnfn convert_char16_rtz(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
-char16 __ovld __cnfn convert_char16_rtp(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
-char16 __ovld __cnfn convert_char16_rtn(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
-char16 __ovld __cnfn convert_char16(uint16);
-char16 __ovld __cnfn convert_char16_sat(uint16);
-char16 __ovld __cnfn convert_char16_rte(long16);
-char16 __ovld __cnfn convert_char16_sat_rte(long16);
-char16 __ovld __cnfn convert_char16_rtz(long16);
-char16 __ovld __cnfn convert_char16_sat_rtz(long16);
-char16 __ovld __cnfn convert_char16_rtp(long16);
-char16 __ovld __cnfn convert_char16_sat_rtp(long16);
-char16 __ovld __cnfn convert_char16_rtn(long16);
-char16 __ovld __cnfn convert_char16_sat_rtn(long16);
-char16 __ovld __cnfn convert_char16(long16);
-char16 __ovld __cnfn convert_char16_sat(long16);
-char16 __ovld __cnfn convert_char16_rte(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
-char16 __ovld __cnfn convert_char16_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_rtn(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
-char16 __ovld __cnfn convert_char16(ulong16);
-char16 __ovld __cnfn convert_char16_sat(ulong16);
-char16 __ovld __cnfn convert_char16_rte(float16);
-char16 __ovld __cnfn convert_char16_sat_rte(float16);
-char16 __ovld __cnfn convert_char16_rtz(float16);
-char16 __ovld __cnfn convert_char16_sat_rtz(float16);
-char16 __ovld __cnfn convert_char16_rtp(float16);
-char16 __ovld __cnfn convert_char16_sat_rtp(float16);
-char16 __ovld __cnfn convert_char16_rtn(float16);
-char16 __ovld __cnfn convert_char16_sat_rtn(float16);
-char16 __ovld __cnfn convert_char16(float16);
-char16 __ovld __cnfn convert_char16_sat(float16);
-uchar16 __ovld __cnfn convert_uchar16_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat(char16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat(short16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat(int16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat(long16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat(float16);
-short16 __ovld __cnfn convert_short16_rte(char16);
-short16 __ovld __cnfn convert_short16_sat_rte(char16);
-short16 __ovld __cnfn convert_short16_rtz(char16);
-short16 __ovld __cnfn convert_short16_sat_rtz(char16);
-short16 __ovld __cnfn convert_short16_rtp(char16);
-short16 __ovld __cnfn convert_short16_sat_rtp(char16);
-short16 __ovld __cnfn convert_short16_rtn(char16);
-short16 __ovld __cnfn convert_short16_sat_rtn(char16);
-short16 __ovld __cnfn convert_short16(char16);
-short16 __ovld __cnfn convert_short16_sat(char16);
-short16 __ovld __cnfn convert_short16_rte(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
-short16 __ovld __cnfn convert_short16_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_rtn(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
-short16 __ovld __cnfn convert_short16(uchar16);
-short16 __ovld __cnfn convert_short16_sat(uchar16);
-short16 __ovld __cnfn convert_short16_rte(short16);
-short16 __ovld __cnfn convert_short16_sat_rte(short16);
-short16 __ovld __cnfn convert_short16_rtz(short16);
-short16 __ovld __cnfn convert_short16_sat_rtz(short16);
-short16 __ovld __cnfn convert_short16_rtp(short16);
-short16 __ovld __cnfn convert_short16_sat_rtp(short16);
-short16 __ovld __cnfn convert_short16_rtn(short16);
-short16 __ovld __cnfn convert_short16_sat_rtn(short16);
-short16 __ovld __cnfn convert_short16(short16);
-short16 __ovld __cnfn convert_short16_sat(short16);
-short16 __ovld __cnfn convert_short16_rte(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
-short16 __ovld __cnfn convert_short16_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_rtn(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
-short16 __ovld __cnfn convert_short16(ushort16);
-short16 __ovld __cnfn convert_short16_sat(ushort16);
-short16 __ovld __cnfn convert_short16_rte(int16);
-short16 __ovld __cnfn convert_short16_sat_rte(int16);
-short16 __ovld __cnfn convert_short16_rtz(int16);
-short16 __ovld __cnfn convert_short16_sat_rtz(int16);
-short16 __ovld __cnfn convert_short16_rtp(int16);
-short16 __ovld __cnfn convert_short16_sat_rtp(int16);
-short16 __ovld __cnfn convert_short16_rtn(int16);
-short16 __ovld __cnfn convert_short16_sat_rtn(int16);
-short16 __ovld __cnfn convert_short16(int16);
-short16 __ovld __cnfn convert_short16_sat(int16);
-short16 __ovld __cnfn convert_short16_rte(uint16);
-short16 __ovld __cnfn convert_short16_sat_rte(uint16);
-short16 __ovld __cnfn convert_short16_rtz(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
-short16 __ovld __cnfn convert_short16_rtp(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
-short16 __ovld __cnfn convert_short16_rtn(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
-short16 __ovld __cnfn convert_short16(uint16);
-short16 __ovld __cnfn convert_short16_sat(uint16);
-short16 __ovld __cnfn convert_short16_rte(long16);
-short16 __ovld __cnfn convert_short16_sat_rte(long16);
-short16 __ovld __cnfn convert_short16_rtz(long16);
-short16 __ovld __cnfn convert_short16_sat_rtz(long16);
-short16 __ovld __cnfn convert_short16_rtp(long16);
-short16 __ovld __cnfn convert_short16_sat_rtp(long16);
-short16 __ovld __cnfn convert_short16_rtn(long16);
-short16 __ovld __cnfn convert_short16_sat_rtn(long16);
-short16 __ovld __cnfn convert_short16(long16);
-short16 __ovld __cnfn convert_short16_sat(long16);
-short16 __ovld __cnfn convert_short16_rte(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
-short16 __ovld __cnfn convert_short16_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_rtn(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
-short16 __ovld __cnfn convert_short16(ulong16);
-short16 __ovld __cnfn convert_short16_sat(ulong16);
-short16 __ovld __cnfn convert_short16_rte(float16);
-short16 __ovld __cnfn convert_short16_sat_rte(float16);
-short16 __ovld __cnfn convert_short16_rtz(float16);
-short16 __ovld __cnfn convert_short16_sat_rtz(float16);
-short16 __ovld __cnfn convert_short16_rtp(float16);
-short16 __ovld __cnfn convert_short16_sat_rtp(float16);
-short16 __ovld __cnfn convert_short16_rtn(float16);
-short16 __ovld __cnfn convert_short16_sat_rtn(float16);
-short16 __ovld __cnfn convert_short16(float16);
-short16 __ovld __cnfn convert_short16_sat(float16);
-ushort16 __ovld __cnfn convert_ushort16_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat(char16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat(short16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat(int16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat(long16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat(float16);
-int16 __ovld __cnfn convert_int16_rte(char16);
-int16 __ovld __cnfn convert_int16_sat_rte(char16);
-int16 __ovld __cnfn convert_int16_rtz(char16);
-int16 __ovld __cnfn convert_int16_sat_rtz(char16);
-int16 __ovld __cnfn convert_int16_rtp(char16);
-int16 __ovld __cnfn convert_int16_sat_rtp(char16);
-int16 __ovld __cnfn convert_int16_rtn(char16);
-int16 __ovld __cnfn convert_int16_sat_rtn(char16);
-int16 __ovld __cnfn convert_int16(char16);
-int16 __ovld __cnfn convert_int16_sat(char16);
-int16 __ovld __cnfn convert_int16_rte(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
-int16 __ovld __cnfn convert_int16_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_rtn(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
-int16 __ovld __cnfn convert_int16(uchar16);
-int16 __ovld __cnfn convert_int16_sat(uchar16);
-int16 __ovld __cnfn convert_int16_rte(short16);
-int16 __ovld __cnfn convert_int16_sat_rte(short16);
-int16 __ovld __cnfn convert_int16_rtz(short16);
-int16 __ovld __cnfn convert_int16_sat_rtz(short16);
-int16 __ovld __cnfn convert_int16_rtp(short16);
-int16 __ovld __cnfn convert_int16_sat_rtp(short16);
-int16 __ovld __cnfn convert_int16_rtn(short16);
-int16 __ovld __cnfn convert_int16_sat_rtn(short16);
-int16 __ovld __cnfn convert_int16(short16);
-int16 __ovld __cnfn convert_int16_sat(short16);
-int16 __ovld __cnfn convert_int16_rte(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
-int16 __ovld __cnfn convert_int16_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_rtn(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
-int16 __ovld __cnfn convert_int16(ushort16);
-int16 __ovld __cnfn convert_int16_sat(ushort16);
-int16 __ovld __cnfn convert_int16_rte(int16);
-int16 __ovld __cnfn convert_int16_sat_rte(int16);
-int16 __ovld __cnfn convert_int16_rtz(int16);
-int16 __ovld __cnfn convert_int16_sat_rtz(int16);
-int16 __ovld __cnfn convert_int16_rtp(int16);
-int16 __ovld __cnfn convert_int16_sat_rtp(int16);
-int16 __ovld __cnfn convert_int16_rtn(int16);
-int16 __ovld __cnfn convert_int16_sat_rtn(int16);
-int16 __ovld __cnfn convert_int16(int16);
-int16 __ovld __cnfn convert_int16_sat(int16);
-int16 __ovld __cnfn convert_int16_rte(uint16);
-int16 __ovld __cnfn convert_int16_sat_rte(uint16);
-int16 __ovld __cnfn convert_int16_rtz(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
-int16 __ovld __cnfn convert_int16_rtp(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
-int16 __ovld __cnfn convert_int16_rtn(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
-int16 __ovld __cnfn convert_int16(uint16);
-int16 __ovld __cnfn convert_int16_sat(uint16);
-int16 __ovld __cnfn convert_int16_rte(long16);
-int16 __ovld __cnfn convert_int16_sat_rte(long16);
-int16 __ovld __cnfn convert_int16_rtz(long16);
-int16 __ovld __cnfn convert_int16_sat_rtz(long16);
-int16 __ovld __cnfn convert_int16_rtp(long16);
-int16 __ovld __cnfn convert_int16_sat_rtp(long16);
-int16 __ovld __cnfn convert_int16_rtn(long16);
-int16 __ovld __cnfn convert_int16_sat_rtn(long16);
-int16 __ovld __cnfn convert_int16(long16);
-int16 __ovld __cnfn convert_int16_sat(long16);
-int16 __ovld __cnfn convert_int16_rte(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
-int16 __ovld __cnfn convert_int16_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_rtn(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
-int16 __ovld __cnfn convert_int16(ulong16);
-int16 __ovld __cnfn convert_int16_sat(ulong16);
-int16 __ovld __cnfn convert_int16_rte(float16);
-int16 __ovld __cnfn convert_int16_sat_rte(float16);
-int16 __ovld __cnfn convert_int16_rtz(float16);
-int16 __ovld __cnfn convert_int16_sat_rtz(float16);
-int16 __ovld __cnfn convert_int16_rtp(float16);
-int16 __ovld __cnfn convert_int16_sat_rtp(float16);
-int16 __ovld __cnfn convert_int16_rtn(float16);
-int16 __ovld __cnfn convert_int16_sat_rtn(float16);
-int16 __ovld __cnfn convert_int16(float16);
-int16 __ovld __cnfn convert_int16_sat(float16);
-uint16 __ovld __cnfn convert_uint16_rte(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
-uint16 __ovld __cnfn convert_uint16_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_rtn(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
-uint16 __ovld __cnfn convert_uint16(char16);
-uint16 __ovld __cnfn convert_uint16_sat(char16);
-uint16 __ovld __cnfn convert_uint16_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat(uchar16);
-uint16 __ovld __cnfn convert_uint16_rte(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
-uint16 __ovld __cnfn convert_uint16_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_rtn(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
-uint16 __ovld __cnfn convert_uint16(short16);
-uint16 __ovld __cnfn convert_uint16_sat(short16);
-uint16 __ovld __cnfn convert_uint16_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat(ushort16);
-uint16 __ovld __cnfn convert_uint16_rte(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
-uint16 __ovld __cnfn convert_uint16_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_rtn(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
-uint16 __ovld __cnfn convert_uint16(int16);
-uint16 __ovld __cnfn convert_uint16_sat(int16);
-uint16 __ovld __cnfn convert_uint16_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16(uint16);
-uint16 __ovld __cnfn convert_uint16_sat(uint16);
-uint16 __ovld __cnfn convert_uint16_rte(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
-uint16 __ovld __cnfn convert_uint16_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_rtn(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
-uint16 __ovld __cnfn convert_uint16(long16);
-uint16 __ovld __cnfn convert_uint16_sat(long16);
-uint16 __ovld __cnfn convert_uint16_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat(ulong16);
-uint16 __ovld __cnfn convert_uint16_rte(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
-uint16 __ovld __cnfn convert_uint16_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_rtn(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
-uint16 __ovld __cnfn convert_uint16(float16);
-uint16 __ovld __cnfn convert_uint16_sat(float16);
-long16 __ovld __cnfn convert_long16_rte(char16);
-long16 __ovld __cnfn convert_long16_sat_rte(char16);
-long16 __ovld __cnfn convert_long16_rtz(char16);
-long16 __ovld __cnfn convert_long16_sat_rtz(char16);
-long16 __ovld __cnfn convert_long16_rtp(char16);
-long16 __ovld __cnfn convert_long16_sat_rtp(char16);
-long16 __ovld __cnfn convert_long16_rtn(char16);
-long16 __ovld __cnfn convert_long16_sat_rtn(char16);
-long16 __ovld __cnfn convert_long16(char16);
-long16 __ovld __cnfn convert_long16_sat(char16);
-long16 __ovld __cnfn convert_long16_rte(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
-long16 __ovld __cnfn convert_long16_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_rtn(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
-long16 __ovld __cnfn convert_long16(uchar16);
-long16 __ovld __cnfn convert_long16_sat(uchar16);
-long16 __ovld __cnfn convert_long16_rte(short16);
-long16 __ovld __cnfn convert_long16_sat_rte(short16);
-long16 __ovld __cnfn convert_long16_rtz(short16);
-long16 __ovld __cnfn convert_long16_sat_rtz(short16);
-long16 __ovld __cnfn convert_long16_rtp(short16);
-long16 __ovld __cnfn convert_long16_sat_rtp(short16);
-long16 __ovld __cnfn convert_long16_rtn(short16);
-long16 __ovld __cnfn convert_long16_sat_rtn(short16);
-long16 __ovld __cnfn convert_long16(short16);
-long16 __ovld __cnfn convert_long16_sat(short16);
-long16 __ovld __cnfn convert_long16_rte(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
-long16 __ovld __cnfn convert_long16_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_rtn(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
-long16 __ovld __cnfn convert_long16(ushort16);
-long16 __ovld __cnfn convert_long16_sat(ushort16);
-long16 __ovld __cnfn convert_long16_rte(int16);
-long16 __ovld __cnfn convert_long16_sat_rte(int16);
-long16 __ovld __cnfn convert_long16_rtz(int16);
-long16 __ovld __cnfn convert_long16_sat_rtz(int16);
-long16 __ovld __cnfn convert_long16_rtp(int16);
-long16 __ovld __cnfn convert_long16_sat_rtp(int16);
-long16 __ovld __cnfn convert_long16_rtn(int16);
-long16 __ovld __cnfn convert_long16_sat_rtn(int16);
-long16 __ovld __cnfn convert_long16(int16);
-long16 __ovld __cnfn convert_long16_sat(int16);
-long16 __ovld __cnfn convert_long16_rte(uint16);
-long16 __ovld __cnfn convert_long16_sat_rte(uint16);
-long16 __ovld __cnfn convert_long16_rtz(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
-long16 __ovld __cnfn convert_long16_rtp(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
-long16 __ovld __cnfn convert_long16_rtn(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
-long16 __ovld __cnfn convert_long16(uint16);
-long16 __ovld __cnfn convert_long16_sat(uint16);
-long16 __ovld __cnfn convert_long16_rte(long16);
-long16 __ovld __cnfn convert_long16_sat_rte(long16);
-long16 __ovld __cnfn convert_long16_rtz(long16);
-long16 __ovld __cnfn convert_long16_sat_rtz(long16);
-long16 __ovld __cnfn convert_long16_rtp(long16);
-long16 __ovld __cnfn convert_long16_sat_rtp(long16);
-long16 __ovld __cnfn convert_long16_rtn(long16);
-long16 __ovld __cnfn convert_long16_sat_rtn(long16);
-long16 __ovld __cnfn convert_long16(long16);
-long16 __ovld __cnfn convert_long16_sat(long16);
-long16 __ovld __cnfn convert_long16_rte(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
-long16 __ovld __cnfn convert_long16_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_rtn(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
-long16 __ovld __cnfn convert_long16(ulong16);
-long16 __ovld __cnfn convert_long16_sat(ulong16);
-long16 __ovld __cnfn convert_long16_rte(float16);
-long16 __ovld __cnfn convert_long16_sat_rte(float16);
-long16 __ovld __cnfn convert_long16_rtz(float16);
-long16 __ovld __cnfn convert_long16_sat_rtz(float16);
-long16 __ovld __cnfn convert_long16_rtp(float16);
-long16 __ovld __cnfn convert_long16_sat_rtp(float16);
-long16 __ovld __cnfn convert_long16_rtn(float16);
-long16 __ovld __cnfn convert_long16_sat_rtn(float16);
-long16 __ovld __cnfn convert_long16(float16);
-long16 __ovld __cnfn convert_long16_sat(float16);
-ulong16 __ovld __cnfn convert_ulong16_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat(char16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat(short16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat(int16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat(long16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat(float16);
-float16 __ovld __cnfn convert_float16_rte(char16);
-float16 __ovld __cnfn convert_float16_rtz(char16);
-float16 __ovld __cnfn convert_float16_rtp(char16);
-float16 __ovld __cnfn convert_float16_rtn(char16);
-float16 __ovld __cnfn convert_float16(char16);
-float16 __ovld __cnfn convert_float16_rte(uchar16);
-float16 __ovld __cnfn convert_float16_rtz(uchar16);
-float16 __ovld __cnfn convert_float16_rtp(uchar16);
-float16 __ovld __cnfn convert_float16_rtn(uchar16);
-float16 __ovld __cnfn convert_float16(uchar16);
-float16 __ovld __cnfn convert_float16_rte(short16);
-float16 __ovld __cnfn convert_float16_rtz(short16);
-float16 __ovld __cnfn convert_float16_rtp(short16);
-float16 __ovld __cnfn convert_float16_rtn(short16);
-float16 __ovld __cnfn convert_float16(short16);
-float16 __ovld __cnfn convert_float16_rte(ushort16);
-float16 __ovld __cnfn convert_float16_rtz(ushort16);
-float16 __ovld __cnfn convert_float16_rtp(ushort16);
-float16 __ovld __cnfn convert_float16_rtn(ushort16);
-float16 __ovld __cnfn convert_float16(ushort16);
-float16 __ovld __cnfn convert_float16_rte(int16);
-float16 __ovld __cnfn convert_float16_rtz(int16);
-float16 __ovld __cnfn convert_float16_rtp(int16);
-float16 __ovld __cnfn convert_float16_rtn(int16);
-float16 __ovld __cnfn convert_float16(int16);
-float16 __ovld __cnfn convert_float16_rte(uint16);
-float16 __ovld __cnfn convert_float16_rtz(uint16);
-float16 __ovld __cnfn convert_float16_rtp(uint16);
-float16 __ovld __cnfn convert_float16_rtn(uint16);
-float16 __ovld __cnfn convert_float16(uint16);
-float16 __ovld __cnfn convert_float16_rte(long16);
-float16 __ovld __cnfn convert_float16_rtz(long16);
-float16 __ovld __cnfn convert_float16_rtp(long16);
-float16 __ovld __cnfn convert_float16_rtn(long16);
-float16 __ovld __cnfn convert_float16(long16);
-float16 __ovld __cnfn convert_float16_rte(ulong16);
-float16 __ovld __cnfn convert_float16_rtz(ulong16);
-float16 __ovld __cnfn convert_float16_rtp(ulong16);
-float16 __ovld __cnfn convert_float16_rtn(ulong16);
-float16 __ovld __cnfn convert_float16(ulong16);
-float16 __ovld __cnfn convert_float16_rte(float16);
-float16 __ovld __cnfn convert_float16_rtz(float16);
-float16 __ovld __cnfn convert_float16_rtp(float16);
-float16 __ovld __cnfn convert_float16_rtn(float16);
-float16 __ovld __cnfn convert_float16(float16);
-
-// Conversions with double data type parameters or return values.
-
-#ifdef cl_khr_fp64
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
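A minimal usage sketch (editorial aside, not part of the deleted header) of the saturating, explicitly rounded double conversions declared below; the kernel and buffer names are hypothetical:

/* Requires a device exposing cl_khr_fp64; the pragma is harmless on
 * OpenCL C 1.2+, where fp64 is an optional core feature. */
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
__kernel void quantize(__global const double4 *in, __global int4 *out) {
    size_t i = get_global_id(0);
    /* _sat clamps out-of-range values to INT_MIN/INT_MAX instead of
     * yielding an undefined result; _rte rounds to nearest even. */
    out[i] = convert_int4_sat_rte(in[i]);
}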
-char __ovld __cnfn convert_char(double);
-char __ovld __cnfn convert_char_rte(double);
-char __ovld __cnfn convert_char_rtn(double);
-char __ovld __cnfn convert_char_rtp(double);
-char __ovld __cnfn convert_char_rtz(double);
-char __ovld __cnfn convert_char_sat(double);
-char __ovld __cnfn convert_char_sat_rte(double);
-char __ovld __cnfn convert_char_sat_rtn(double);
-char __ovld __cnfn convert_char_sat_rtp(double);
-char __ovld __cnfn convert_char_sat_rtz(double);
-char2 __ovld __cnfn convert_char2(double2);
-char2 __ovld __cnfn convert_char2_rte(double2);
-char2 __ovld __cnfn convert_char2_rtn(double2);
-char2 __ovld __cnfn convert_char2_rtp(double2);
-char2 __ovld __cnfn convert_char2_rtz(double2);
-char2 __ovld __cnfn convert_char2_sat(double2);
-char2 __ovld __cnfn convert_char2_sat_rte(double2);
-char2 __ovld __cnfn convert_char2_sat_rtn(double2);
-char2 __ovld __cnfn convert_char2_sat_rtp(double2);
-char2 __ovld __cnfn convert_char2_sat_rtz(double2);
-char3 __ovld __cnfn convert_char3(double3);
-char3 __ovld __cnfn convert_char3_rte(double3);
-char3 __ovld __cnfn convert_char3_rtn(double3);
-char3 __ovld __cnfn convert_char3_rtp(double3);
-char3 __ovld __cnfn convert_char3_rtz(double3);
-char3 __ovld __cnfn convert_char3_sat(double3);
-char3 __ovld __cnfn convert_char3_sat_rte(double3);
-char3 __ovld __cnfn convert_char3_sat_rtn(double3);
-char3 __ovld __cnfn convert_char3_sat_rtp(double3);
-char3 __ovld __cnfn convert_char3_sat_rtz(double3);
-char4 __ovld __cnfn convert_char4(double4);
-char4 __ovld __cnfn convert_char4_rte(double4);
-char4 __ovld __cnfn convert_char4_rtn(double4);
-char4 __ovld __cnfn convert_char4_rtp(double4);
-char4 __ovld __cnfn convert_char4_rtz(double4);
-char4 __ovld __cnfn convert_char4_sat(double4);
-char4 __ovld __cnfn convert_char4_sat_rte(double4);
-char4 __ovld __cnfn convert_char4_sat_rtn(double4);
-char4 __ovld __cnfn convert_char4_sat_rtp(double4);
-char4 __ovld __cnfn convert_char4_sat_rtz(double4);
-char8 __ovld __cnfn convert_char8(double8);
-char8 __ovld __cnfn convert_char8_rte(double8);
-char8 __ovld __cnfn convert_char8_rtn(double8);
-char8 __ovld __cnfn convert_char8_rtp(double8);
-char8 __ovld __cnfn convert_char8_rtz(double8);
-char8 __ovld __cnfn convert_char8_sat(double8);
-char8 __ovld __cnfn convert_char8_sat_rte(double8);
-char8 __ovld __cnfn convert_char8_sat_rtn(double8);
-char8 __ovld __cnfn convert_char8_sat_rtp(double8);
-char8 __ovld __cnfn convert_char8_sat_rtz(double8);
-char16 __ovld __cnfn convert_char16(double16);
-char16 __ovld __cnfn convert_char16_rte(double16);
-char16 __ovld __cnfn convert_char16_rtn(double16);
-char16 __ovld __cnfn convert_char16_rtp(double16);
-char16 __ovld __cnfn convert_char16_rtz(double16);
-char16 __ovld __cnfn convert_char16_sat(double16);
-char16 __ovld __cnfn convert_char16_sat_rte(double16);
-char16 __ovld __cnfn convert_char16_sat_rtn(double16);
-char16 __ovld __cnfn convert_char16_sat_rtp(double16);
-char16 __ovld __cnfn convert_char16_sat_rtz(double16);
-
-uchar __ovld __cnfn convert_uchar(double);
-uchar __ovld __cnfn convert_uchar_rte(double);
-uchar __ovld __cnfn convert_uchar_rtn(double);
-uchar __ovld __cnfn convert_uchar_rtp(double);
-uchar __ovld __cnfn convert_uchar_rtz(double);
-uchar __ovld __cnfn convert_uchar_sat(double);
-uchar __ovld __cnfn convert_uchar_sat_rte(double);
-uchar __ovld __cnfn convert_uchar_sat_rtn(double);
-uchar __ovld __cnfn convert_uchar_sat_rtp(double);
-uchar __ovld __cnfn convert_uchar_sat_rtz(double);
-uchar2 __ovld __cnfn convert_uchar2(double2);
-uchar2 __ovld __cnfn convert_uchar2_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
-uchar3 __ovld __cnfn convert_uchar3(double3);
-uchar3 __ovld __cnfn convert_uchar3_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
-uchar4 __ovld __cnfn convert_uchar4(double4);
-uchar4 __ovld __cnfn convert_uchar4_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
-uchar8 __ovld __cnfn convert_uchar8(double8);
-uchar8 __ovld __cnfn convert_uchar8_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
-uchar16 __ovld __cnfn convert_uchar16(double16);
-uchar16 __ovld __cnfn convert_uchar16_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
-
-short __ovld __cnfn convert_short(double);
-short __ovld __cnfn convert_short_rte(double);
-short __ovld __cnfn convert_short_rtn(double);
-short __ovld __cnfn convert_short_rtp(double);
-short __ovld __cnfn convert_short_rtz(double);
-short __ovld __cnfn convert_short_sat(double);
-short __ovld __cnfn convert_short_sat_rte(double);
-short __ovld __cnfn convert_short_sat_rtn(double);
-short __ovld __cnfn convert_short_sat_rtp(double);
-short __ovld __cnfn convert_short_sat_rtz(double);
-short2 __ovld __cnfn convert_short2(double2);
-short2 __ovld __cnfn convert_short2_rte(double2);
-short2 __ovld __cnfn convert_short2_rtn(double2);
-short2 __ovld __cnfn convert_short2_rtp(double2);
-short2 __ovld __cnfn convert_short2_rtz(double2);
-short2 __ovld __cnfn convert_short2_sat(double2);
-short2 __ovld __cnfn convert_short2_sat_rte(double2);
-short2 __ovld __cnfn convert_short2_sat_rtn(double2);
-short2 __ovld __cnfn convert_short2_sat_rtp(double2);
-short2 __ovld __cnfn convert_short2_sat_rtz(double2);
-short3 __ovld __cnfn convert_short3(double3);
-short3 __ovld __cnfn convert_short3_rte(double3);
-short3 __ovld __cnfn convert_short3_rtn(double3);
-short3 __ovld __cnfn convert_short3_rtp(double3);
-short3 __ovld __cnfn convert_short3_rtz(double3);
-short3 __ovld __cnfn convert_short3_sat(double3);
-short3 __ovld __cnfn convert_short3_sat_rte(double3);
-short3 __ovld __cnfn convert_short3_sat_rtn(double3);
-short3 __ovld __cnfn convert_short3_sat_rtp(double3);
-short3 __ovld __cnfn convert_short3_sat_rtz(double3);
-short4 __ovld __cnfn convert_short4(double4);
-short4 __ovld __cnfn convert_short4_rte(double4);
-short4 __ovld __cnfn convert_short4_rtn(double4);
-short4 __ovld __cnfn convert_short4_rtp(double4);
-short4 __ovld __cnfn convert_short4_rtz(double4);
-short4 __ovld __cnfn convert_short4_sat(double4);
-short4 __ovld __cnfn convert_short4_sat_rte(double4);
-short4 __ovld __cnfn convert_short4_sat_rtn(double4);
-short4 __ovld __cnfn convert_short4_sat_rtp(double4);
-short4 __ovld __cnfn convert_short4_sat_rtz(double4);
-short8 __ovld __cnfn convert_short8(double8);
-short8 __ovld __cnfn convert_short8_rte(double8);
-short8 __ovld __cnfn convert_short8_rtn(double8);
-short8 __ovld __cnfn convert_short8_rtp(double8);
-short8 __ovld __cnfn convert_short8_rtz(double8);
-short8 __ovld __cnfn convert_short8_sat(double8);
-short8 __ovld __cnfn convert_short8_sat_rte(double8);
-short8 __ovld __cnfn convert_short8_sat_rtn(double8);
-short8 __ovld __cnfn convert_short8_sat_rtp(double8);
-short8 __ovld __cnfn convert_short8_sat_rtz(double8);
-short16 __ovld __cnfn convert_short16(double16);
-short16 __ovld __cnfn convert_short16_rte(double16);
-short16 __ovld __cnfn convert_short16_rtn(double16);
-short16 __ovld __cnfn convert_short16_rtp(double16);
-short16 __ovld __cnfn convert_short16_rtz(double16);
-short16 __ovld __cnfn convert_short16_sat(double16);
-short16 __ovld __cnfn convert_short16_sat_rte(double16);
-short16 __ovld __cnfn convert_short16_sat_rtn(double16);
-short16 __ovld __cnfn convert_short16_sat_rtp(double16);
-short16 __ovld __cnfn convert_short16_sat_rtz(double16);
-
-ushort __ovld __cnfn convert_ushort(double);
-ushort __ovld __cnfn convert_ushort_rte(double);
-ushort __ovld __cnfn convert_ushort_rtn(double);
-ushort __ovld __cnfn convert_ushort_rtp(double);
-ushort __ovld __cnfn convert_ushort_rtz(double);
-ushort __ovld __cnfn convert_ushort_sat(double);
-ushort __ovld __cnfn convert_ushort_sat_rte(double);
-ushort __ovld __cnfn convert_ushort_sat_rtn(double);
-ushort __ovld __cnfn convert_ushort_sat_rtp(double);
-ushort __ovld __cnfn convert_ushort_sat_rtz(double);
-ushort2 __ovld __cnfn convert_ushort2(double2);
-ushort2 __ovld __cnfn convert_ushort2_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
-ushort3 __ovld __cnfn convert_ushort3(double3);
-ushort3 __ovld __cnfn convert_ushort3_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
-ushort4 __ovld __cnfn convert_ushort4(double4);
-ushort4 __ovld __cnfn convert_ushort4_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
-ushort8 __ovld __cnfn convert_ushort8(double8);
-ushort8 __ovld __cnfn convert_ushort8_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
-ushort16 __ovld __cnfn convert_ushort16(double16);
-ushort16 __ovld __cnfn convert_ushort16_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
-
-int __ovld __cnfn convert_int(double);
-int __ovld __cnfn convert_int_rte(double);
-int __ovld __cnfn convert_int_rtn(double);
-int __ovld __cnfn convert_int_rtp(double);
-int __ovld __cnfn convert_int_rtz(double);
-int __ovld __cnfn convert_int_sat(double);
-int __ovld __cnfn convert_int_sat_rte(double);
-int __ovld __cnfn convert_int_sat_rtn(double);
-int __ovld __cnfn convert_int_sat_rtp(double);
-int __ovld __cnfn convert_int_sat_rtz(double);
-int2 __ovld __cnfn convert_int2(double2);
-int2 __ovld __cnfn convert_int2_rte(double2);
-int2 __ovld __cnfn convert_int2_rtn(double2);
-int2 __ovld __cnfn convert_int2_rtp(double2);
-int2 __ovld __cnfn convert_int2_rtz(double2);
-int2 __ovld __cnfn convert_int2_sat(double2);
-int2 __ovld __cnfn convert_int2_sat_rte(double2);
-int2 __ovld __cnfn convert_int2_sat_rtn(double2);
-int2 __ovld __cnfn convert_int2_sat_rtp(double2);
-int2 __ovld __cnfn convert_int2_sat_rtz(double2);
-int3 __ovld __cnfn convert_int3(double3);
-int3 __ovld __cnfn convert_int3_rte(double3);
-int3 __ovld __cnfn convert_int3_rtn(double3);
-int3 __ovld __cnfn convert_int3_rtp(double3);
-int3 __ovld __cnfn convert_int3_rtz(double3);
-int3 __ovld __cnfn convert_int3_sat(double3);
-int3 __ovld __cnfn convert_int3_sat_rte(double3);
-int3 __ovld __cnfn convert_int3_sat_rtn(double3);
-int3 __ovld __cnfn convert_int3_sat_rtp(double3);
-int3 __ovld __cnfn convert_int3_sat_rtz(double3);
-int4 __ovld __cnfn convert_int4(double4);
-int4 __ovld __cnfn convert_int4_rte(double4);
-int4 __ovld __cnfn convert_int4_rtn(double4);
-int4 __ovld __cnfn convert_int4_rtp(double4);
-int4 __ovld __cnfn convert_int4_rtz(double4);
-int4 __ovld __cnfn convert_int4_sat(double4);
-int4 __ovld __cnfn convert_int4_sat_rte(double4);
-int4 __ovld __cnfn convert_int4_sat_rtn(double4);
-int4 __ovld __cnfn convert_int4_sat_rtp(double4);
-int4 __ovld __cnfn convert_int4_sat_rtz(double4);
-int8 __ovld __cnfn convert_int8(double8);
-int8 __ovld __cnfn convert_int8_rte(double8);
-int8 __ovld __cnfn convert_int8_rtn(double8);
-int8 __ovld __cnfn convert_int8_rtp(double8);
-int8 __ovld __cnfn convert_int8_rtz(double8);
-int8 __ovld __cnfn convert_int8_sat(double8);
-int8 __ovld __cnfn convert_int8_sat_rte(double8);
-int8 __ovld __cnfn convert_int8_sat_rtn(double8);
-int8 __ovld __cnfn convert_int8_sat_rtp(double8);
-int8 __ovld __cnfn convert_int8_sat_rtz(double8);
-int16 __ovld __cnfn convert_int16(double16);
-int16 __ovld __cnfn convert_int16_rte(double16);
-int16 __ovld __cnfn convert_int16_rtn(double16);
-int16 __ovld __cnfn convert_int16_rtp(double16);
-int16 __ovld __cnfn convert_int16_rtz(double16);
-int16 __ovld __cnfn convert_int16_sat(double16);
-int16 __ovld __cnfn convert_int16_sat_rte(double16);
-int16 __ovld __cnfn convert_int16_sat_rtn(double16);
-int16 __ovld __cnfn convert_int16_sat_rtp(double16);
-int16 __ovld __cnfn convert_int16_sat_rtz(double16);
-
-uint __ovld __cnfn convert_uint(double);
-uint __ovld __cnfn convert_uint_rte(double);
-uint __ovld __cnfn convert_uint_rtn(double);
-uint __ovld __cnfn convert_uint_rtp(double);
-uint __ovld __cnfn convert_uint_rtz(double);
-uint __ovld __cnfn convert_uint_sat(double);
-uint __ovld __cnfn convert_uint_sat_rte(double);
-uint __ovld __cnfn convert_uint_sat_rtn(double);
-uint __ovld __cnfn convert_uint_sat_rtp(double);
-uint __ovld __cnfn convert_uint_sat_rtz(double);
-uint2 __ovld __cnfn convert_uint2(double2);
-uint2 __ovld __cnfn convert_uint2_rte(double2);
-uint2 __ovld __cnfn convert_uint2_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_rtz(double2);
-uint2 __ovld __cnfn convert_uint2_sat(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
-uint3 __ovld __cnfn convert_uint3(double3);
-uint3 __ovld __cnfn convert_uint3_rte(double3);
-uint3 __ovld __cnfn convert_uint3_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_rtz(double3);
-uint3 __ovld __cnfn convert_uint3_sat(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
-uint4 __ovld __cnfn convert_uint4(double4);
-uint4 __ovld __cnfn convert_uint4_rte(double4);
-uint4 __ovld __cnfn convert_uint4_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_rtz(double4);
-uint4 __ovld __cnfn convert_uint4_sat(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
-uint8 __ovld __cnfn convert_uint8(double8);
-uint8 __ovld __cnfn convert_uint8_rte(double8);
-uint8 __ovld __cnfn convert_uint8_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_rtz(double8);
-uint8 __ovld __cnfn convert_uint8_sat(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
-uint16 __ovld __cnfn convert_uint16(double16);
-uint16 __ovld __cnfn convert_uint16_rte(double16);
-uint16 __ovld __cnfn convert_uint16_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_rtz(double16);
-uint16 __ovld __cnfn convert_uint16_sat(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
-
-long __ovld __cnfn convert_long(double);
-long __ovld __cnfn convert_long_rte(double);
-long __ovld __cnfn convert_long_rtn(double);
-long __ovld __cnfn convert_long_rtp(double);
-long __ovld __cnfn convert_long_rtz(double);
-long __ovld __cnfn convert_long_sat(double);
-long __ovld __cnfn convert_long_sat_rte(double);
-long __ovld __cnfn convert_long_sat_rtn(double);
-long __ovld __cnfn convert_long_sat_rtp(double);
-long __ovld __cnfn convert_long_sat_rtz(double);
-long2 __ovld __cnfn convert_long2(double2);
-long2 __ovld __cnfn convert_long2_rte(double2);
-long2 __ovld __cnfn convert_long2_rtn(double2);
-long2 __ovld __cnfn convert_long2_rtp(double2);
-long2 __ovld __cnfn convert_long2_rtz(double2);
-long2 __ovld __cnfn convert_long2_sat(double2);
-long2 __ovld __cnfn convert_long2_sat_rte(double2);
-long2 __ovld __cnfn convert_long2_sat_rtn(double2);
-long2 __ovld __cnfn convert_long2_sat_rtp(double2);
-long2 __ovld __cnfn convert_long2_sat_rtz(double2);
-long3 __ovld __cnfn convert_long3(double3);
-long3 __ovld __cnfn convert_long3_rte(double3);
-long3 __ovld __cnfn convert_long3_rtn(double3);
-long3 __ovld __cnfn convert_long3_rtp(double3);
-long3 __ovld __cnfn convert_long3_rtz(double3);
-long3 __ovld __cnfn convert_long3_sat(double3);
-long3 __ovld __cnfn convert_long3_sat_rte(double3);
-long3 __ovld __cnfn convert_long3_sat_rtn(double3);
-long3 __ovld __cnfn convert_long3_sat_rtp(double3);
-long3 __ovld __cnfn convert_long3_sat_rtz(double3);
-long4 __ovld __cnfn convert_long4(double4);
-long4 __ovld __cnfn convert_long4_rte(double4);
-long4 __ovld __cnfn convert_long4_rtn(double4);
-long4 __ovld __cnfn convert_long4_rtp(double4);
-long4 __ovld __cnfn convert_long4_rtz(double4);
-long4 __ovld __cnfn convert_long4_sat(double4);
-long4 __ovld __cnfn convert_long4_sat_rte(double4);
-long4 __ovld __cnfn convert_long4_sat_rtn(double4);
-long4 __ovld __cnfn convert_long4_sat_rtp(double4);
-long4 __ovld __cnfn convert_long4_sat_rtz(double4);
-long8 __ovld __cnfn convert_long8(double8);
-long8 __ovld __cnfn convert_long8_rte(double8);
-long8 __ovld __cnfn convert_long8_rtn(double8);
-long8 __ovld __cnfn convert_long8_rtp(double8);
-long8 __ovld __cnfn convert_long8_rtz(double8);
-long8 __ovld __cnfn convert_long8_sat(double8);
-long8 __ovld __cnfn convert_long8_sat_rte(double8);
-long8 __ovld __cnfn convert_long8_sat_rtn(double8);
-long8 __ovld __cnfn convert_long8_sat_rtp(double8);
-long8 __ovld __cnfn convert_long8_sat_rtz(double8);
-long16 __ovld __cnfn convert_long16(double16);
-long16 __ovld __cnfn convert_long16_rte(double16);
-long16 __ovld __cnfn convert_long16_rtn(double16);
-long16 __ovld __cnfn convert_long16_rtp(double16);
-long16 __ovld __cnfn convert_long16_rtz(double16);
-long16 __ovld __cnfn convert_long16_sat(double16);
-long16 __ovld __cnfn convert_long16_sat_rte(double16);
-long16 __ovld __cnfn convert_long16_sat_rtn(double16);
-long16 __ovld __cnfn convert_long16_sat_rtp(double16);
-long16 __ovld __cnfn convert_long16_sat_rtz(double16);
-
-ulong __ovld __cnfn convert_ulong(double);
-ulong __ovld __cnfn convert_ulong_rte(double);
-ulong __ovld __cnfn convert_ulong_rtn(double);
-ulong __ovld __cnfn convert_ulong_rtp(double);
-ulong __ovld __cnfn convert_ulong_rtz(double);
-ulong __ovld __cnfn convert_ulong_sat(double);
-ulong __ovld __cnfn convert_ulong_sat_rte(double);
-ulong __ovld __cnfn convert_ulong_sat_rtn(double);
-ulong __ovld __cnfn convert_ulong_sat_rtp(double);
-ulong __ovld __cnfn convert_ulong_sat_rtz(double);
-ulong2 __ovld __cnfn convert_ulong2(double2);
-ulong2 __ovld __cnfn convert_ulong2_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
-ulong3 __ovld __cnfn convert_ulong3(double3);
-ulong3 __ovld __cnfn convert_ulong3_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
-ulong4 __ovld __cnfn convert_ulong4(double4);
-ulong4 __ovld __cnfn convert_ulong4_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
-ulong8 __ovld __cnfn convert_ulong8(double8);
-ulong8 __ovld __cnfn convert_ulong8_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
-ulong16 __ovld __cnfn convert_ulong16(double16);
-ulong16 __ovld __cnfn convert_ulong16_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
-
-float __ovld __cnfn convert_float(double);
-float __ovld __cnfn convert_float_rte(double);
-float __ovld __cnfn convert_float_rtn(double);
-float __ovld __cnfn convert_float_rtp(double);
-float __ovld __cnfn convert_float_rtz(double);
-float2 __ovld __cnfn convert_float2(double2);
-float2 __ovld __cnfn convert_float2_rte(double2);
-float2 __ovld __cnfn convert_float2_rtn(double2);
-float2 __ovld __cnfn convert_float2_rtp(double2);
-float2 __ovld __cnfn convert_float2_rtz(double2);
-float3 __ovld __cnfn convert_float3(double3);
-float3 __ovld __cnfn convert_float3_rte(double3);
-float3 __ovld __cnfn convert_float3_rtn(double3);
-float3 __ovld __cnfn convert_float3_rtp(double3);
-float3 __ovld __cnfn convert_float3_rtz(double3);
-float4 __ovld __cnfn convert_float4(double4);
-float4 __ovld __cnfn convert_float4_rte(double4);
-float4 __ovld __cnfn convert_float4_rtn(double4);
-float4 __ovld __cnfn convert_float4_rtp(double4);
-float4 __ovld __cnfn convert_float4_rtz(double4);
-float8 __ovld __cnfn convert_float8(double8);
-float8 __ovld __cnfn convert_float8_rte(double8);
-float8 __ovld __cnfn convert_float8_rtn(double8);
-float8 __ovld __cnfn convert_float8_rtp(double8);
-float8 __ovld __cnfn convert_float8_rtz(double8);
-float16 __ovld __cnfn convert_float16(double16);
-float16 __ovld __cnfn convert_float16_rte(double16);
-float16 __ovld __cnfn convert_float16_rtn(double16);
-float16 __ovld __cnfn convert_float16_rtp(double16);
-float16 __ovld __cnfn convert_float16_rtz(double16);
-
-double __ovld __cnfn convert_double(char);
-double __ovld __cnfn convert_double(double);
-double __ovld __cnfn convert_double(float);
-double __ovld __cnfn convert_double(int);
-double __ovld __cnfn convert_double(long);
-double __ovld __cnfn convert_double(short);
-double __ovld __cnfn convert_double(uchar);
-double __ovld __cnfn convert_double(uint);
-double __ovld __cnfn convert_double(ulong);
-double __ovld __cnfn convert_double(ushort);
-double __ovld __cnfn convert_double_rte(char);
-double __ovld __cnfn convert_double_rte(double);
-double __ovld __cnfn convert_double_rte(float);
-double __ovld __cnfn convert_double_rte(int);
-double __ovld __cnfn convert_double_rte(long);
-double __ovld __cnfn convert_double_rte(short);
-double __ovld __cnfn convert_double_rte(uchar);
-double __ovld __cnfn convert_double_rte(uint);
-double __ovld __cnfn convert_double_rte(ulong);
-double __ovld __cnfn convert_double_rte(ushort);
-double __ovld __cnfn convert_double_rtn(char);
-double __ovld __cnfn convert_double_rtn(double);
-double __ovld __cnfn convert_double_rtn(float);
-double __ovld __cnfn convert_double_rtn(int);
-double __ovld __cnfn convert_double_rtn(long);
-double __ovld __cnfn convert_double_rtn(short);
-double __ovld __cnfn convert_double_rtn(uchar);
-double __ovld __cnfn convert_double_rtn(uint);
-double __ovld __cnfn convert_double_rtn(ulong);
-double __ovld __cnfn convert_double_rtn(ushort);
-double __ovld __cnfn convert_double_rtp(char);
-double __ovld __cnfn convert_double_rtp(double);
-double __ovld __cnfn convert_double_rtp(float);
-double __ovld __cnfn convert_double_rtp(int);
-double __ovld __cnfn convert_double_rtp(long);
-double __ovld __cnfn convert_double_rtp(short);
-double __ovld __cnfn convert_double_rtp(uchar);
-double __ovld __cnfn convert_double_rtp(uint);
-double __ovld __cnfn convert_double_rtp(ulong);
-double __ovld __cnfn convert_double_rtp(ushort);
-double __ovld __cnfn convert_double_rtz(char);
-double __ovld __cnfn convert_double_rtz(double);
-double __ovld __cnfn convert_double_rtz(float);
-double __ovld __cnfn convert_double_rtz(int);
-double __ovld __cnfn convert_double_rtz(long);
-double __ovld __cnfn convert_double_rtz(short);
-double __ovld __cnfn convert_double_rtz(uchar);
-double __ovld __cnfn convert_double_rtz(uint);
-double __ovld __cnfn convert_double_rtz(ulong);
-double __ovld __cnfn convert_double_rtz(ushort);
-double2 __ovld __cnfn convert_double2(char2);
-double2 __ovld __cnfn convert_double2(double2);
-double2 __ovld __cnfn convert_double2(float2);
-double2 __ovld __cnfn convert_double2(int2);
-double2 __ovld __cnfn convert_double2(long2);
-double2 __ovld __cnfn convert_double2(short2);
-double2 __ovld __cnfn convert_double2(uchar2);
-double2 __ovld __cnfn convert_double2(uint2);
-double2 __ovld __cnfn convert_double2(ulong2);
-double2 __ovld __cnfn convert_double2(ushort2);
-double2 __ovld __cnfn convert_double2_rte(char2);
-double2 __ovld __cnfn convert_double2_rte(double2);
-double2 __ovld __cnfn convert_double2_rte(float2);
-double2 __ovld __cnfn convert_double2_rte(int2);
-double2 __ovld __cnfn convert_double2_rte(long2);
-double2 __ovld __cnfn convert_double2_rte(short2);
-double2 __ovld __cnfn convert_double2_rte(uchar2);
-double2 __ovld __cnfn convert_double2_rte(uint2);
-double2 __ovld __cnfn convert_double2_rte(ulong2);
-double2 __ovld __cnfn convert_double2_rte(ushort2);
-double2 __ovld __cnfn convert_double2_rtn(char2);
-double2 __ovld __cnfn convert_double2_rtn(double2);
-double2 __ovld __cnfn convert_double2_rtn(float2);
-double2 __ovld __cnfn convert_double2_rtn(int2);
-double2 __ovld __cnfn convert_double2_rtn(long2);
-double2 __ovld __cnfn convert_double2_rtn(short2);
-double2 __ovld __cnfn convert_double2_rtn(uchar2);
-double2 __ovld __cnfn convert_double2_rtn(uint2);
-double2 __ovld __cnfn convert_double2_rtn(ulong2);
-double2 __ovld __cnfn convert_double2_rtn(ushort2);
-double2 __ovld __cnfn convert_double2_rtp(char2);
-double2 __ovld __cnfn convert_double2_rtp(double2);
-double2 __ovld __cnfn convert_double2_rtp(float2);
-double2 __ovld __cnfn convert_double2_rtp(int2);
-double2 __ovld __cnfn convert_double2_rtp(long2);
-double2 __ovld __cnfn convert_double2_rtp(short2);
-double2 __ovld __cnfn convert_double2_rtp(uchar2);
-double2 __ovld __cnfn convert_double2_rtp(uint2);
-double2 __ovld __cnfn convert_double2_rtp(ulong2);
-double2 __ovld __cnfn convert_double2_rtp(ushort2);
-double2 __ovld __cnfn convert_double2_rtz(char2);
-double2 __ovld __cnfn convert_double2_rtz(double2);
-double2 __ovld __cnfn convert_double2_rtz(float2);
-double2 __ovld __cnfn convert_double2_rtz(int2);
-double2 __ovld __cnfn convert_double2_rtz(long2);
-double2 __ovld __cnfn convert_double2_rtz(short2);
-double2 __ovld __cnfn convert_double2_rtz(uchar2);
-double2 __ovld __cnfn convert_double2_rtz(uint2);
-double2 __ovld __cnfn convert_double2_rtz(ulong2);
-double2 __ovld __cnfn convert_double2_rtz(ushort2);
-double3 __ovld __cnfn convert_double3(char3);
-double3 __ovld __cnfn convert_double3(double3);
-double3 __ovld __cnfn convert_double3(float3);
-double3 __ovld __cnfn convert_double3(int3);
-double3 __ovld __cnfn convert_double3(long3);
-double3 __ovld __cnfn convert_double3(short3);
-double3 __ovld __cnfn convert_double3(uchar3);
-double3 __ovld __cnfn convert_double3(uint3);
-double3 __ovld __cnfn convert_double3(ulong3);
-double3 __ovld __cnfn convert_double3(ushort3);
-double3 __ovld __cnfn convert_double3_rte(char3);
-double3 __ovld __cnfn convert_double3_rte(double3);
-double3 __ovld __cnfn convert_double3_rte(float3);
-double3 __ovld __cnfn convert_double3_rte(int3);
-double3 __ovld __cnfn convert_double3_rte(long3);
-double3 __ovld __cnfn convert_double3_rte(short3);
-double3 __ovld __cnfn convert_double3_rte(uchar3);
-double3 __ovld __cnfn convert_double3_rte(uint3);
-double3 __ovld __cnfn convert_double3_rte(ulong3);
-double3 __ovld __cnfn convert_double3_rte(ushort3);
-double3 __ovld __cnfn convert_double3_rtn(char3);
-double3 __ovld __cnfn convert_double3_rtn(double3);
-double3 __ovld __cnfn convert_double3_rtn(float3);
-double3 __ovld __cnfn convert_double3_rtn(int3);
-double3 __ovld __cnfn convert_double3_rtn(long3);
-double3 __ovld __cnfn convert_double3_rtn(short3);
-double3 __ovld __cnfn convert_double3_rtn(uchar3);
-double3 __ovld __cnfn convert_double3_rtn(uint3);
-double3 __ovld __cnfn convert_double3_rtn(ulong3);
-double3 __ovld __cnfn convert_double3_rtn(ushort3);
-double3 __ovld __cnfn convert_double3_rtp(char3);
-double3 __ovld __cnfn convert_double3_rtp(double3);
-double3 __ovld __cnfn convert_double3_rtp(float3);
-double3 __ovld __cnfn convert_double3_rtp(int3);
-double3 __ovld __cnfn convert_double3_rtp(long3);
-double3 __ovld __cnfn convert_double3_rtp(short3);
-double3 __ovld __cnfn convert_double3_rtp(uchar3);
-double3 __ovld __cnfn convert_double3_rtp(uint3);
-double3 __ovld __cnfn convert_double3_rtp(ulong3);
-double3 __ovld __cnfn convert_double3_rtp(ushort3);
-double3 __ovld __cnfn convert_double3_rtz(char3);
-double3 __ovld __cnfn convert_double3_rtz(double3);
-double3 __ovld __cnfn convert_double3_rtz(float3);
-double3 __ovld __cnfn convert_double3_rtz(int3);
-double3 __ovld __cnfn convert_double3_rtz(long3);
-double3 __ovld __cnfn convert_double3_rtz(short3);
-double3 __ovld __cnfn convert_double3_rtz(uchar3);
-double3 __ovld __cnfn convert_double3_rtz(uint3);
-double3 __ovld __cnfn convert_double3_rtz(ulong3);
-double3 __ovld __cnfn convert_double3_rtz(ushort3);
-double4 __ovld __cnfn convert_double4(char4);
-double4 __ovld __cnfn convert_double4(double4);
-double4 __ovld __cnfn convert_double4(float4);
-double4 __ovld __cnfn convert_double4(int4);
-double4 __ovld __cnfn convert_double4(long4);
-double4 __ovld __cnfn convert_double4(short4);
-double4 __ovld __cnfn convert_double4(uchar4);
-double4 __ovld __cnfn convert_double4(uint4);
-double4 __ovld __cnfn convert_double4(ulong4);
-double4 __ovld __cnfn convert_double4(ushort4);
-double4 __ovld __cnfn convert_double4_rte(char4);
-double4 __ovld __cnfn convert_double4_rte(double4);
-double4 __ovld __cnfn convert_double4_rte(float4);
-double4 __ovld __cnfn convert_double4_rte(int4);
-double4 __ovld __cnfn convert_double4_rte(long4);
-double4 __ovld __cnfn convert_double4_rte(short4);
-double4 __ovld __cnfn convert_double4_rte(uchar4);
-double4 __ovld __cnfn convert_double4_rte(uint4);
-double4 __ovld __cnfn convert_double4_rte(ulong4);
-double4 __ovld __cnfn convert_double4_rte(ushort4);
-double4 __ovld __cnfn convert_double4_rtn(char4);
-double4 __ovld __cnfn convert_double4_rtn(double4);
-double4 __ovld __cnfn convert_double4_rtn(float4);
-double4 __ovld __cnfn convert_double4_rtn(int4);
-double4 __ovld __cnfn convert_double4_rtn(long4);
-double4 __ovld __cnfn convert_double4_rtn(short4);
-double4 __ovld __cnfn convert_double4_rtn(uchar4);
-double4 __ovld __cnfn convert_double4_rtn(uint4);
-double4 __ovld __cnfn convert_double4_rtn(ulong4);
-double4 __ovld __cnfn convert_double4_rtn(ushort4);
-double4 __ovld __cnfn convert_double4_rtp(char4);
-double4 __ovld __cnfn convert_double4_rtp(double4);
-double4 __ovld __cnfn convert_double4_rtp(float4);
-double4 __ovld __cnfn convert_double4_rtp(int4);
-double4 __ovld __cnfn convert_double4_rtp(long4);
-double4 __ovld __cnfn convert_double4_rtp(short4);
-double4 __ovld __cnfn convert_double4_rtp(uchar4);
-double4 __ovld __cnfn convert_double4_rtp(uint4);
-double4 __ovld __cnfn convert_double4_rtp(ulong4);
-double4 __ovld __cnfn convert_double4_rtp(ushort4);
-double4 __ovld __cnfn convert_double4_rtz(char4);
-double4 __ovld __cnfn convert_double4_rtz(double4);
-double4 __ovld __cnfn convert_double4_rtz(float4);
-double4 __ovld __cnfn convert_double4_rtz(int4);
-double4 __ovld __cnfn convert_double4_rtz(long4);
-double4 __ovld __cnfn convert_double4_rtz(short4);
-double4 __ovld __cnfn convert_double4_rtz(uchar4);
-double4 __ovld __cnfn convert_double4_rtz(uint4);
-double4 __ovld __cnfn convert_double4_rtz(ulong4);
-double4 __ovld __cnfn convert_double4_rtz(ushort4);
-double8 __ovld __cnfn convert_double8(char8);
-double8 __ovld __cnfn convert_double8(double8);
-double8 __ovld __cnfn convert_double8(float8);
-double8 __ovld __cnfn convert_double8(int8);
-double8 __ovld __cnfn convert_double8(long8);
-double8 __ovld __cnfn convert_double8(short8);
-double8 __ovld __cnfn convert_double8(uchar8);
-double8 __ovld __cnfn convert_double8(uint8);
-double8 __ovld __cnfn convert_double8(ulong8);
-double8 __ovld __cnfn convert_double8(ushort8);
-double8 __ovld __cnfn convert_double8_rte(char8);
-double8 __ovld __cnfn convert_double8_rte(double8);
-double8 __ovld __cnfn convert_double8_rte(float8);
-double8 __ovld __cnfn convert_double8_rte(int8);
-double8 __ovld __cnfn convert_double8_rte(long8);
-double8 __ovld __cnfn convert_double8_rte(short8);
-double8 __ovld __cnfn convert_double8_rte(uchar8);
-double8 __ovld __cnfn convert_double8_rte(uint8);
-double8 __ovld __cnfn convert_double8_rte(ulong8);
-double8 __ovld __cnfn convert_double8_rte(ushort8);
-double8 __ovld __cnfn convert_double8_rtn(char8);
-double8 __ovld __cnfn convert_double8_rtn(double8);
-double8 __ovld __cnfn convert_double8_rtn(float8);
-double8 __ovld __cnfn convert_double8_rtn(int8);
-double8 __ovld __cnfn convert_double8_rtn(long8);
-double8 __ovld __cnfn convert_double8_rtn(short8);
-double8 __ovld __cnfn convert_double8_rtn(uchar8);
-double8 __ovld __cnfn convert_double8_rtn(uint8);
-double8 __ovld __cnfn convert_double8_rtn(ulong8);
-double8 __ovld __cnfn convert_double8_rtn(ushort8);
-double8 __ovld __cnfn convert_double8_rtp(char8);
-double8 __ovld __cnfn convert_double8_rtp(double8);
-double8 __ovld __cnfn convert_double8_rtp(float8);
-double8 __ovld __cnfn convert_double8_rtp(int8);
-double8 __ovld __cnfn convert_double8_rtp(long8);
-double8 __ovld __cnfn convert_double8_rtp(short8);
-double8 __ovld __cnfn convert_double8_rtp(uchar8);
-double8 __ovld __cnfn convert_double8_rtp(uint8);
-double8 __ovld __cnfn convert_double8_rtp(ulong8);
-double8 __ovld __cnfn convert_double8_rtp(ushort8);
-double8 __ovld __cnfn convert_double8_rtz(char8);
-double8 __ovld __cnfn convert_double8_rtz(double8);
-double8 __ovld __cnfn convert_double8_rtz(float8);
-double8 __ovld __cnfn convert_double8_rtz(int8);
-double8 __ovld __cnfn convert_double8_rtz(long8);
-double8 __ovld __cnfn convert_double8_rtz(short8);
-double8 __ovld __cnfn convert_double8_rtz(uchar8);
-double8 __ovld __cnfn convert_double8_rtz(uint8);
-double8 __ovld __cnfn convert_double8_rtz(ulong8);
-double8 __ovld __cnfn convert_double8_rtz(ushort8);
-double16 __ovld __cnfn convert_double16(char16);
-double16 __ovld __cnfn convert_double16(double16);
-double16 __ovld __cnfn convert_double16(float16);
-double16 __ovld __cnfn convert_double16(int16);
-double16 __ovld __cnfn convert_double16(long16);
-double16 __ovld __cnfn convert_double16(short16);
-double16 __ovld __cnfn convert_double16(uchar16);
-double16 __ovld __cnfn convert_double16(uint16);
-double16 __ovld __cnfn convert_double16(ulong16);
-double16 __ovld __cnfn convert_double16(ushort16);
-double16 __ovld __cnfn convert_double16_rte(char16);
-double16 __ovld __cnfn convert_double16_rte(double16);
-double16 __ovld __cnfn convert_double16_rte(float16);
-double16 __ovld __cnfn convert_double16_rte(int16);
-double16 __ovld __cnfn convert_double16_rte(long16);
-double16 __ovld __cnfn convert_double16_rte(short16);
-double16 __ovld __cnfn convert_double16_rte(uchar16);
-double16 __ovld __cnfn convert_double16_rte(uint16);
-double16 __ovld __cnfn convert_double16_rte(ulong16);
-double16 __ovld __cnfn convert_double16_rte(ushort16);
-double16 __ovld __cnfn convert_double16_rtn(char16);
-double16 __ovld __cnfn convert_double16_rtn(double16);
-double16 __ovld __cnfn convert_double16_rtn(float16);
-double16 __ovld __cnfn convert_double16_rtn(int16);
-double16 __ovld __cnfn convert_double16_rtn(long16);
-double16 __ovld __cnfn convert_double16_rtn(short16);
-double16 __ovld __cnfn convert_double16_rtn(uchar16);
-double16 __ovld __cnfn convert_double16_rtn(uint16);
-double16 __ovld __cnfn convert_double16_rtn(ulong16);
-double16 __ovld __cnfn convert_double16_rtn(ushort16);
-double16 __ovld __cnfn convert_double16_rtp(char16);
-double16 __ovld __cnfn convert_double16_rtp(double16);
-double16 __ovld __cnfn convert_double16_rtp(float16);
-double16 __ovld __cnfn convert_double16_rtp(int16);
-double16 __ovld __cnfn convert_double16_rtp(long16);
-double16 __ovld __cnfn convert_double16_rtp(short16);
-double16 __ovld __cnfn convert_double16_rtp(uchar16);
-double16 __ovld __cnfn convert_double16_rtp(uint16);
-double16 __ovld __cnfn convert_double16_rtp(ulong16);
-double16 __ovld __cnfn convert_double16_rtp(ushort16);
-double16 __ovld __cnfn convert_double16_rtz(char16);
-double16 __ovld __cnfn convert_double16_rtz(double16);
-double16 __ovld __cnfn convert_double16_rtz(float16);
-double16 __ovld __cnfn convert_double16_rtz(int16);
-double16 __ovld __cnfn convert_double16_rtz(long16);
-double16 __ovld __cnfn convert_double16_rtz(short16);
-double16 __ovld __cnfn convert_double16_rtz(uchar16);
-double16 __ovld __cnfn convert_double16_rtz(uint16);
-double16 __ovld __cnfn convert_double16_rtz(ulong16);
-double16 __ovld __cnfn convert_double16_rtz(ushort16);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-// Convert half types to non-double types.
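A hedged sketch (editorial aside, not part of the deleted header) of the half conversions declared below, packing half4 values into uchar4 bytes; the kernel and buffer names are hypothetical:

/* A user program enables cl_khr_fp16 to use half vector types, just as
 * the header does above. */
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
__kernel void pack(__global const half4 *pixels, __global uchar4 *bytes) {
    size_t i = get_global_id(0);
    /* _sat clamps values outside [0, 255] rather than wrapping; _rtz
     * rounds toward zero (truncation). */
    bytes[i] = convert_uchar4_sat_rtz(pixels[i]);
}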
-uchar __ovld __cnfn convert_uchar(half);
-uchar __ovld __cnfn convert_uchar_rte(half);
-uchar __ovld __cnfn convert_uchar_rtp(half);
-uchar __ovld __cnfn convert_uchar_rtn(half);
-uchar __ovld __cnfn convert_uchar_rtz(half);
-uchar __ovld __cnfn convert_uchar_sat(half);
-uchar __ovld __cnfn convert_uchar_sat_rte(half);
-uchar __ovld __cnfn convert_uchar_sat_rtp(half);
-uchar __ovld __cnfn convert_uchar_sat_rtn(half);
-uchar __ovld __cnfn convert_uchar_sat_rtz(half);
-uchar2 __ovld __cnfn convert_uchar2(half2);
-uchar2 __ovld __cnfn convert_uchar2_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
-uchar3 __ovld __cnfn convert_uchar3(half3);
-uchar3 __ovld __cnfn convert_uchar3_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
-uchar4 __ovld __cnfn convert_uchar4(half4);
-uchar4 __ovld __cnfn convert_uchar4_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
-uchar8 __ovld __cnfn convert_uchar8(half8);
-uchar8 __ovld __cnfn convert_uchar8_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
-uchar16 __ovld __cnfn convert_uchar16(half16);
-uchar16 __ovld __cnfn convert_uchar16_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
-ushort __ovld __cnfn convert_ushort(half);
-ushort __ovld __cnfn convert_ushort_rte(half);
-ushort __ovld __cnfn convert_ushort_rtp(half);
-ushort __ovld __cnfn convert_ushort_rtn(half);
-ushort __ovld __cnfn convert_ushort_rtz(half);
-ushort __ovld __cnfn convert_ushort_sat(half);
-ushort __ovld __cnfn convert_ushort_sat_rte(half);
-ushort __ovld __cnfn convert_ushort_sat_rtp(half);
-ushort __ovld __cnfn convert_ushort_sat_rtn(half);
-ushort __ovld __cnfn convert_ushort_sat_rtz(half);
-ushort2 __ovld __cnfn convert_ushort2(half2);
-ushort2 __ovld __cnfn convert_ushort2_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
-ushort3 __ovld __cnfn convert_ushort3(half3);
-ushort3 __ovld __cnfn convert_ushort3_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
-ushort4 __ovld __cnfn convert_ushort4(half4);
-ushort4 __ovld __cnfn convert_ushort4_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
-ushort8 __ovld __cnfn convert_ushort8(half8);
-ushort8 __ovld __cnfn convert_ushort8_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
-ushort16 __ovld __cnfn convert_ushort16(half16);
-ushort16 __ovld __cnfn convert_ushort16_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
-uint __ovld __cnfn convert_uint(half);
-uint __ovld __cnfn convert_uint_rte(half);
-uint __ovld __cnfn convert_uint_rtp(half);
-uint __ovld __cnfn convert_uint_rtn(half);
-uint __ovld __cnfn convert_uint_rtz(half);
-uint __ovld __cnfn convert_uint_sat(half);
-uint __ovld __cnfn convert_uint_sat_rte(half);
-uint __ovld __cnfn convert_uint_sat_rtp(half);
-uint __ovld __cnfn convert_uint_sat_rtn(half);
-uint __ovld __cnfn convert_uint_sat_rtz(half);
-uint2 __ovld __cnfn convert_uint2(half2);
-uint2 __ovld __cnfn convert_uint2_rte(half2);
-uint2 __ovld __cnfn convert_uint2_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_rtz(half2);
-uint2 __ovld __cnfn convert_uint2_sat(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
-uint3 __ovld __cnfn convert_uint3(half3);
-uint3 __ovld __cnfn convert_uint3_rte(half3);
-uint3 __ovld __cnfn convert_uint3_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_rtz(half3);
-uint3 __ovld __cnfn convert_uint3_sat(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
-uint4 __ovld __cnfn convert_uint4(half4);
-uint4 __ovld __cnfn convert_uint4_rte(half4);
-uint4 __ovld __cnfn convert_uint4_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_rtz(half4);
-uint4 __ovld __cnfn convert_uint4_sat(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
-uint8 __ovld __cnfn convert_uint8(half8);
-uint8 __ovld __cnfn convert_uint8_rte(half8);
-uint8 __ovld __cnfn convert_uint8_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_rtz(half8);
-uint8 __ovld __cnfn convert_uint8_sat(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
-uint16 __ovld __cnfn convert_uint16(half16);
-uint16 __ovld __cnfn convert_uint16_rte(half16);
-uint16 __ovld __cnfn convert_uint16_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_rtz(half16);
-uint16 __ovld __cnfn convert_uint16_sat(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
-ulong __ovld __cnfn convert_ulong(half);
-ulong __ovld __cnfn convert_ulong_rte(half);
-ulong __ovld __cnfn convert_ulong_rtp(half);
-ulong __ovld __cnfn convert_ulong_rtn(half);
-ulong __ovld __cnfn convert_ulong_rtz(half);
-ulong __ovld __cnfn convert_ulong_sat(half);
-ulong __ovld __cnfn convert_ulong_sat_rte(half);
-ulong __ovld __cnfn convert_ulong_sat_rtp(half);
-ulong __ovld __cnfn convert_ulong_sat_rtn(half);
-ulong __ovld __cnfn convert_ulong_sat_rtz(half);
-ulong2 __ovld __cnfn convert_ulong2(half2);
-ulong2 __ovld __cnfn convert_ulong2_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
-ulong3 __ovld __cnfn convert_ulong3(half3);
-ulong3 __ovld __cnfn convert_ulong3_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
-ulong4 __ovld __cnfn convert_ulong4(half4);
-ulong4 __ovld __cnfn convert_ulong4_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
-ulong8 __ovld __cnfn convert_ulong8(half8);
-ulong8 __ovld __cnfn convert_ulong8_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
-ulong16 __ovld __cnfn convert_ulong16(half16);
-ulong16 __ovld __cnfn convert_ulong16_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
-char __ovld __cnfn convert_char(half);
-char __ovld __cnfn convert_char_rte(half);
-char __ovld __cnfn convert_char_rtp(half);
-char __ovld __cnfn convert_char_rtn(half);
-char __ovld __cnfn convert_char_rtz(half);
-char __ovld __cnfn convert_char_sat(half);
-char __ovld __cnfn convert_char_sat_rte(half);
-char __ovld __cnfn convert_char_sat_rtp(half);
-char __ovld __cnfn convert_char_sat_rtn(half);
-char __ovld __cnfn convert_char_sat_rtz(half);
-char2 __ovld __cnfn convert_char2(half2);
-char2 __ovld __cnfn convert_char2_rte(half2);
-char2 __ovld __cnfn convert_char2_rtp(half2);
-char2 __ovld __cnfn convert_char2_rtn(half2);
-char2 __ovld __cnfn convert_char2_rtz(half2);
-char2 __ovld __cnfn convert_char2_sat(half2);
-char2 __ovld __cnfn convert_char2_sat_rte(half2);
-char2 __ovld __cnfn convert_char2_sat_rtp(half2);
-char2 __ovld __cnfn convert_char2_sat_rtn(half2);
-char2 __ovld __cnfn convert_char2_sat_rtz(half2);
-char3 __ovld __cnfn convert_char3(half3);
-char3 __ovld __cnfn convert_char3_rte(half3);
-char3 __ovld __cnfn convert_char3_rtp(half3);
-char3 __ovld __cnfn convert_char3_rtn(half3);
-char3 __ovld __cnfn convert_char3_rtz(half3);
-char3 __ovld __cnfn convert_char3_sat(half3);
-char3 __ovld __cnfn convert_char3_sat_rte(half3);
-char3 __ovld __cnfn convert_char3_sat_rtp(half3);
-char3 __ovld __cnfn convert_char3_sat_rtn(half3);
-char3 __ovld __cnfn convert_char3_sat_rtz(half3);
-char4 __ovld __cnfn convert_char4(half4);
-char4 __ovld __cnfn convert_char4_rte(half4);
-char4 __ovld __cnfn convert_char4_rtp(half4);
-char4 __ovld __cnfn convert_char4_rtn(half4);
-char4 __ovld __cnfn convert_char4_rtz(half4);
-char4 __ovld __cnfn convert_char4_sat(half4);
-char4 __ovld __cnfn convert_char4_sat_rte(half4);
-char4 __ovld __cnfn convert_char4_sat_rtp(half4);
-char4 __ovld __cnfn convert_char4_sat_rtn(half4);
-char4 __ovld __cnfn convert_char4_sat_rtz(half4);
-char8 __ovld __cnfn convert_char8(half8);
-char8 __ovld __cnfn convert_char8_rte(half8);
-char8 __ovld __cnfn convert_char8_rtp(half8);
-char8 __ovld __cnfn convert_char8_rtn(half8);
-char8 __ovld __cnfn convert_char8_rtz(half8);
-char8 __ovld __cnfn convert_char8_sat(half8);
-char8 __ovld __cnfn convert_char8_sat_rte(half8);
-char8 __ovld __cnfn convert_char8_sat_rtp(half8);
-char8 __ovld __cnfn convert_char8_sat_rtn(half8);
-char8 __ovld __cnfn convert_char8_sat_rtz(half8);
-char16 __ovld __cnfn convert_char16(half16);
-char16 __ovld __cnfn convert_char16_rte(half16);
-char16 __ovld __cnfn convert_char16_rtp(half16);
-char16 __ovld __cnfn convert_char16_rtn(half16);
-char16 __ovld __cnfn convert_char16_rtz(half16);
-char16 __ovld __cnfn convert_char16_sat(half16);
-char16 __ovld __cnfn convert_char16_sat_rte(half16);
-char16 __ovld __cnfn convert_char16_sat_rtp(half16);
-char16 __ovld __cnfn convert_char16_sat_rtn(half16);
-char16 __ovld __cnfn convert_char16_sat_rtz(half16);
-short __ovld __cnfn convert_short(half);
-short __ovld __cnfn convert_short_rte(half);
-short __ovld __cnfn convert_short_rtp(half);
-short __ovld __cnfn convert_short_rtn(half);
-short __ovld __cnfn convert_short_rtz(half);
-short __ovld __cnfn convert_short_sat(half);
-short __ovld __cnfn convert_short_sat_rte(half);
-short __ovld __cnfn convert_short_sat_rtp(half);
-short __ovld __cnfn convert_short_sat_rtn(half);
-short __ovld __cnfn convert_short_sat_rtz(half);
-short2 __ovld __cnfn convert_short2(half2);
-short2 __ovld __cnfn convert_short2_rte(half2);
-short2 __ovld __cnfn convert_short2_rtp(half2);
-short2 __ovld __cnfn convert_short2_rtn(half2);
-short2 __ovld __cnfn convert_short2_rtz(half2);
-short2 __ovld __cnfn convert_short2_sat(half2);
-short2 __ovld __cnfn convert_short2_sat_rte(half2);
-short2 __ovld __cnfn convert_short2_sat_rtp(half2);
-short2 __ovld __cnfn convert_short2_sat_rtn(half2);
-short2 __ovld __cnfn convert_short2_sat_rtz(half2);
-short3 __ovld __cnfn convert_short3(half3);
-short3 __ovld __cnfn convert_short3_rte(half3);
-short3 __ovld __cnfn convert_short3_rtp(half3);
-short3 __ovld __cnfn convert_short3_rtn(half3);
-short3 __ovld __cnfn convert_short3_rtz(half3);
-short3 __ovld __cnfn convert_short3_sat(half3);
-short3 __ovld __cnfn convert_short3_sat_rte(half3);
-short3 __ovld __cnfn convert_short3_sat_rtp(half3);
-short3 __ovld __cnfn convert_short3_sat_rtn(half3);
-short3 __ovld __cnfn convert_short3_sat_rtz(half3);
-short4 __ovld __cnfn convert_short4(half4);
-short4 __ovld __cnfn convert_short4_rte(half4);
-short4 __ovld __cnfn convert_short4_rtp(half4);
-short4 __ovld __cnfn convert_short4_rtn(half4);
-short4 __ovld __cnfn convert_short4_rtz(half4);
-short4 __ovld __cnfn convert_short4_sat(half4);
-short4 __ovld __cnfn convert_short4_sat_rte(half4);
-short4 __ovld __cnfn convert_short4_sat_rtp(half4);
-short4 __ovld __cnfn convert_short4_sat_rtn(half4);
-short4 __ovld __cnfn convert_short4_sat_rtz(half4);
-short8 __ovld __cnfn convert_short8(half8);
-short8 __ovld __cnfn convert_short8_rte(half8);
-short8 __ovld __cnfn convert_short8_rtp(half8);
-short8 __ovld __cnfn convert_short8_rtn(half8);
-short8 __ovld __cnfn convert_short8_rtz(half8);
-short8 __ovld __cnfn convert_short8_sat(half8);
-short8 __ovld __cnfn convert_short8_sat_rte(half8);
-short8 __ovld __cnfn convert_short8_sat_rtp(half8);
-short8 __ovld __cnfn convert_short8_sat_rtn(half8);
-short8 __ovld __cnfn convert_short8_sat_rtz(half8);
-short16 __ovld __cnfn convert_short16(half16);
-short16 __ovld __cnfn convert_short16_rte(half16);
-short16 __ovld __cnfn convert_short16_rtp(half16);
-short16 __ovld __cnfn convert_short16_rtn(half16);
-short16 __ovld __cnfn convert_short16_rtz(half16);
-short16 __ovld __cnfn convert_short16_sat(half16);
-short16 __ovld __cnfn convert_short16_sat_rte(half16);
-short16 __ovld __cnfn convert_short16_sat_rtp(half16);
-short16 __ovld __cnfn convert_short16_sat_rtn(half16);
-short16 __ovld __cnfn convert_short16_sat_rtz(half16);
-int __ovld __cnfn convert_int(half);
-int __ovld __cnfn convert_int_rte(half);
-int __ovld __cnfn convert_int_rtp(half);
-int __ovld __cnfn convert_int_rtn(half);
-int __ovld __cnfn convert_int_rtz(half);
-int __ovld __cnfn convert_int_sat(half);
-int __ovld __cnfn convert_int_sat_rte(half);
-int __ovld __cnfn convert_int_sat_rtp(half);
-int __ovld __cnfn convert_int_sat_rtn(half);
-int __ovld __cnfn convert_int_sat_rtz(half);
-int2 __ovld __cnfn convert_int2(half2);
-int2 __ovld __cnfn convert_int2_rte(half2);
-int2 __ovld __cnfn convert_int2_rtp(half2);
-int2 __ovld __cnfn convert_int2_rtn(half2);
-int2 __ovld __cnfn convert_int2_rtz(half2);
-int2 __ovld __cnfn convert_int2_sat(half2);
-int2 __ovld __cnfn convert_int2_sat_rte(half2);
-int2 __ovld __cnfn convert_int2_sat_rtp(half2);
-int2 __ovld __cnfn convert_int2_sat_rtn(half2);
-int2 __ovld __cnfn convert_int2_sat_rtz(half2);
-int3 __ovld __cnfn convert_int3(half3);
-int3 __ovld __cnfn convert_int3_rte(half3);
-int3 __ovld __cnfn convert_int3_rtp(half3);
-int3 __ovld __cnfn convert_int3_rtn(half3);
-int3 __ovld __cnfn convert_int3_rtz(half3);
-int3 __ovld __cnfn convert_int3_sat(half3);
-int3 __ovld __cnfn convert_int3_sat_rte(half3);
-int3 __ovld __cnfn convert_int3_sat_rtp(half3);
-int3 __ovld __cnfn convert_int3_sat_rtn(half3);
-int3 __ovld __cnfn convert_int3_sat_rtz(half3);
-int4 __ovld __cnfn convert_int4(half4);
-int4 __ovld __cnfn convert_int4_rte(half4);
-int4 __ovld __cnfn convert_int4_rtp(half4);
-int4 __ovld __cnfn convert_int4_rtn(half4);
-int4 __ovld __cnfn convert_int4_rtz(half4);
-int4 __ovld __cnfn convert_int4_sat(half4);
-int4 __ovld __cnfn convert_int4_sat_rte(half4);
-int4 __ovld __cnfn convert_int4_sat_rtp(half4);
-int4 __ovld __cnfn convert_int4_sat_rtn(half4);
-int4 __ovld __cnfn convert_int4_sat_rtz(half4);
-int8 __ovld __cnfn convert_int8(half8);
-int8 __ovld __cnfn convert_int8_rte(half8);
-int8 __ovld __cnfn convert_int8_rtp(half8);
-int8 __ovld __cnfn convert_int8_rtn(half8);
-int8 __ovld __cnfn convert_int8_rtz(half8);
-int8 __ovld __cnfn convert_int8_sat(half8);
-int8 __ovld __cnfn convert_int8_sat_rte(half8);
-int8 __ovld __cnfn convert_int8_sat_rtp(half8);
-int8 __ovld __cnfn convert_int8_sat_rtn(half8);
-int8 __ovld __cnfn convert_int8_sat_rtz(half8);
-int16 __ovld __cnfn convert_int16(half16);
-int16 __ovld __cnfn convert_int16_rte(half16);
-int16 __ovld __cnfn convert_int16_rtp(half16);
-int16 __ovld __cnfn convert_int16_rtn(half16);
-int16 __ovld __cnfn convert_int16_rtz(half16);
-int16 __ovld __cnfn convert_int16_sat(half16);
-int16 __ovld __cnfn convert_int16_sat_rte(half16);
-int16 __ovld __cnfn convert_int16_sat_rtp(half16);
-int16 __ovld __cnfn convert_int16_sat_rtn(half16);
-int16 __ovld __cnfn convert_int16_sat_rtz(half16);
-long __ovld __cnfn convert_long(half);
-long __ovld __cnfn convert_long_rte(half);
-long __ovld __cnfn convert_long_rtp(half);
-long __ovld __cnfn convert_long_rtn(half);
-long __ovld __cnfn convert_long_rtz(half);
-long __ovld __cnfn convert_long_sat(half);
-long __ovld __cnfn convert_long_sat_rte(half);
-long __ovld __cnfn convert_long_sat_rtp(half);
-long __ovld __cnfn convert_long_sat_rtn(half);
-long __ovld __cnfn convert_long_sat_rtz(half);
-long2 __ovld __cnfn convert_long2(half2);
-long2 __ovld __cnfn convert_long2_rte(half2);
-long2 __ovld __cnfn convert_long2_rtp(half2);
-long2 __ovld __cnfn convert_long2_rtn(half2);
-long2 __ovld __cnfn convert_long2_rtz(half2);
-long2 __ovld __cnfn convert_long2_sat(half2);
-long2 __ovld __cnfn convert_long2_sat_rte(half2);
-long2 __ovld __cnfn convert_long2_sat_rtp(half2);
-long2 __ovld __cnfn convert_long2_sat_rtn(half2);
-long2 __ovld __cnfn convert_long2_sat_rtz(half2);
-long3 __ovld __cnfn convert_long3(half3);
-long3 __ovld __cnfn convert_long3_rte(half3);
-long3 __ovld __cnfn convert_long3_rtp(half3);
-long3 __ovld __cnfn convert_long3_rtn(half3);
-long3 __ovld __cnfn convert_long3_rtz(half3);
-long3 __ovld __cnfn convert_long3_sat(half3);
-long3 __ovld __cnfn convert_long3_sat_rte(half3);
-long3 __ovld __cnfn convert_long3_sat_rtp(half3);
-long3 __ovld __cnfn convert_long3_sat_rtn(half3);
-long3 __ovld __cnfn convert_long3_sat_rtz(half3);
-long4 __ovld __cnfn convert_long4(half4);
-long4 __ovld __cnfn convert_long4_rte(half4);
-long4 __ovld __cnfn convert_long4_rtp(half4);
-long4 __ovld __cnfn convert_long4_rtn(half4);
-long4 __ovld __cnfn convert_long4_rtz(half4);
-long4 __ovld __cnfn convert_long4_sat(half4);
-long4 __ovld __cnfn convert_long4_sat_rte(half4);
-long4 __ovld __cnfn convert_long4_sat_rtp(half4);
-long4 __ovld __cnfn convert_long4_sat_rtn(half4);
-long4 __ovld __cnfn convert_long4_sat_rtz(half4);
-long8 __ovld __cnfn convert_long8(half8);
-long8 __ovld __cnfn convert_long8_rte(half8);
-long8 __ovld __cnfn convert_long8_rtp(half8);
-long8 __ovld __cnfn convert_long8_rtn(half8);
-long8 __ovld __cnfn convert_long8_rtz(half8);
-long8 __ovld __cnfn convert_long8_sat(half8);
-long8 __ovld __cnfn convert_long8_sat_rte(half8);
-long8 __ovld __cnfn convert_long8_sat_rtp(half8);
-long8 __ovld __cnfn convert_long8_sat_rtn(half8);
-long8 __ovld __cnfn convert_long8_sat_rtz(half8);
-long16 __ovld __cnfn convert_long16(half16);
-long16 __ovld __cnfn convert_long16_rte(half16);
-long16 __ovld __cnfn convert_long16_rtp(half16);
-long16 __ovld __cnfn convert_long16_rtn(half16);
-long16 __ovld __cnfn convert_long16_rtz(half16);
-long16 __ovld __cnfn convert_long16_sat(half16);
-long16 __ovld __cnfn convert_long16_sat_rte(half16);
-long16 __ovld __cnfn convert_long16_sat_rtp(half16);
-long16 __ovld __cnfn convert_long16_sat_rtn(half16);
-long16 __ovld __cnfn convert_long16_sat_rtz(half16);
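-
-// Illustrative sketch (not part of the original header): the _sat variants
-// above clamp out-of-range inputs to the destination type's limits rather
-// than leaving the result undefined. A hypothetical example:
-//
-//   char c1 = convert_char_sat((half)300.0f);  // 127: clamped to CHAR_MAX
-//   char c2 = convert_char_rtn((half)-1.25f);  // -2: rounded toward -inf
-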
-float __ovld __cnfn convert_float(half);
-float __ovld __cnfn convert_float_rte(half);
-float __ovld __cnfn convert_float_rtp(half);
-float __ovld __cnfn convert_float_rtn(half);
-float __ovld __cnfn convert_float_rtz(half);
-float2 __ovld __cnfn convert_float2(half2);
-float2 __ovld __cnfn convert_float2_rte(half2);
-float2 __ovld __cnfn convert_float2_rtp(half2);
-float2 __ovld __cnfn convert_float2_rtn(half2);
-float2 __ovld __cnfn convert_float2_rtz(half2);
-float3 __ovld __cnfn convert_float3(half3);
-float3 __ovld __cnfn convert_float3_rte(half3);
-float3 __ovld __cnfn convert_float3_rtp(half3);
-float3 __ovld __cnfn convert_float3_rtn(half3);
-float3 __ovld __cnfn convert_float3_rtz(half3);
-float4 __ovld __cnfn convert_float4(half4);
-float4 __ovld __cnfn convert_float4_rte(half4);
-float4 __ovld __cnfn convert_float4_rtp(half4);
-float4 __ovld __cnfn convert_float4_rtn(half4);
-float4 __ovld __cnfn convert_float4_rtz(half4);
-float8 __ovld __cnfn convert_float8(half8);
-float8 __ovld __cnfn convert_float8_rte(half8);
-float8 __ovld __cnfn convert_float8_rtp(half8);
-float8 __ovld __cnfn convert_float8_rtn(half8);
-float8 __ovld __cnfn convert_float8_rtz(half8);
-float16 __ovld __cnfn convert_float16(half16);
-float16 __ovld __cnfn convert_float16_rte(half16);
-float16 __ovld __cnfn convert_float16_rtp(half16);
-float16 __ovld __cnfn convert_float16_rtn(half16);
-float16 __ovld __cnfn convert_float16_rtz(half16);
-
-// Convert non-double types to half types.
-half __ovld __cnfn convert_half(uchar);
-half __ovld __cnfn convert_half(ushort);
-half __ovld __cnfn convert_half(uint);
-half __ovld __cnfn convert_half(ulong);
-half __ovld __cnfn convert_half(char);
-half __ovld __cnfn convert_half(short);
-half __ovld __cnfn convert_half(int);
-half __ovld __cnfn convert_half(long);
-half __ovld __cnfn convert_half(float);
-half __ovld __cnfn convert_half(half);
-half __ovld __cnfn convert_half_rte(uchar);
-half __ovld __cnfn convert_half_rte(ushort);
-half __ovld __cnfn convert_half_rte(uint);
-half __ovld __cnfn convert_half_rte(ulong);
-half __ovld __cnfn convert_half_rte(char);
-half __ovld __cnfn convert_half_rte(short);
-half __ovld __cnfn convert_half_rte(int);
-half __ovld __cnfn convert_half_rte(long);
-half __ovld __cnfn convert_half_rte(float);
-half __ovld __cnfn convert_half_rte(half);
-half __ovld __cnfn convert_half_rtp(uchar);
-half __ovld __cnfn convert_half_rtp(ushort);
-half __ovld __cnfn convert_half_rtp(uint);
-half __ovld __cnfn convert_half_rtp(ulong);
-half __ovld __cnfn convert_half_rtp(char);
-half __ovld __cnfn convert_half_rtp(short);
-half __ovld __cnfn convert_half_rtp(int);
-half __ovld __cnfn convert_half_rtp(long);
-half __ovld __cnfn convert_half_rtp(float);
-half __ovld __cnfn convert_half_rtp(half);
-half __ovld __cnfn convert_half_rtn(uchar);
-half __ovld __cnfn convert_half_rtn(ushort);
-half __ovld __cnfn convert_half_rtn(uint);
-half __ovld __cnfn convert_half_rtn(ulong);
-half __ovld __cnfn convert_half_rtn(char);
-half __ovld __cnfn convert_half_rtn(short);
-half __ovld __cnfn convert_half_rtn(int);
-half __ovld __cnfn convert_half_rtn(long);
-half __ovld __cnfn convert_half_rtn(float);
-half __ovld __cnfn convert_half_rtn(half);
-half __ovld __cnfn convert_half_rtz(uchar);
-half __ovld __cnfn convert_half_rtz(ushort);
-half __ovld __cnfn convert_half_rtz(uint);
-half __ovld __cnfn convert_half_rtz(ulong);
-half __ovld __cnfn convert_half_rtz(char);
-half __ovld __cnfn convert_half_rtz(short);
-half __ovld __cnfn convert_half_rtz(int);
-half __ovld __cnfn convert_half_rtz(long);
-half __ovld __cnfn convert_half_rtz(float);
-half __ovld __cnfn convert_half_rtz(half);
-half2 __ovld __cnfn convert_half2(char2);
-half2 __ovld __cnfn convert_half2(uchar2);
-half2 __ovld __cnfn convert_half2(short2);
-half2 __ovld __cnfn convert_half2(ushort2);
-half2 __ovld __cnfn convert_half2(int2);
-half2 __ovld __cnfn convert_half2(uint2);
-half2 __ovld __cnfn convert_half2(long2);
-half2 __ovld __cnfn convert_half2(ulong2);
-half2 __ovld __cnfn convert_half2(float2);
-half2 __ovld __cnfn convert_half2(half2);
-half2 __ovld __cnfn convert_half2_rte(char2);
-half2 __ovld __cnfn convert_half2_rte(uchar2);
-half2 __ovld __cnfn convert_half2_rte(short2);
-half2 __ovld __cnfn convert_half2_rte(ushort2);
-half2 __ovld __cnfn convert_half2_rte(int2);
-half2 __ovld __cnfn convert_half2_rte(uint2);
-half2 __ovld __cnfn convert_half2_rte(long2);
-half2 __ovld __cnfn convert_half2_rte(ulong2);
-half2 __ovld __cnfn convert_half2_rte(float2);
-half2 __ovld __cnfn convert_half2_rte(half2);
-half2 __ovld __cnfn convert_half2_rtp(char2);
-half2 __ovld __cnfn convert_half2_rtp(uchar2);
-half2 __ovld __cnfn convert_half2_rtp(short2);
-half2 __ovld __cnfn convert_half2_rtp(ushort2);
-half2 __ovld __cnfn convert_half2_rtp(int2);
-half2 __ovld __cnfn convert_half2_rtp(uint2);
-half2 __ovld __cnfn convert_half2_rtp(long2);
-half2 __ovld __cnfn convert_half2_rtp(ulong2);
-half2 __ovld __cnfn convert_half2_rtp(float2);
-half2 __ovld __cnfn convert_half2_rtp(half2);
-half2 __ovld __cnfn convert_half2_rtn(char2);
-half2 __ovld __cnfn convert_half2_rtn(uchar2);
-half2 __ovld __cnfn convert_half2_rtn(short2);
-half2 __ovld __cnfn convert_half2_rtn(ushort2);
-half2 __ovld __cnfn convert_half2_rtn(int2);
-half2 __ovld __cnfn convert_half2_rtn(uint2);
-half2 __ovld __cnfn convert_half2_rtn(long2);
-half2 __ovld __cnfn convert_half2_rtn(ulong2);
-half2 __ovld __cnfn convert_half2_rtn(float2);
-half2 __ovld __cnfn convert_half2_rtn(half2);
-half2 __ovld __cnfn convert_half2_rtz(char2);
-half2 __ovld __cnfn convert_half2_rtz(uchar2);
-half2 __ovld __cnfn convert_half2_rtz(short2);
-half2 __ovld __cnfn convert_half2_rtz(ushort2);
-half2 __ovld __cnfn convert_half2_rtz(int2);
-half2 __ovld __cnfn convert_half2_rtz(uint2);
-half2 __ovld __cnfn convert_half2_rtz(long2);
-half2 __ovld __cnfn convert_half2_rtz(ulong2);
-half2 __ovld __cnfn convert_half2_rtz(float2);
-half2 __ovld __cnfn convert_half2_rtz(half2);
-half3 __ovld __cnfn convert_half3(char3);
-half3 __ovld __cnfn convert_half3(uchar3);
-half3 __ovld __cnfn convert_half3(short3);
-half3 __ovld __cnfn convert_half3(ushort3);
-half3 __ovld __cnfn convert_half3(int3);
-half3 __ovld __cnfn convert_half3(uint3);
-half3 __ovld __cnfn convert_half3(long3);
-half3 __ovld __cnfn convert_half3(ulong3);
-half3 __ovld __cnfn convert_half3(float3);
-half3 __ovld __cnfn convert_half3(half3);
-half3 __ovld __cnfn convert_half3_rte(char3);
-half3 __ovld __cnfn convert_half3_rte(uchar3);
-half3 __ovld __cnfn convert_half3_rte(short3);
-half3 __ovld __cnfn convert_half3_rte(ushort3);
-half3 __ovld __cnfn convert_half3_rte(int3);
-half3 __ovld __cnfn convert_half3_rte(uint3);
-half3 __ovld __cnfn convert_half3_rte(long3);
-half3 __ovld __cnfn convert_half3_rte(ulong3);
-half3 __ovld __cnfn convert_half3_rte(float3);
-half3 __ovld __cnfn convert_half3_rte(half3);
-half3 __ovld __cnfn convert_half3_rtp(char3);
-half3 __ovld __cnfn convert_half3_rtp(uchar3);
-half3 __ovld __cnfn convert_half3_rtp(short3);
-half3 __ovld __cnfn convert_half3_rtp(ushort3);
-half3 __ovld __cnfn convert_half3_rtp(int3);
-half3 __ovld __cnfn convert_half3_rtp(uint3);
-half3 __ovld __cnfn convert_half3_rtp(long3);
-half3 __ovld __cnfn convert_half3_rtp(ulong3);
-half3 __ovld __cnfn convert_half3_rtp(float3);
-half3 __ovld __cnfn convert_half3_rtp(half3);
-half3 __ovld __cnfn convert_half3_rtn(char3);
-half3 __ovld __cnfn convert_half3_rtn(uchar3);
-half3 __ovld __cnfn convert_half3_rtn(short3);
-half3 __ovld __cnfn convert_half3_rtn(ushort3);
-half3 __ovld __cnfn convert_half3_rtn(int3);
-half3 __ovld __cnfn convert_half3_rtn(uint3);
-half3 __ovld __cnfn convert_half3_rtn(long3);
-half3 __ovld __cnfn convert_half3_rtn(ulong3);
-half3 __ovld __cnfn convert_half3_rtn(float3);
-half3 __ovld __cnfn convert_half3_rtn(half3);
-half3 __ovld __cnfn convert_half3_rtz(char3);
-half3 __ovld __cnfn convert_half3_rtz(uchar3);
-half3 __ovld __cnfn convert_half3_rtz(short3);
-half3 __ovld __cnfn convert_half3_rtz(ushort3);
-half3 __ovld __cnfn convert_half3_rtz(int3);
-half3 __ovld __cnfn convert_half3_rtz(uint3);
-half3 __ovld __cnfn convert_half3_rtz(long3);
-half3 __ovld __cnfn convert_half3_rtz(ulong3);
-half3 __ovld __cnfn convert_half3_rtz(float3);
-half3 __ovld __cnfn convert_half3_rtz(half3);
-half4 __ovld __cnfn convert_half4(char4);
-half4 __ovld __cnfn convert_half4(uchar4);
-half4 __ovld __cnfn convert_half4(short4);
-half4 __ovld __cnfn convert_half4(ushort4);
-half4 __ovld __cnfn convert_half4(int4);
-half4 __ovld __cnfn convert_half4(uint4);
-half4 __ovld __cnfn convert_half4(long4);
-half4 __ovld __cnfn convert_half4(ulong4);
-half4 __ovld __cnfn convert_half4(float4);
-half4 __ovld __cnfn convert_half4(half4);
-half4 __ovld __cnfn convert_half4_rte(char4);
-half4 __ovld __cnfn convert_half4_rte(uchar4);
-half4 __ovld __cnfn convert_half4_rte(short4);
-half4 __ovld __cnfn convert_half4_rte(ushort4);
-half4 __ovld __cnfn convert_half4_rte(int4);
-half4 __ovld __cnfn convert_half4_rte(uint4);
-half4 __ovld __cnfn convert_half4_rte(long4);
-half4 __ovld __cnfn convert_half4_rte(ulong4);
-half4 __ovld __cnfn convert_half4_rte(float4);
-half4 __ovld __cnfn convert_half4_rte(half4);
-half4 __ovld __cnfn convert_half4_rtp(char4);
-half4 __ovld __cnfn convert_half4_rtp(uchar4);
-half4 __ovld __cnfn convert_half4_rtp(short4);
-half4 __ovld __cnfn convert_half4_rtp(ushort4);
-half4 __ovld __cnfn convert_half4_rtp(int4);
-half4 __ovld __cnfn convert_half4_rtp(uint4);
-half4 __ovld __cnfn convert_half4_rtp(long4);
-half4 __ovld __cnfn convert_half4_rtp(ulong4);
-half4 __ovld __cnfn convert_half4_rtp(float4);
-half4 __ovld __cnfn convert_half4_rtp(half4);
-half4 __ovld __cnfn convert_half4_rtn(char4);
-half4 __ovld __cnfn convert_half4_rtn(uchar4);
-half4 __ovld __cnfn convert_half4_rtn(short4);
-half4 __ovld __cnfn convert_half4_rtn(ushort4);
-half4 __ovld __cnfn convert_half4_rtn(int4);
-half4 __ovld __cnfn convert_half4_rtn(uint4);
-half4 __ovld __cnfn convert_half4_rtn(long4);
-half4 __ovld __cnfn convert_half4_rtn(ulong4);
-half4 __ovld __cnfn convert_half4_rtn(float4);
-half4 __ovld __cnfn convert_half4_rtn(half4);
-half4 __ovld __cnfn convert_half4_rtz(char4);
-half4 __ovld __cnfn convert_half4_rtz(uchar4);
-half4 __ovld __cnfn convert_half4_rtz(short4);
-half4 __ovld __cnfn convert_half4_rtz(ushort4);
-half4 __ovld __cnfn convert_half4_rtz(int4);
-half4 __ovld __cnfn convert_half4_rtz(uint4);
-half4 __ovld __cnfn convert_half4_rtz(long4);
-half4 __ovld __cnfn convert_half4_rtz(ulong4);
-half4 __ovld __cnfn convert_half4_rtz(float4);
-half4 __ovld __cnfn convert_half4_rtz(half4);
-half8 __ovld __cnfn convert_half8(char8);
-half8 __ovld __cnfn convert_half8(uchar8);
-half8 __ovld __cnfn convert_half8(short8);
-half8 __ovld __cnfn convert_half8(ushort8);
-half8 __ovld __cnfn convert_half8(int8);
-half8 __ovld __cnfn convert_half8(uint8);
-half8 __ovld __cnfn convert_half8(long8);
-half8 __ovld __cnfn convert_half8(ulong8);
-half8 __ovld __cnfn convert_half8(float8);
-half8 __ovld __cnfn convert_half8(half8);
-half8 __ovld __cnfn convert_half8_rte(char8);
-half8 __ovld __cnfn convert_half8_rte(uchar8);
-half8 __ovld __cnfn convert_half8_rte(short8);
-half8 __ovld __cnfn convert_half8_rte(ushort8);
-half8 __ovld __cnfn convert_half8_rte(int8);
-half8 __ovld __cnfn convert_half8_rte(uint8);
-half8 __ovld __cnfn convert_half8_rte(long8);
-half8 __ovld __cnfn convert_half8_rte(ulong8);
-half8 __ovld __cnfn convert_half8_rte(float8);
-half8 __ovld __cnfn convert_half8_rte(half8);
-half8 __ovld __cnfn convert_half8_rtp(char8);
-half8 __ovld __cnfn convert_half8_rtp(uchar8);
-half8 __ovld __cnfn convert_half8_rtp(short8);
-half8 __ovld __cnfn convert_half8_rtp(ushort8);
-half8 __ovld __cnfn convert_half8_rtp(int8);
-half8 __ovld __cnfn convert_half8_rtp(uint8);
-half8 __ovld __cnfn convert_half8_rtp(long8);
-half8 __ovld __cnfn convert_half8_rtp(ulong8);
-half8 __ovld __cnfn convert_half8_rtp(float8);
-half8 __ovld __cnfn convert_half8_rtp(half8);
-half8 __ovld __cnfn convert_half8_rtn(char8);
-half8 __ovld __cnfn convert_half8_rtn(uchar8);
-half8 __ovld __cnfn convert_half8_rtn(short8);
-half8 __ovld __cnfn convert_half8_rtn(ushort8);
-half8 __ovld __cnfn convert_half8_rtn(int8);
-half8 __ovld __cnfn convert_half8_rtn(uint8);
-half8 __ovld __cnfn convert_half8_rtn(long8);
-half8 __ovld __cnfn convert_half8_rtn(ulong8);
-half8 __ovld __cnfn convert_half8_rtn(float8);
-half8 __ovld __cnfn convert_half8_rtn(half8);
-half8 __ovld __cnfn convert_half8_rtz(char8);
-half8 __ovld __cnfn convert_half8_rtz(uchar8);
-half8 __ovld __cnfn convert_half8_rtz(short8);
-half8 __ovld __cnfn convert_half8_rtz(ushort8);
-half8 __ovld __cnfn convert_half8_rtz(int8);
-half8 __ovld __cnfn convert_half8_rtz(uint8);
-half8 __ovld __cnfn convert_half8_rtz(long8);
-half8 __ovld __cnfn convert_half8_rtz(ulong8);
-half8 __ovld __cnfn convert_half8_rtz(float8);
-half8 __ovld __cnfn convert_half8_rtz(half8);
-half16 __ovld __cnfn convert_half16(char16);
-half16 __ovld __cnfn convert_half16(uchar16);
-half16 __ovld __cnfn convert_half16(short16);
-half16 __ovld __cnfn convert_half16(ushort16);
-half16 __ovld __cnfn convert_half16(int16);
-half16 __ovld __cnfn convert_half16(uint16);
-half16 __ovld __cnfn convert_half16(long16);
-half16 __ovld __cnfn convert_half16(ulong16);
-half16 __ovld __cnfn convert_half16(float16);
-half16 __ovld __cnfn convert_half16(half16);
-half16 __ovld __cnfn convert_half16_rte(char16);
-half16 __ovld __cnfn convert_half16_rte(uchar16);
-half16 __ovld __cnfn convert_half16_rte(short16);
-half16 __ovld __cnfn convert_half16_rte(ushort16);
-half16 __ovld __cnfn convert_half16_rte(int16);
-half16 __ovld __cnfn convert_half16_rte(uint16);
-half16 __ovld __cnfn convert_half16_rte(long16);
-half16 __ovld __cnfn convert_half16_rte(ulong16);
-half16 __ovld __cnfn convert_half16_rte(float16);
-half16 __ovld __cnfn convert_half16_rte(half16);
-half16 __ovld __cnfn convert_half16_rtp(char16);
-half16 __ovld __cnfn convert_half16_rtp(uchar16);
-half16 __ovld __cnfn convert_half16_rtp(short16);
-half16 __ovld __cnfn convert_half16_rtp(ushort16);
-half16 __ovld __cnfn convert_half16_rtp(int16);
-half16 __ovld __cnfn convert_half16_rtp(uint16);
-half16 __ovld __cnfn convert_half16_rtp(long16);
-half16 __ovld __cnfn convert_half16_rtp(ulong16);
-half16 __ovld __cnfn convert_half16_rtp(float16);
-half16 __ovld __cnfn convert_half16_rtp(half16);
-half16 __ovld __cnfn convert_half16_rtn(char16);
-half16 __ovld __cnfn convert_half16_rtn(uchar16);
-half16 __ovld __cnfn convert_half16_rtn(short16);
-half16 __ovld __cnfn convert_half16_rtn(ushort16);
-half16 __ovld __cnfn convert_half16_rtn(int16);
-half16 __ovld __cnfn convert_half16_rtn(uint16);
-half16 __ovld __cnfn convert_half16_rtn(long16);
-half16 __ovld __cnfn convert_half16_rtn(ulong16);
-half16 __ovld __cnfn convert_half16_rtn(float16);
-half16 __ovld __cnfn convert_half16_rtn(half16);
-half16 __ovld __cnfn convert_half16_rtz(char16);
-half16 __ovld __cnfn convert_half16_rtz(uchar16);
-half16 __ovld __cnfn convert_half16_rtz(short16);
-half16 __ovld __cnfn convert_half16_rtz(ushort16);
-half16 __ovld __cnfn convert_half16_rtz(int16);
-half16 __ovld __cnfn convert_half16_rtz(uint16);
-half16 __ovld __cnfn convert_half16_rtz(long16);
-half16 __ovld __cnfn convert_half16_rtz(ulong16);
-half16 __ovld __cnfn convert_half16_rtz(float16);
-half16 __ovld __cnfn convert_half16_rtz(half16);
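-
-// Illustrative sketch (not part of the original header): the suffix grammar
-// throughout this section is convert_<dest>[_sat][_<mode>], where <mode> is
-// _rte (to nearest even), _rtp (toward +infinity), _rtn (toward -infinity),
-// or _rtz (toward zero); with no suffix, conversions to floating types
-// round to nearest even. A hypothetical kernel packing floats into halves:
-//
-//   __kernel void pack_halves(__global const float4 *src,
-//                             __global half4 *dst) {
-//     size_t i = get_global_id(0);
-//     dst[i] = convert_half4_rtz(src[i]);  // explicit round-toward-zero
-//   }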
-
-// Convert half types to double types.
-#ifdef cl_khr_fp64
-double __ovld __cnfn convert_double(half);
-double __ovld __cnfn convert_double_rte(half);
-double __ovld __cnfn convert_double_rtp(half);
-double __ovld __cnfn convert_double_rtn(half);
-double __ovld __cnfn convert_double_rtz(half);
-double2 __ovld __cnfn convert_double2(half2);
-double2 __ovld __cnfn convert_double2_rte(half2);
-double2 __ovld __cnfn convert_double2_rtp(half2);
-double2 __ovld __cnfn convert_double2_rtn(half2);
-double2 __ovld __cnfn convert_double2_rtz(half2);
-double3 __ovld __cnfn convert_double3(half3);
-double3 __ovld __cnfn convert_double3_rte(half3);
-double3 __ovld __cnfn convert_double3_rtp(half3);
-double3 __ovld __cnfn convert_double3_rtn(half3);
-double3 __ovld __cnfn convert_double3_rtz(half3);
-double4 __ovld __cnfn convert_double4(half4);
-double4 __ovld __cnfn convert_double4_rte(half4);
-double4 __ovld __cnfn convert_double4_rtp(half4);
-double4 __ovld __cnfn convert_double4_rtn(half4);
-double4 __ovld __cnfn convert_double4_rtz(half4);
-double8 __ovld __cnfn convert_double8(half8);
-double8 __ovld __cnfn convert_double8_rte(half8);
-double8 __ovld __cnfn convert_double8_rtp(half8);
-double8 __ovld __cnfn convert_double8_rtn(half8);
-double8 __ovld __cnfn convert_double8_rtz(half8);
-double16 __ovld __cnfn convert_double16(half16);
-double16 __ovld __cnfn convert_double16_rte(half16);
-double16 __ovld __cnfn convert_double16_rtp(half16);
-double16 __ovld __cnfn convert_double16_rtn(half16);
-double16 __ovld __cnfn convert_double16_rtz(half16);
-
-// Convert double types to half types.
-half __ovld __cnfn convert_half(double);
-half __ovld __cnfn convert_half_rte(double);
-half __ovld __cnfn convert_half_rtp(double);
-half __ovld __cnfn convert_half_rtn(double);
-half __ovld __cnfn convert_half_rtz(double);
-half2 __ovld __cnfn convert_half2(double2);
-half2 __ovld __cnfn convert_half2_rte(double2);
-half2 __ovld __cnfn convert_half2_rtp(double2);
-half2 __ovld __cnfn convert_half2_rtn(double2);
-half2 __ovld __cnfn convert_half2_rtz(double2);
-half3 __ovld __cnfn convert_half3(double3);
-half3 __ovld __cnfn convert_half3_rte(double3);
-half3 __ovld __cnfn convert_half3_rtp(double3);
-half3 __ovld __cnfn convert_half3_rtn(double3);
-half3 __ovld __cnfn convert_half3_rtz(double3);
-half4 __ovld __cnfn convert_half4(double4);
-half4 __ovld __cnfn convert_half4_rte(double4);
-half4 __ovld __cnfn convert_half4_rtp(double4);
-half4 __ovld __cnfn convert_half4_rtn(double4);
-half4 __ovld __cnfn convert_half4_rtz(double4);
-half8 __ovld __cnfn convert_half8(double8);
-half8 __ovld __cnfn convert_half8_rte(double8);
-half8 __ovld __cnfn convert_half8_rtp(double8);
-half8 __ovld __cnfn convert_half8_rtn(double8);
-half8 __ovld __cnfn convert_half8_rtz(double8);
-half16 __ovld __cnfn convert_half16(double16);
-half16 __ovld __cnfn convert_half16_rte(double16);
-half16 __ovld __cnfn convert_half16_rtp(double16);
-half16 __ovld __cnfn convert_half16_rtn(double16);
-half16 __ovld __cnfn convert_half16_rtz(double16);
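-
-// Illustrative sketch (not part of the original header): these overloads
-// exist only when cl_khr_fp64 is supported in addition to cl_khr_fp16, e.g.:
-//
-//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
-//   double d = 0.1;                // not exactly representable in binary
-//   half h = convert_half_rtz(d);  // narrowed with truncation toward zero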
-#endif //cl_khr_fp64
-
-#endif // cl_khr_fp16
-
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
-
-/**
- * Returns the number of dimensions in use. This is the
- * value given to the work_dim argument specified in
- * clEnqueueNDRangeKernel.
- * For clEnqueueTask, this returns 1.
- */
-uint __ovld __cnfn get_work_dim(void);
-
-/**
- * Returns the number of global work-items specified for
- * dimension identified by dimindx. This value is given by
- * the global_work_size argument to
- * clEnqueueNDRangeKernel. Valid values of dimindx
- * are 0 to get_work_dim() - 1. For other values of
- * dimindx, get_global_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_global_size(uint dimindx);
-
-/**
- * Returns the unique global work-item ID value for
- * the dimension identified by dimindx. The global work-item
- * ID identifies a work-item among the total number of
- * global work-items specified to execute the kernel. Valid
- * values of dimindx are 0 to get_work_dim() - 1. For
- * other values of dimindx, get_global_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_id(uint dimindx);
-
-/**
- * Returns the number of local work-items specified in
- * dimension identified by dimindx. This value is given by
- * the local_work_size argument to
- * clEnqueueNDRangeKernel if local_work_size is not
- * NULL; otherwise the OpenCL implementation chooses
- * an appropriate local_work_size value which is returned
- * by this function. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_local_size(uint dimindx);
-
-/**
- * Returns the unique local work-item ID, i.e. the ID of a
- * work-item within a specific work-group, for the dimension
- * identified by dimindx. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_local_id(uint dimindx);
-
-/**
- * Returns the number of work-groups that will execute a
- * kernel for dimension identified by dimindx.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values of dimindx, get_num_groups() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_num_groups(uint dimindx);
-
-/**
- * get_group_id returns the work-group ID, which is a
- * number in the range 0 to get_num_groups(dimindx) - 1.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_group_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_group_id(uint dimindx);
-
-/**
- * get_global_offset returns the offset value specified in
- * the global_work_offset argument to
- * clEnqueueNDRangeKernel.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_global_offset() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_offset(uint dimindx);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
-size_t __ovld get_global_linear_id(void);
-size_t __ovld get_local_linear_id(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
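-
-// Illustrative sketch (not part of the original header): a hypothetical 2D
-// kernel combining the work-item queries above to form a row-major index.
-//
-//   __kernel void scale(__global float *buf, float k) {
-//     size_t x = get_global_id(0);    // column within the global NDRange
-//     size_t y = get_global_id(1);    // row within the global NDRange
-//     size_t w = get_global_size(0);  // number of columns
-//     buf[y * w + x] *= k;
-//   }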
-
-// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
-
-/**
- * Arc cosine function.
- */
-float __ovld __cnfn acos(float);
-float2 __ovld __cnfn acos(float2);
-float3 __ovld __cnfn acos(float3);
-float4 __ovld __cnfn acos(float4);
-float8 __ovld __cnfn acos(float8);
-float16 __ovld __cnfn acos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acos(double);
-double2 __ovld __cnfn acos(double2);
-double3 __ovld __cnfn acos(double3);
-double4 __ovld __cnfn acos(double4);
-double8 __ovld __cnfn acos(double8);
-double16 __ovld __cnfn acos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acos(half);
-half2 __ovld __cnfn acos(half2);
-half3 __ovld __cnfn acos(half3);
-half4 __ovld __cnfn acos(half4);
-half8 __ovld __cnfn acos(half8);
-half16 __ovld __cnfn acos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic cosine.
- */
-float __ovld __cnfn acosh(float);
-float2 __ovld __cnfn acosh(float2);
-float3 __ovld __cnfn acosh(float3);
-float4 __ovld __cnfn acosh(float4);
-float8 __ovld __cnfn acosh(float8);
-float16 __ovld __cnfn acosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acosh(double);
-double2 __ovld __cnfn acosh(double2);
-double3 __ovld __cnfn acosh(double3);
-double4 __ovld __cnfn acosh(double4);
-double8 __ovld __cnfn acosh(double8);
-double16 __ovld __cnfn acosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acosh(half);
-half2 __ovld __cnfn acosh(half2);
-half3 __ovld __cnfn acosh(half3);
-half4 __ovld __cnfn acosh(half4);
-half8 __ovld __cnfn acosh(half8);
-half16 __ovld __cnfn acosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute acos (x) / PI.
- */
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc sine function.
- */
-float __ovld __cnfn asin(float);
-float2 __ovld __cnfn asin(float2);
-float3 __ovld __cnfn asin(float3);
-float4 __ovld __cnfn asin(float4);
-float8 __ovld __cnfn asin(float8);
-float16 __ovld __cnfn asin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asin(double);
-double2 __ovld __cnfn asin(double2);
-double3 __ovld __cnfn asin(double3);
-double4 __ovld __cnfn asin(double4);
-double8 __ovld __cnfn asin(double8);
-double16 __ovld __cnfn asin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asin(half);
-half2 __ovld __cnfn asin(half2);
-half3 __ovld __cnfn asin(half3);
-half4 __ovld __cnfn asin(half4);
-half8 __ovld __cnfn asin(half8);
-half16 __ovld __cnfn asin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic sine.
- */
-float __ovld __cnfn asinh(float);
-float2 __ovld __cnfn asinh(float2);
-float3 __ovld __cnfn asinh(float3);
-float4 __ovld __cnfn asinh(float4);
-float8 __ovld __cnfn asinh(float8);
-float16 __ovld __cnfn asinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinh(double);
-double2 __ovld __cnfn asinh(double2);
-double3 __ovld __cnfn asinh(double3);
-double4 __ovld __cnfn asinh(double4);
-double8 __ovld __cnfn asinh(double8);
-double16 __ovld __cnfn asinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinh(half);
-half2 __ovld __cnfn asinh(half2);
-half3 __ovld __cnfn asinh(half3);
-half4 __ovld __cnfn asinh(half4);
-half8 __ovld __cnfn asinh(half8);
-half16 __ovld __cnfn asinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute asin (x) / PI.
- */
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent function.
- */
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent of y / x.
- */
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
-#endif //cl_khr_fp16
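-
-// Illustrative sketch (not part of the original header): unlike atan(y / x),
-// atan2(y, x) uses the signs of both arguments to pick the quadrant, so it
-// stays well-defined when x == 0. Converting a 2D vector to polar form:
-//
-//   float2 v = (float2)(-1.0f, 1.0f);
-//   float r = length(v);            // magnitude, sqrt(2)
-//   float theta = atan2(v.y, v.x);  // 3*PI/4; atan(v.y / v.x) gives -PI/4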
-
-/**
- * Inverse hyperbolic tangent.
- */
-float __ovld __cnfn atanh(float);
-float2 __ovld __cnfn atanh(float2);
-float3 __ovld __cnfn atanh(float3);
-float4 __ovld __cnfn atanh(float4);
-float8 __ovld __cnfn atanh(float8);
-float16 __ovld __cnfn atanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanh(double);
-double2 __ovld __cnfn atanh(double2);
-double3 __ovld __cnfn atanh(double3);
-double4 __ovld __cnfn atanh(double4);
-double8 __ovld __cnfn atanh(double8);
-double16 __ovld __cnfn atanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanh(half);
-half2 __ovld __cnfn atanh(half2);
-half3 __ovld __cnfn atanh(half3);
-half4 __ovld __cnfn atanh(half4);
-half8 __ovld __cnfn atanh(half8);
-half16 __ovld __cnfn atanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan (x) / PI.
- */
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan2 (y, x) / PI.
- */
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute cube-root.
- */
-float __ovld __cnfn cbrt(float);
-float2 __ovld __cnfn cbrt(float2);
-float3 __ovld __cnfn cbrt(float3);
-float4 __ovld __cnfn cbrt(float4);
-float8 __ovld __cnfn cbrt(float8);
-float16 __ovld __cnfn cbrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cbrt(double);
-double2 __ovld __cnfn cbrt(double2);
-double3 __ovld __cnfn cbrt(double3);
-double4 __ovld __cnfn cbrt(double4);
-double8 __ovld __cnfn cbrt(double8);
-double16 __ovld __cnfn cbrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cbrt(half);
-half2 __ovld __cnfn cbrt(half2);
-half3 __ovld __cnfn cbrt(half3);
-half4 __ovld __cnfn cbrt(half4);
-half8 __ovld __cnfn cbrt(half8);
-half16 __ovld __cnfn cbrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to positive
- * infinity rounding mode.
- */
-float __ovld __cnfn ceil(float);
-float2 __ovld __cnfn ceil(float2);
-float3 __ovld __cnfn ceil(float3);
-float4 __ovld __cnfn ceil(float4);
-float8 __ovld __cnfn ceil(float8);
-float16 __ovld __cnfn ceil(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ceil(double);
-double2 __ovld __cnfn ceil(double2);
-double3 __ovld __cnfn ceil(double3);
-double4 __ovld __cnfn ceil(double4);
-double8 __ovld __cnfn ceil(double8);
-double16 __ovld __cnfn ceil(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ceil(half);
-half2 __ovld __cnfn ceil(half2);
-half3 __ovld __cnfn ceil(half3);
-half4 __ovld __cnfn ceil(half4);
-half8 __ovld __cnfn ceil(half8);
-half16 __ovld __cnfn ceil(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x with its sign changed to match the sign of y.
- */
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
-#endif //cl_khr_fp16
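-
-// Illustrative sketch (not part of the original header): only the sign bit
-// of y is consulted, so a negative zero counts as negative:
-//
-//   float s = copysign(3.0f, -0.0f);  // -3.0f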
-
-/**
- * Compute cosine.
- */
-float __ovld __cnfn cos(float);
-float2 __ovld __cnfn cos(float2);
-float3 __ovld __cnfn cos(float3);
-float4 __ovld __cnfn cos(float4);
-float8 __ovld __cnfn cos(float8);
-float16 __ovld __cnfn cos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cos(double);
-double2 __ovld __cnfn cos(double2);
-double3 __ovld __cnfn cos(double3);
-double4 __ovld __cnfn cos(double4);
-double8 __ovld __cnfn cos(double8);
-double16 __ovld __cnfn cos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cos(half);
-half2 __ovld __cnfn cos(half2);
-half3 __ovld __cnfn cos(half3);
-half4 __ovld __cnfn cos(half4);
-half8 __ovld __cnfn cos(half8);
-half16 __ovld __cnfn cos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic cosine.
- */
-float __ovld __cnfn cosh(float);
-float2 __ovld __cnfn cosh(float2);
-float3 __ovld __cnfn cosh(float3);
-float4 __ovld __cnfn cosh(float4);
-float8 __ovld __cnfn cosh(float8);
-float16 __ovld __cnfn cosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cosh(double);
-double2 __ovld __cnfn cosh(double2);
-double3 __ovld __cnfn cosh(double3);
-double4 __ovld __cnfn cosh(double4);
-double8 __ovld __cnfn cosh(double8);
-double16 __ovld __cnfn cosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cosh(half);
-half2 __ovld __cnfn cosh(half2);
-half3 __ovld __cnfn cosh(half3);
-half4 __ovld __cnfn cosh(half4);
-half8 __ovld __cnfn cosh(half8);
-half16 __ovld __cnfn cosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cos (PI * x).
- */
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
-#endif //cl_khr_fp16
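-
-// Illustrative sketch (not part of the original header): cospi avoids the
-// error of first multiplying by an inexact PI, so half-integer arguments
-// land on the axis exactly:
-//
-//   float a = cospi(0.5f);         // exactly +0.0f
-//   float b = cos(M_PI_F * 0.5f);  // may be a tiny nonzero value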
-
-/**
- * Complementary error function.
- */
-float __ovld __cnfn erfc(float);
-float2 __ovld __cnfn erfc(float2);
-float3 __ovld __cnfn erfc(float3);
-float4 __ovld __cnfn erfc(float4);
-float8 __ovld __cnfn erfc(float8);
-float16 __ovld __cnfn erfc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erfc(double);
-double2 __ovld __cnfn erfc(double2);
-double3 __ovld __cnfn erfc(double3);
-double4 __ovld __cnfn erfc(double4);
-double8 __ovld __cnfn erfc(double8);
-double16 __ovld __cnfn erfc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erfc(half);
-half2 __ovld __cnfn erfc(half2);
-half3 __ovld __cnfn erfc(half3);
-half4 __ovld __cnfn erfc(half4);
-half8 __ovld __cnfn erfc(half8);
-half16 __ovld __cnfn erfc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Error function encountered in integrating the
- * normal distribution.
- */
-float __ovld __cnfn erf(float);
-float2 __ovld __cnfn erf(float2);
-float3 __ovld __cnfn erf(float3);
-float4 __ovld __cnfn erf(float4);
-float8 __ovld __cnfn erf(float8);
-float16 __ovld __cnfn erf(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erf(double);
-double2 __ovld __cnfn erf(double2);
-double3 __ovld __cnfn erf(double3);
-double4 __ovld __cnfn erf(double4);
-double8 __ovld __cnfn erf(double8);
-double16 __ovld __cnfn erf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erf(half);
-half2 __ovld __cnfn erf(half2);
-half3 __ovld __cnfn erf(half3);
-half4 __ovld __cnfn erf(half4);
-half8 __ovld __cnfn erf(half8);
-half16 __ovld __cnfn erf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e exponential function of x.
- */
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 2 function.
- */
-float __ovld __cnfn exp2(float);
-float2 __ovld __cnfn exp2(float2);
-float3 __ovld __cnfn exp2(float3);
-float4 __ovld __cnfn exp2(float4);
-float8 __ovld __cnfn exp2(float8);
-float16 __ovld __cnfn exp2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp2(double);
-double2 __ovld __cnfn exp2(double2);
-double3 __ovld __cnfn exp2(double3);
-double4 __ovld __cnfn exp2(double4);
-double8 __ovld __cnfn exp2(double8);
-double16 __ovld __cnfn exp2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp2(half);
-half2 __ovld __cnfn exp2(half2);
-half3 __ovld __cnfn exp2(half3);
-half4 __ovld __cnfn exp2(half4);
-half8 __ovld __cnfn exp2(half8);
-half16 __ovld __cnfn exp2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 10 function.
- */
-float __ovld __cnfn exp10(float);
-float2 __ovld __cnfn exp10(float2);
-float3 __ovld __cnfn exp10(float3);
-float4 __ovld __cnfn exp10(float4);
-float8 __ovld __cnfn exp10(float8);
-float16 __ovld __cnfn exp10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp10(double);
-double2 __ovld __cnfn exp10(double2);
-double3 __ovld __cnfn exp10(double3);
-double4 __ovld __cnfn exp10(double4);
-double8 __ovld __cnfn exp10(double8);
-double16 __ovld __cnfn exp10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp10(half);
-half2 __ovld __cnfn exp10(half2);
-half3 __ovld __cnfn exp10(half3);
-half4 __ovld __cnfn exp10(half4);
-half8 __ovld __cnfn exp10(half8);
-half16 __ovld __cnfn exp10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute e^x - 1.0.
- */
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
-#endif //cl_khr_fp16
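-
-// Illustrative sketch (not part of the original header): for small x, expm1
-// avoids the catastrophic cancellation in exp(x) - 1:
-//
-//   float tiny = 1.0e-8f;
-//   float a = expm1(tiny);       // ~1.0e-8f, accurate
-//   float b = exp(tiny) - 1.0f;  // 0.0f: the 1 swamps the tiny term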
-
-/**
- * Compute absolute value of a floating-point number.
- */
-float __ovld __cnfn fabs(float);
-float2 __ovld __cnfn fabs(float2);
-float3 __ovld __cnfn fabs(float3);
-float4 __ovld __cnfn fabs(float4);
-float8 __ovld __cnfn fabs(float8);
-float16 __ovld __cnfn fabs(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fabs(double);
-double2 __ovld __cnfn fabs(double2);
-double3 __ovld __cnfn fabs(double3);
-double4 __ovld __cnfn fabs(double4);
-double8 __ovld __cnfn fabs(double8);
-double16 __ovld __cnfn fabs(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fabs(half);
-half2 __ovld __cnfn fabs(half2);
-half3 __ovld __cnfn fabs(half3);
-half4 __ovld __cnfn fabs(half4);
-half8 __ovld __cnfn fabs(half8);
-half16 __ovld __cnfn fabs(half16);
-#endif //cl_khr_fp16
-
-/**
- * x - y if x > y, +0 if x is less than or equal to y.
- */
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
-#endif //cl_khr_fp16
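-
-// Illustrative sketch (not part of the original header): fdim is the
-// "positive difference", i.e. fmax(x - y, 0.0f) for non-NaN inputs:
-//
-//   float d = fdim(2.0f, 5.0f);  // +0.0f, not -3.0f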
-
-/**
- * Round to integral value using the round to negative
- * infinity rounding mode.
- */
-float __ovld __cnfn floor(float);
-float2 __ovld __cnfn floor(float2);
-float3 __ovld __cnfn floor(float3);
-float4 __ovld __cnfn floor(float4);
-float8 __ovld __cnfn floor(float8);
-float16 __ovld __cnfn floor(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn floor(double);
-double2 __ovld __cnfn floor(double2);
-double3 __ovld __cnfn floor(double3);
-double4 __ovld __cnfn floor(double4);
-double8 __ovld __cnfn floor(double8);
-double16 __ovld __cnfn floor(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn floor(half);
-half2 __ovld __cnfn floor(half2);
-half3 __ovld __cnfn floor(half3);
-half4 __ovld __cnfn floor(half4);
-half8 __ovld __cnfn floor(half8);
-half16 __ovld __cnfn floor(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns the correctly rounded floating-point
- * representation of the sum of c with the infinitely
- * precise product of a and b. Rounding of
- * intermediate products shall not occur. Edge case
- * behavior is per the IEEE 754-2008 standard.
- */
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
-
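-/**
- * Example (illustrative sketch; assumes IEEE-754 single precision
- * with round-to-nearest-even): the single rounding of fma preserves
- * low-order bits that a separate multiply and add would discard.
- *
- *   float a = 0x1.001p+0f;    // 1 + 2^-12
- *   float c = -0x1.002p+0f;   // -(1 + 2^-11)
- *   a * a + c                 // -> 0.0f: a*a rounds to 1 + 2^-11 first
- *   fma(a, a, c)              // -> 0x1.0p-24f: the exact 2^-24 term survives
- */
-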
-/**
- * Returns y if x < y, otherwise it returns x. If one
- * argument is a NaN, fmax() returns the other
- * argument. If both arguments are NaNs, fmax()
- * returns a NaN.
- */
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
-#endif //cl_khr_fp16
-
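-/**
- * Example (illustrative sketch): the NaN handling and the
- * vector/scalar overloads above.
- *
- *   fmax(1.0f, NAN)   // -> 1.0f: the NaN operand is ignored
- *   fmax(NAN, NAN)    // -> NaN
- *   float4 v = (float4)(1.0f, -2.0f, 3.0f, -4.0f);
- *   fmax(v, 0.0f)     // -> (1.0f, 0.0f, 3.0f, 0.0f)
- *
- * fmin below is symmetric.
- */
-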
-/**
- * Returns y if y < x, otherwise it returns x. If one
- * argument is a NaN, fmin() returns the other
- * argument. If both arguments are NaNs, fmin()
- * returns a NaN.
- */
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Modulus. Returns x - y * trunc (x/y).
- */
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
-#endif //cl_khr_fp16
-
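-/**
- * Example (illustrative sketch): fmod gives the result the sign
- * of x; contrast with remainder further below, which picks n as
- * the integer nearest x/y.
- *
- *   fmod(5.5f, 2.0f)        // -> 1.5f
- *   fmod(-5.5f, 2.0f)       // -> -1.5f
- *   remainder(5.5f, 2.0f)   // -> -0.5f, since n = 3 is nearest 2.75
- */
-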
-/**
- * Returns fmin(x - floor(x), 0x1.fffffep-1f).
- * floor(x) is returned in iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
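-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above):
- *
- *   float ip;
- *   float f = fract(-0.25f, &ip);   // f = 0.75f, ip = -1.0f
- *
- * The fractional result is always in [0, 1), even for negative x.
- */
-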
-/**
- * Extract mantissa and exponent from x. For each
- * component the mantissa returned is a floating-point
- * value with magnitude in the interval [1/2, 1) or 0.
- * Each component of x equals the mantissa returned * 2^exp.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
-#endif //cl_khr_fp16
-#else
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
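-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above):
- *
- *   int e;
- *   float m = frexp(12.0f, &e);   // m = 0.75f, e = 4: 12 = 0.75 * 2^4
- */
-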
-/**
- * Compute the value of the square root of x^2 + y^2
- * without undue overflow or underflow.
- */
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
-#endif //cl_khr_fp16
-
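-/**
- * Example (illustrative sketch): hypot stays finite where the
- * naive formula overflows.
- *
- *   hypot(3.0f, 4.0f)         // -> 5.0f
- *   hypot(3.0e20f, 4.0e20f)   // -> 5.0e20f; sqrt(x*x + y*y) would
- *                             //    overflow, since x*x > FLT_MAX
- */
-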
-/**
- * Return the exponent as an integer value.
- */
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
-#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Multiply x by 2 to the power n.
- */
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
-#endif //cl_khr_fp16
-
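-/**
- * Example (illustrative sketch): ldexp is the inverse of frexp
- * above.
- *
- *   ldexp(0.75f, 4)   // -> 12.0f, i.e. 0.75 * 2^4
- */
-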
-/**
- * Log gamma function. Returns the natural
- * logarithm of the absolute value of the gamma
- * function. The sign of the gamma function is
- * returned in the signp argument of lgamma_r.
- */
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
-#endif //cl_khr_fp16
-#else
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
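-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above): recovering the signed gamma value.
- *
- *   int s;
- *   float lg = lgamma_r(-2.5f, &s);   // s = -1: gamma(-2.5) ~ -0.945
- *   float g  = s * exp(lg);           // signed gamma(x)
- */
-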
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn log(float);
-float2 __ovld __cnfn log(float2);
-float3 __ovld __cnfn log(float3);
-float4 __ovld __cnfn log(float4);
-float8 __ovld __cnfn log(float8);
-float16 __ovld __cnfn log(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log(double);
-double2 __ovld __cnfn log(double2);
-double3 __ovld __cnfn log(double3);
-double4 __ovld __cnfn log(double4);
-double8 __ovld __cnfn log(double8);
-double16 __ovld __cnfn log(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log(half);
-half2 __ovld __cnfn log(half2);
-half3 __ovld __cnfn log(half3);
-half4 __ovld __cnfn log(half4);
-half8 __ovld __cnfn log(half8);
-half16 __ovld __cnfn log(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn log2(float);
-float2 __ovld __cnfn log2(float2);
-float3 __ovld __cnfn log2(float3);
-float4 __ovld __cnfn log2(float4);
-float8 __ovld __cnfn log2(float8);
-float16 __ovld __cnfn log2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log2(double);
-double2 __ovld __cnfn log2(double2);
-double3 __ovld __cnfn log2(double3);
-double4 __ovld __cnfn log2(double4);
-double8 __ovld __cnfn log2(double8);
-double16 __ovld __cnfn log2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log2(half);
-half2 __ovld __cnfn log2(half2);
-half3 __ovld __cnfn log2(half3);
-half4 __ovld __cnfn log2(half4);
-half8 __ovld __cnfn log2(half8);
-half16 __ovld __cnfn log2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn log10(float);
-float2 __ovld __cnfn log10(float2);
-float3 __ovld __cnfn log10(float3);
-float4 __ovld __cnfn log10(float4);
-float8 __ovld __cnfn log10(float8);
-float16 __ovld __cnfn log10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log10(double);
-double2 __ovld __cnfn log10(double2);
-double3 __ovld __cnfn log10(double3);
-double4 __ovld __cnfn log10(double4);
-double8 __ovld __cnfn log10(double8);
-double16 __ovld __cnfn log10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log10(half);
-half2 __ovld __cnfn log10(half2);
-half3 __ovld __cnfn log10(half3);
-half4 __ovld __cnfn log10(half4);
-half8 __ovld __cnfn log10(half8);
-half16 __ovld __cnfn log10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base e logarithm of (1.0 + x).
- */
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
-#endif //cl_khr_fp16
-
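-/**
- * Example (illustrative sketch): log1p keeps accuracy for small x,
- * where forming 1.0f + x first loses it to rounding.
- *
- *   log1p(1.0e-8f)        // -> ~1.0e-8f
- *   log(1.0f + 1.0e-8f)   // -> 0.0f: 1 + 1e-8 rounds to 1.0f
- */
-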
-/**
- * Compute the exponent of x, which is the integral
- * part of log_r(|x|), where r is the radix of the
- * floating-point representation.
- */
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * mad approximates a * b + c. Whether or how the
- * product of a * b is rounded and how supernormal or
- * subnormal intermediate products are handled is not
- * defined. mad is intended to be used where speed is
- * preferred over accuracy.
- */
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
-
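-/**
- * Usage note (illustrative): prefer mad where throughput matters
- * and the fma guarantees above are not needed.
- *
- *   mad(a, b, c)   // may compile to one multiply-add, precision unspecified
- *   fma(a, b, c)   // exactly one rounding, per IEEE 754-2008
- */
-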
-/**
- * Returns x if | x | > | y |, y if | y | > | x |, otherwise
- * fmax(x, y).
- */
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns x if | x | < | y |, y if | y | < | x |, otherwise
- * fmin(x, y).
- */
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Decompose a floating-point number. The modf
- * function breaks the argument x into integral and
- * fractional parts, each of which has the same sign as
- * the argument. It stores the integral part in the object
- * pointed to by iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
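-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above):
- *
- *   float ip;
- *   float fr = modf(-3.25f, &ip);   // fr = -0.25f, ip = -3.0f
- *
- * Both parts carry the sign of x.
- */
-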
-/**
- * Returns a quiet NaN. The nancode may be placed
- * in the significand of the resulting NaN.
- */
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
-#endif //cl_khr_fp16
-
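-/**
- * Example (illustrative sketch):
- *
- *   float q = nan(0u);    // a quiet NaN; isnan(q) -> 1
- *   float t = nan(42u);   // the nancode 42 may be stored in the significand
- */
-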
-/**
- * Computes the next representable floating-point value,
- * in the precision of the arguments, following x in the direction of
- * y. Thus, if y is less than x, nextafter() returns the
- * largest representable floating-point number less
- * than x.
- */
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
-#endif //cl_khr_fp16
-
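-/**
- * Example (illustrative sketch; assumes IEEE-754 single precision):
- *
- *   nextafter(1.0f, 2.0f)   // -> 0x1.000002p+0f, i.e. 1 + 2^-23
- *   nextafter(1.0f, 0.0f)   // -> 0x1.fffffep-1f, the largest float < 1.0
- */
-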
-/**
- * Compute x to the power y.
- */
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where y is an integer.
- */
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
-#endif //cl_khr_fp16
-
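-/**
- * Example (illustrative sketch): powr follows exp(y * log(x))
- * edge-case rules, so it can differ from pow at the corners.
- *
- *   pow(0.0f, 0.0f)    // -> 1.0f
- *   powr(0.0f, 0.0f)   // -> NaN
- */
-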
-/**
- * Compute the value r such that r = x - n*y, where n
- * is the integer nearest the exact value of x/y. If there
- * are two integers closest to x/y, n shall be the even
- * one. If r is zero, it is given the same sign as x.
- */
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * The remquo function computes the value r such
- * that r = x - n*y, where n is the integer nearest the
- * exact value of x/y. If there are two integers closest
- * to x/y, n shall be the even one. If r is zero, it is
- * given the same sign as x. This is the same value
- * that is returned by the remainder function.
- * remquo also calculates the lower seven bits of the
- * integral quotient x/y, and gives that value the same
- * sign as x/y. It stores this signed value in the object
- * pointed to by quo.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-#endif //cl_khr_fp16
-#else
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
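-
-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above):
- *
- *   int q;
- *   float r = remquo(7.0f, 2.0f, &q);   // r = -1.0f, q = 4
- *   // 7/2 = 3.5 is midway between 3 and 4, so the even n = 4 is used
- */
-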
-/**
- * Round to integral value (using round to nearest
- * even rounding mode) in floating-point format.
- * Refer to section 7.1 for description of rounding
- * modes.
- */
-float __ovld __cnfn rint(float);
-float2 __ovld __cnfn rint(float2);
-float3 __ovld __cnfn rint(float3);
-float4 __ovld __cnfn rint(float4);
-float8 __ovld __cnfn rint(float8);
-float16 __ovld __cnfn rint(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rint(double);
-double2 __ovld __cnfn rint(double2);
-double3 __ovld __cnfn rint(double3);
-double4 __ovld __cnfn rint(double4);
-double8 __ovld __cnfn rint(double8);
-double16 __ovld __cnfn rint(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rint(half);
-half2 __ovld __cnfn rint(half2);
-half3 __ovld __cnfn rint(half3);
-half4 __ovld __cnfn rint(half4);
-half8 __ovld __cnfn rint(half8);
-half16 __ovld __cnfn rint(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power 1/y.
- */
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Return the integral value nearest to x rounding
- * halfway cases away from zero, regardless of the
- * current rounding direction.
- */
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
-#endif //cl_khr_fp16
-
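-/**
- * Example (illustrative sketch): how the rounding family treats
- * halfway cases.
- *
- *   rint(2.5f)     // -> 2.0f, ties to even
- *   round(2.5f)    // -> 3.0f, ties away from zero
- *   round(-2.5f)   // -> -3.0f
- */
-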
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn rsqrt(float);
-float2 __ovld __cnfn rsqrt(float2);
-float3 __ovld __cnfn rsqrt(float3);
-float4 __ovld __cnfn rsqrt(float4);
-float8 __ovld __cnfn rsqrt(float8);
-float16 __ovld __cnfn rsqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rsqrt(double);
-double2 __ovld __cnfn rsqrt(double2);
-double3 __ovld __cnfn rsqrt(double3);
-double4 __ovld __cnfn rsqrt(double4);
-double8 __ovld __cnfn rsqrt(double8);
-double16 __ovld __cnfn rsqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rsqrt(half);
-half2 __ovld __cnfn rsqrt(half2);
-half3 __ovld __cnfn rsqrt(half3);
-half4 __ovld __cnfn rsqrt(half4);
-half8 __ovld __cnfn rsqrt(half8);
-half16 __ovld __cnfn rsqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine.
- */
-float __ovld __cnfn sin(float);
-float2 __ovld __cnfn sin(float2);
-float3 __ovld __cnfn sin(float3);
-float4 __ovld __cnfn sin(float4);
-float8 __ovld __cnfn sin(float8);
-float16 __ovld __cnfn sin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sin(double);
-double2 __ovld __cnfn sin(double2);
-double3 __ovld __cnfn sin(double3);
-double4 __ovld __cnfn sin(double4);
-double8 __ovld __cnfn sin(double8);
-double16 __ovld __cnfn sin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sin(half);
-half2 __ovld __cnfn sin(half2);
-half3 __ovld __cnfn sin(half3);
-half4 __ovld __cnfn sin(half4);
-half8 __ovld __cnfn sin(half8);
-half16 __ovld __cnfn sin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine and cosine of x. The computed sine
- * is the return value and computed cosine is returned
- * in cosval.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
-#endif //cl_khr_fp16
-#else
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
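-/**
- * Example (illustrative sketch; pointer syntax assumes the generic
- * or __private overloads above): one call yields both values.
- *
- *   float c;
- *   float s = sincos(0.0f, &c);   // s = 0.0f, c = 1.0f
- */
-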
-/**
- * Compute hyperbolic sine.
- */
-float __ovld __cnfn sinh(float);
-float2 __ovld __cnfn sinh(float2);
-float3 __ovld __cnfn sinh(float3);
-float4 __ovld __cnfn sinh(float4);
-float8 __ovld __cnfn sinh(float8);
-float16 __ovld __cnfn sinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinh(double);
-double2 __ovld __cnfn sinh(double2);
-double3 __ovld __cnfn sinh(double3);
-double4 __ovld __cnfn sinh(double4);
-double8 __ovld __cnfn sinh(double8);
-double16 __ovld __cnfn sinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinh(half);
-half2 __ovld __cnfn sinh(half2);
-half3 __ovld __cnfn sinh(half3);
-half4 __ovld __cnfn sinh(half4);
-half8 __ovld __cnfn sinh(half8);
-half16 __ovld __cnfn sinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sin (PI * x).
- */
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
-#endif //cl_khr_fp16
-
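-/**
- * Example (illustrative sketch): sinpi is exact at integers,
- * where multiplying by an inexact PI is not.
- *
- *   sinpi(1.0f)          // -> 0.0f
- *   sin(M_PI_F * 1.0f)   // -> ~ -8.7e-8f: M_PI_F != pi exactly
- */
-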
-/**
- * Compute square root.
- */
-float __ovld __cnfn sqrt(float);
-float2 __ovld __cnfn sqrt(float2);
-float3 __ovld __cnfn sqrt(float3);
-float4 __ovld __cnfn sqrt(float4);
-float8 __ovld __cnfn sqrt(float8);
-float16 __ovld __cnfn sqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sqrt(double);
-double2 __ovld __cnfn sqrt(double2);
-double3 __ovld __cnfn sqrt(double3);
-double4 __ovld __cnfn sqrt(double4);
-double8 __ovld __cnfn sqrt(double8);
-double16 __ovld __cnfn sqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sqrt(half);
-half2 __ovld __cnfn sqrt(half2);
-half3 __ovld __cnfn sqrt(half3);
-half4 __ovld __cnfn sqrt(half4);
-half8 __ovld __cnfn sqrt(half8);
-half16 __ovld __cnfn sqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tangent.
- */
-float __ovld __cnfn tan(float);
-float2 __ovld __cnfn tan(float2);
-float3 __ovld __cnfn tan(float3);
-float4 __ovld __cnfn tan(float4);
-float8 __ovld __cnfn tan(float8);
-float16 __ovld __cnfn tan(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tan(double);
-double2 __ovld __cnfn tan(double2);
-double3 __ovld __cnfn tan(double3);
-double4 __ovld __cnfn tan(double4);
-double8 __ovld __cnfn tan(double8);
-double16 __ovld __cnfn tan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tan(half);
-half2 __ovld __cnfn tan(half2);
-half3 __ovld __cnfn tan(half3);
-half4 __ovld __cnfn tan(half4);
-half8 __ovld __cnfn tan(half8);
-half16 __ovld __cnfn tan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic tangent.
- */
-float __ovld __cnfn tanh(float);
-float2 __ovld __cnfn tanh(float2);
-float3 __ovld __cnfn tanh(float3);
-float4 __ovld __cnfn tanh(float4);
-float8 __ovld __cnfn tanh(float8);
-float16 __ovld __cnfn tanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanh(double);
-double2 __ovld __cnfn tanh(double2);
-double3 __ovld __cnfn tanh(double3);
-double4 __ovld __cnfn tanh(double4);
-double8 __ovld __cnfn tanh(double8);
-double16 __ovld __cnfn tanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanh(half);
-half2 __ovld __cnfn tanh(half2);
-half3 __ovld __cnfn tanh(half3);
-half4 __ovld __cnfn tanh(half4);
-half8 __ovld __cnfn tanh(half8);
-half16 __ovld __cnfn tanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tan (PI * x).
- */
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the gamma function.
- */
-float __ovld __cnfn tgamma(float);
-float2 __ovld __cnfn tgamma(float2);
-float3 __ovld __cnfn tgamma(float3);
-float4 __ovld __cnfn tgamma(float4);
-float8 __ovld __cnfn tgamma(float8);
-float16 __ovld __cnfn tgamma(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tgamma(double);
-double2 __ovld __cnfn tgamma(double2);
-double3 __ovld __cnfn tgamma(double3);
-double4 __ovld __cnfn tgamma(double4);
-double8 __ovld __cnfn tgamma(double8);
-double16 __ovld __cnfn tgamma(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tgamma(half);
-half2 __ovld __cnfn tgamma(half2);
-half3 __ovld __cnfn tgamma(half3);
-half4 __ovld __cnfn tgamma(half4);
-half8 __ovld __cnfn tgamma(half8);
-half16 __ovld __cnfn tgamma(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to an integral value using the round-to-zero
- * rounding mode.
- */
-float __ovld __cnfn trunc(float);
-float2 __ovld __cnfn trunc(float2);
-float3 __ovld __cnfn trunc(float3);
-float4 __ovld __cnfn trunc(float4);
-float8 __ovld __cnfn trunc(float8);
-float16 __ovld __cnfn trunc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn trunc(double);
-double2 __ovld __cnfn trunc(double2);
-double3 __ovld __cnfn trunc(double3);
-double4 __ovld __cnfn trunc(double4);
-double8 __ovld __cnfn trunc(double8);
-double16 __ovld __cnfn trunc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn trunc(half);
-half2 __ovld __cnfn trunc(half2);
-half3 __ovld __cnfn trunc(half3);
-half4 __ovld __cnfn trunc(half4);
-half8 __ovld __cnfn trunc(half8);
-half16 __ovld __cnfn trunc(half16);
-#endif //cl_khr_fp16
-
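-/*
- * Illustrative usage sketch: trunc rounds toward zero, unlike floor,
- * which rounds toward negative infinity.
- *     trunc(-1.5f)   // -1.0f
- *     floor(-1.5f)   // -2.0f
- */
-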
-/**
- * Compute cosine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
-
-/**
- * Compute x / y.
- */
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x.
- */
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x.
- */
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x.
- */
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
-
-/**
- * Compute the natural logarithm.
- */
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
-
-/**
- * Compute the base-2 logarithm.
- */
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
-
-/**
- * Compute the base-10 logarithm.
- */
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal.
- */
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
-
-/**
- * Compute sine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
-
-/**
- * Compute tangent. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
-
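-/*
- * Illustrative note: the half_ variants trade accuracy for speed, and the
- * trigonometric ones only accept arguments in -2^16 ... +2^16.
- *     float fast = half_sin(x);   // reduced precision, x within range
- *     float full = sin(x);        // full-precision reference
- */
-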
-/**
- * Compute cosine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
-
-/**
- * Compute x / y over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
-
-/**
- * Compute the natural logarithm over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
-
-/**
- * Compute the base-2 logarithm over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
-
-/**
- * Compute the base-10 logarithm over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0. The range of
- * x and y are implementation-defined. The maximum error
- * is implementation-defined.
- */
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
-
-/**
- * Compute the inverse square root over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
-
-/**
- * Compute sine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
-
-/**
- * Compute square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
-
-/**
- * Compute tangent over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
-
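-/*
- * Illustrative note: the native_ variants map to whatever hardware
- * instruction is available, so both the input range and the accuracy vary
- * by device, and results need not be reproducible across implementations.
- *     float r = native_rsqrt(x);       // fastest, device-defined accuracy
- *     float q = native_divide(a, b);   // may compile to a single instruction
- */
-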
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
-
-/**
- * Returns | x |.
- */
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
-
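-/*
- * Illustrative usage sketch: abs returns the corresponding unsigned type,
- * so even the most negative input has a representable result.
- *     uchar r = abs((char)-128);   // 128, which does not fit in char
- */
-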
-/**
- * Returns | x - y | without modulo overflow.
- */
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
-
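-/*
- * Illustrative usage sketch: abs_diff avoids the wrap-around that plain
- * subtraction can produce.
- *     uint d = abs_diff(10u, 4000000000u);   // 3999999990, no wrap-around
- */
-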
-/**
- * Returns x + y and saturates the result.
- */
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
-
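-/*
- * Illustrative usage sketch: add_sat clamps to the representable range
- * instead of wrapping.
- *     uchar a = add_sat((uchar)250, (uchar)10);   // 255, not 4
- *     char  b = add_sat((char)120, (char)20);     // 127, not -116
- */
-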
-/**
- * Returns (x + y) >> 1. The intermediate sum is
- * computed without modulo overflow.
- */
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
-
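-/*
- * Illustrative usage sketch: hadd yields the average rounded down with no
- * intermediate overflow; rhadd (below) rounds the average up instead.
- *     uchar m = hadd((uchar)255, (uchar)255);   // 255; 510 >> 1 with no wrap
- */
-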
-/**
- * Returns (x + y + 1) >> 1. The intermediate sum is
- * computed without modulo overflow.
- */
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
-
-/**
- * Returns min(max(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
-
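-/*
- * Illustrative usage sketch: the scalar-bound overloads apply the same
- * limits to every component of a vector.
- *     int4 v = (int4)(-5, 0, 7, 99);
- *     int4 c = clamp(v, 0, 10);   // (0, 0, 7, 10)
- */
-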
-/**
- * Returns the number of leading 0-bits in x, starting
- * at the most significant bit position.
- */
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
-
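-/*
- * Illustrative usage sketch: the count is relative to the width of the
- * argument's own type.
- *     clz((uchar)1)   // 7, in an 8-bit type
- *     clz(1u)         // 31, in a 32-bit type
- */
-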
-/**
- * Returns the count of trailing 0-bits in x. If x is 0,
- * returns the size in bits of the type of x (or of the
- * component type, if x is a vector).
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld __cnfn ctz(char x);
-uchar __ovld __cnfn ctz(uchar x);
-char2 __ovld __cnfn ctz(char2 x);
-uchar2 __ovld __cnfn ctz(uchar2 x);
-char3 __ovld __cnfn ctz(char3 x);
-uchar3 __ovld __cnfn ctz(uchar3 x);
-char4 __ovld __cnfn ctz(char4 x);
-uchar4 __ovld __cnfn ctz(uchar4 x);
-char8 __ovld __cnfn ctz(char8 x);
-uchar8 __ovld __cnfn ctz(uchar8 x);
-char16 __ovld __cnfn ctz(char16 x);
-uchar16 __ovld __cnfn ctz(uchar16 x);
-short __ovld __cnfn ctz(short x);
-ushort __ovld __cnfn ctz(ushort x);
-short2 __ovld __cnfn ctz(short2 x);
-ushort2 __ovld __cnfn ctz(ushort2 x);
-short3 __ovld __cnfn ctz(short3 x);
-ushort3 __ovld __cnfn ctz(ushort3 x);
-short4 __ovld __cnfn ctz(short4 x);
-ushort4 __ovld __cnfn ctz(ushort4 x);
-short8 __ovld __cnfn ctz(short8 x);
-ushort8 __ovld __cnfn ctz(ushort8 x);
-short16 __ovld __cnfn ctz(short16 x);
-ushort16 __ovld __cnfn ctz(ushort16 x);
-int __ovld __cnfn ctz(int x);
-uint __ovld __cnfn ctz(uint x);
-int2 __ovld __cnfn ctz(int2 x);
-uint2 __ovld __cnfn ctz(uint2 x);
-int3 __ovld __cnfn ctz(int3 x);
-uint3 __ovld __cnfn ctz(uint3 x);
-int4 __ovld __cnfn ctz(int4 x);
-uint4 __ovld __cnfn ctz(uint4 x);
-int8 __ovld __cnfn ctz(int8 x);
-uint8 __ovld __cnfn ctz(uint8 x);
-int16 __ovld __cnfn ctz(int16 x);
-uint16 __ovld __cnfn ctz(uint16 x);
-long __ovld __cnfn ctz(long x);
-ulong __ovld __cnfn ctz(ulong x);
-long2 __ovld __cnfn ctz(long2 x);
-ulong2 __ovld __cnfn ctz(ulong2 x);
-long3 __ovld __cnfn ctz(long3 x);
-ulong3 __ovld __cnfn ctz(ulong3 x);
-long4 __ovld __cnfn ctz(long4 x);
-ulong4 __ovld __cnfn ctz(ulong4 x);
-long8 __ovld __cnfn ctz(long8 x);
-ulong8 __ovld __cnfn ctz(ulong8 x);
-long16 __ovld __cnfn ctz(long16 x);
-ulong16 __ovld __cnfn ctz(ulong16 x);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Returns mul_hi(a, b) + c.
- */
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns a * b + c and saturates the result.
- */
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns y if x < y, otherwise it returns x.
- */
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
-
-/**
- * Returns y if y < x, otherwise it returns x.
- */
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
-
-/**
- * Computes x * y and returns the high half of the
- * product.
- */
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
-
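-/*
- * Illustrative usage sketch: pairing mul_hi with the ordinary low half
- * reconstructs the full double-width product (for some uint a, b).
- *     uint  lo = a * b;            // low 32 bits
- *     uint  hi = mul_hi(a, b);     // high 32 bits
- *     ulong p  = upsample(hi, lo); // full 64-bit product (upsample: below)
- */
-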
-/**
- * For each element in v, the bits are shifted left by
- * the number of bits given by the corresponding
- * element in i (subject to the usual shift modulo rules
- * described in section 6.3). Bits shifted off the left
- * side of the element are shifted back in from the
- * right.
- */
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
-
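-/*
- * Illustrative usage sketch: bits shifted out on the left re-enter on the
- * right.
- *     rotate((uchar)0x81, (uchar)1)   // 0x03
- */
-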
-/**
- * Returns x - y and saturates the result.
- */
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
-
-/**
- * result[i] = ((short)hi[i] << 8) | lo[i]
- * result[i] = ((ushort)hi[i] << 8) | lo[i]
- */
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
-
-/**
- * result[i] = ((int)hi[i] << 16) | lo[i]
- * result[i] = ((uint)hi[i] << 16) | lo[i]
- */
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
-/**
- * result[i] = ((long)hi[i] << 32) | lo[i]
- * result[i] = ((ulong)hi[i] << 32) | lo[i]
- */
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
-
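-// Illustrative sketch (editor's note, not part of the original header):
-// upsample concatenates hi and lo into a value of twice the width, e.g.
-//   short s = upsample((char)0x12, (uchar)0x34);          // s == 0x1234
-//   uint  u = upsample((ushort)0xABCD, (ushort)0xEF01);   // u == 0xABCDEF01
-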
-/*
- * popcount(x): returns the number of set bits in x
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
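-// Illustrative sketch (editor's note, not part of the original header):
-//   uint n = popcount(0xF0F0F0F0u);  // n == 16 (sixteen set bits)
-//   int  m = popcount(0);            // m == 0
-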
-/**
- * Multiply two 24-bit integer values x and y and add
- * the 32-bit integer result to the 32-bit integer z.
- * Refer to the definition of mul24 to see how the 24-bit
- * integer multiplication is performed.
- */
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
-
-/**
- * Multiply two 24-bit integer values x and y. x and y
- * are 32-bit integers but only the low 24 bits are used
- * to perform the multiplication. mul24 should only be
- * used when values in x and y are in the range
- * [-2^23, 2^23-1] if x and y are signed integers and in
- * the range [0, 2^24-1] if x and y are unsigned integers.
- * If x and y are not in this range, the multiplication
- * result is implementation-defined.
- */
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
-
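-// Illustrative sketch (editor's note, not part of the original header):
-// within the 24-bit range mul24/mad24 match ordinary arithmetic but may
-// map to faster hardware multipliers:
-//   int p = mul24(1 << 11, 1 << 12);  // p == 1 << 23 (in range, exact)
-//   int q = mad24(300, 400, 5);       // q == 120005
-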
-// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
-
-/**
- * Returns fmin(fmax(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
-#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
-#endif //cl_khr_fp16
-
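-// Illustrative sketch (editor's note, not part of the original header):
-// the scalar minval/maxval overloads apply the same bounds to every
-// component, e.g.
-//   float4 v = (float4)(-0.5f, 0.25f, 1.5f, 0.75f);
-//   float4 r = clamp(v, 0.0f, 1.0f);  // r == (0.0f, 0.25f, 1.0f, 0.75f)
-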
-/**
- * Converts radians to degrees, i.e. (180 / PI) *
- * radians.
- */
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
-#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if x < y, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the linear blend of x and y implemented as:
- * x + (y - x) * a
- * a must be a value in the range 0.0 ... 1.0. If a is not
- * in the range 0.0 ... 1.0, the return values are
- * undefined.
- */
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
-#endif //cl_khr_fp16
-
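-// Illustrative sketch (editor's note, not part of the original header):
-// mix is a linear interpolation, x + (y - x) * a, e.g.
-//   float r = mix(0.0f, 10.0f, 0.25f);  // r == 2.5f
-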
-/**
- * Converts degrees to radians, i.e. (PI / 180) *
- * degrees.
- */
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
-#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x < edge, otherwise it returns 1.0.
- */
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
- * performs smooth Hermite interpolation between 0
- * and 1 when edge0 < x < edge1. This is useful in
- * cases where you would want a threshold function
- * with a smooth transition.
- * This is equivalent to:
- * gentype t;
- * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
- * return t * t * (3 - 2 * t);
- * Results are undefined if edge0 >= edge1 or if x,
- * edge0 or edge1 is a NaN.
- */
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
-#endif //cl_khr_fp16
-
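-// Illustrative sketch (editor's note, not part of the original header),
-// following the equivalence given above:
-//   float t = clamp((x - edge0) / (edge1 - edge0), 0.0f, 1.0f);
-//   float r = t * t * (3.0f - 2.0f * t);  // == smoothstep(edge0, edge1, x)
-//   // e.g. smoothstep(0.0f, 1.0f, 0.5f) == 0.5f
-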
-/**
- * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
- * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
- */
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
-
-/**
- * Returns the cross product of p0.xyz and p1.xyz. The
- * w component of float4 result returned will be 0.0.
- */
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
-#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
-#endif //cl_khr_fp16
-
-/**
- * Compute dot product.
- */
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the distance between p0 and p1. This is
- * calculated as length(p0 - p1).
- */
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p, i.e.,
- * sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1.
- */
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns fast_length(p0 - p1).
- */
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p computed as:
- * half_sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1. fast_normalize is computed as:
- * p * half_rsqrt (p.x^2 + p.y^2 + ... )
- * The result shall be within 8192 ulps of the
- * infinitely precise result of
- * if (all(p == 0.0f))
- * result = p;
- * else
- * result = p / sqrt (p.x^2 + p.y^2 + ...);
- * with the following exceptions:
- * 1) If the sum of squares is greater than FLT_MAX
- * then the values of the elements in the result
- * vector are undefined.
- * 2) If the sum of squares is less than FLT_MIN then
- * the implementation may return p unchanged.
- * 3) If the device is in "denorms are flushed to zero"
- * mode, individual operand elements with magnitude
- * less than sqrt(FLT_MIN) may be flushed to zero
- * before proceeding with the calculation.
- */
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
-#endif //cl_khr_fp16
-
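-// Illustrative sketch (editor's note, not part of the original header):
-// per the description above, fast_normalize trades accuracy (up to 8192
-// ulps) for speed, roughly
-//   fast_normalize(p) ~= p * half_rsqrt(dot(p, p));
-// Prefer normalize() when full precision matters.
-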
-// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
-
-/**
- * intn isequal (floatn x, floatn y)
- * Returns the component-wise compare of x == y.
- */
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x != y.
- */
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x > y.
- */
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x >= y.
- */
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x < y.
- */
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x <= y.
- */
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of
- * (x < y) || (x > y) .
- */
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for finite value.
- */
-int __ovld __cnfn isfinite(float);
-int2 __ovld __cnfn isfinite(float2);
-int3 __ovld __cnfn isfinite(float3);
-int4 __ovld __cnfn isfinite(float4);
-int8 __ovld __cnfn isfinite(float8);
-int16 __ovld __cnfn isfinite(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isfinite(double);
-long2 __ovld __cnfn isfinite(double2);
-long3 __ovld __cnfn isfinite(double3);
-long4 __ovld __cnfn isfinite(double4);
-long8 __ovld __cnfn isfinite(double8);
-long16 __ovld __cnfn isfinite(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isfinite(half);
-short2 __ovld __cnfn isfinite(half2);
-short3 __ovld __cnfn isfinite(half3);
-short4 __ovld __cnfn isfinite(half4);
-short8 __ovld __cnfn isfinite(half8);
-short16 __ovld __cnfn isfinite(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for an infinity value (positive or negative).
- */
-int __ovld __cnfn isinf(float);
-int2 __ovld __cnfn isinf(float2);
-int3 __ovld __cnfn isinf(float3);
-int4 __ovld __cnfn isinf(float4);
-int8 __ovld __cnfn isinf(float8);
-int16 __ovld __cnfn isinf(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isinf(double);
-long2 __ovld __cnfn isinf(double2);
-long3 __ovld __cnfn isinf(double3);
-long4 __ovld __cnfn isinf(double4);
-long8 __ovld __cnfn isinf(double8);
-long16 __ovld __cnfn isinf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isinf(half);
-short2 __ovld __cnfn isinf(half2);
-short3 __ovld __cnfn isinf(half3);
-short4 __ovld __cnfn isinf(half4);
-short8 __ovld __cnfn isinf(half8);
-short16 __ovld __cnfn isinf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a NaN.
- */
-int __ovld __cnfn isnan(float);
-int2 __ovld __cnfn isnan(float2);
-int3 __ovld __cnfn isnan(float3);
-int4 __ovld __cnfn isnan(float4);
-int8 __ovld __cnfn isnan(float8);
-int16 __ovld __cnfn isnan(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnan(double);
-long2 __ovld __cnfn isnan(double2);
-long3 __ovld __cnfn isnan(double3);
-long4 __ovld __cnfn isnan(double4);
-long8 __ovld __cnfn isnan(double8);
-long16 __ovld __cnfn isnan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnan(half);
-short2 __ovld __cnfn isnan(half2);
-short3 __ovld __cnfn isnan(half3);
-short4 __ovld __cnfn isnan(half4);
-short8 __ovld __cnfn isnan(half8);
-short16 __ovld __cnfn isnan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a normal value.
- */
-int __ovld __cnfn isnormal(float);
-int2 __ovld __cnfn isnormal(float2);
-int3 __ovld __cnfn isnormal(float3);
-int4 __ovld __cnfn isnormal(float4);
-int8 __ovld __cnfn isnormal(float8);
-int16 __ovld __cnfn isnormal(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnormal(double);
-long2 __ovld __cnfn isnormal(double2);
-long3 __ovld __cnfn isnormal(double3);
-long4 __ovld __cnfn isnormal(double4);
-long8 __ovld __cnfn isnormal(double8);
-long16 __ovld __cnfn isnormal(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnormal(half);
-short2 __ovld __cnfn isnormal(half2);
-short3 __ovld __cnfn isnormal(half3);
-short4 __ovld __cnfn isnormal(half4);
-short8 __ovld __cnfn isnormal(half8);
-short16 __ovld __cnfn isnormal(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are ordered. isordered() takes
- * arguments x and y, and returns the result
- * isequal(x, x) && isequal(y, y).
- */
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are unordered. isunordered()
- * takes arguments x and y, returning non-zero if x or y
- * is NaN, and zero otherwise.
- */
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for sign bit. The scalar version of the function
- * returns 1 if the sign bit in the float is set, and 0
- * otherwise. The vector version returns, for each
- * component in floatn, -1 if the sign bit in that float
- * is set, and 0 otherwise.
- */
-int __ovld __cnfn signbit(float);
-int2 __ovld __cnfn signbit(float2);
-int3 __ovld __cnfn signbit(float3);
-int4 __ovld __cnfn signbit(float4);
-int8 __ovld __cnfn signbit(float8);
-int16 __ovld __cnfn signbit(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn signbit(double);
-long2 __ovld __cnfn signbit(double2);
-long3 __ovld __cnfn signbit(double3);
-long4 __ovld __cnfn signbit(double4);
-long8 __ovld __cnfn signbit(double8);
-long16 __ovld __cnfn signbit(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn signbit(half);
-short2 __ovld __cnfn signbit(half2);
-short3 __ovld __cnfn signbit(half3);
-short4 __ovld __cnfn signbit(half4);
-short8 __ovld __cnfn signbit(half8);
-short16 __ovld __cnfn signbit(half16);
-#endif //cl_khr_fp16
-
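-// Illustrative sketch (editor's note, not part of the original header):
-// note the scalar/vector asymmetry described above:
-//   int  s = signbit(-0.0f);                  // s == 1
-//   int2 v = signbit((float2)(-1.0f, 1.0f));  // v == (int2)(-1, 0)
-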
-/**
- * Returns 1 if the most significant bit in any component
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
-
-/**
- * Returns 1 if the most significant bit in all components
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
-
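-// Illustrative sketch (editor's note, not part of the original header):
-// vector comparisons return -1 (all bits set) for true components, so
-// the MSB test makes any/all natural predicates over comparison results:
-//   int4 m = (v > 0.0f);  // v is some float4 (assumed)
-//   if (any(m)) { /* at least one component of v is positive */ }
-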
-/**
- * Each bit of the result is the corresponding bit of a if
- * the corresponding bit of c is 0. Otherwise it is the
- * corresponding bit of b.
- */
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
-
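-// Illustrative sketch (editor's note, not part of the original header):
-// for integer types bitselect is equivalent to (a & ~c) | (b & c), e.g.
-//   uint r = bitselect(0x0Fu, 0xF0u, 0x33u);  // r == 0x3C
-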
-/**
- * For each component of a vector type,
- * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
- * For a scalar type, result = c ? b : a.
- * b and a must have the same type.
- * c must have the same number of elements and bits as a.
- */
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
-
-#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
-#endif //cl_khr_fp16
-
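-// Illustrative sketch (editor's note, not part of the original header):
-// comparisons return -1/0 per component, so select pairs naturally with
-// them:
-//   int2 r = select((int2)(1, 2), (int2)(3, 4), (int2)(-1, 0));
-//   // r == (int2)(3, 2): MSB set in c[0] picks b[0]; clear c[1] picks a[1]
-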
-// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
-// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
-/**
- * Use generic type gentype to indicate the built-in data types
- * char, uchar, short, ushort, int, uint, long, ulong, float,
- * double or half.
- *
- * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
- *
- * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
- *
- * The address computed as (p + (offset * n)) must be
- * 8-bit aligned if gentype is char, uchar;
- * 16-bit aligned if gentype is short, ushort, half;
- * 32-bit aligned if gentype is int, uint, float;
- * 64-bit aligned if gentype is long, ulong, double.
- */
-
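-// Illustrative sketch (editor's note, not part of the original header):
-// vload4 at offset i reads elements p[4*i] .. p[4*i+3], and the computed
-// address must meet the element-size alignment listed above, e.g.
-//   float4 v = vload4(i, buf);  // buf: const float *, i: size_t (assumed)
-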
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
-#endif //cl_khr_fp16
-#else
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * Reads sizeof (half) bytes of data from the address
- * (p + offset). The data read is interpreted as a
- * half value, which is converted to a float value
- * and returned. The address computed as (p + offset)
- * must be 16-bit aligned.
- */
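-/**
- * Illustrative sketch (hypothetical names): widening one half per
- * work-item to float.
- *
- *   __kernel void h2f(__global const half *in, __global float *out) {
- *       size_t i = get_global_id(0);
- *       out[i] = vload_half(i, in);   // half at (in + i), returned as float
- *   }
- */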
-float __ovld vload_half(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * Reads sizeof (halfn) bytes of data from the address
- * (p + (offset * n)). The data read is interpreted
- * as a halfn value, which is converted to a floatn
- * value and returned. The address computed as
- * (p + (offset * n)) must be 16-bit aligned.
- */
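-/**
- * Illustrative sketch (hypothetical names): summing four halves read
- * as a float4.
- *
- *   __kernel void sum_h4(__global const half *in, __global float *out) {
- *       size_t i = get_global_id(0);
- *       float4 v = vload_half4(i, in);   // 4 halves at in + i * 4 -> float4
- *       out[i] = v.x + v.y + v.z + v.w;
- *   }
- */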
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * The float value given by data is first
- * converted to a half value using the appropriate
- * rounding mode. The half value is then written
- * to the address computed as (p + offset), which
- * must be 16-bit aligned.
- * vstore_half uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
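-/**
- * Illustrative sketch (hypothetical names): a rounding-mode suffix such
- * as _rtz overrides the default round-to-nearest-even conversion.
- *
- *   __kernel void f2h(__global const float *in, __global half *out) {
- *       size_t i = get_global_id(0);
- *       vstore_half_rtz(in[i], i, out);   // float -> half, round toward zero
- *   }
- */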
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode. The halfn value is then written to the
- * address computed as (p + (offset * n)), which
- * must be 16-bit aligned.
- * vstore_halfn uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
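-/**
- * Illustrative sketch (hypothetical names): packing a float4 into four
- * consecutive halves.
- *
- *   __kernel void pack4(__global const float *in, __global half *out) {
- *       size_t i = get_global_id(0);
- *       float4 v = vload4(i, in);
- *       vstore_half4(v, i, out);   // float4 -> half4 at out + i * 4
- *   }
- */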
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * For n = 1, 2, 4, 8 and 16, vloada_halfn reads
- * sizeof (halfn) bytes of data from the address
- * (p + (offset * n)). The data read is interpreted
- * as a halfn value, which is converted to a floatn
- * value and returned.
- * The address computed as (p + (offset * n))
- * must be aligned to sizeof (halfn) bytes.
- * For n = 3, vloada_half3 reads a half3 from the
- * address (p + (offset * 4)) and returns a float3.
- * The address computed as (p + (offset * 4))
- * must be aligned to sizeof (half) * 4 bytes.
- */
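-/**
- * Illustrative sketch (hypothetical names): for n = 3 the aligned form
- * consumes 4 halves of storage per element.
- *
- *   __kernel void load3(__global const half *in, __global float *out) {
- *       size_t i = get_global_id(0);
- *       float3 v = vloada_half3(i, in);   // half3 at in + i * 4
- *       vstore3(v, i, out);               // 3 floats at out + i * 3
- *   }
- */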
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode.
- * For n = 1, 2, 4, 8 and 16, the halfn value is
- * written to the address computed as
- * (p + (offset * n)), which must be aligned to
- * sizeof (halfn) bytes.
- * For n = 3, the half3 value is written to the
- * address computed as (p + (offset * 4)), which
- * must be aligned to sizeof (half) * 4 bytes.
- * vstorea_halfn uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-
-#else
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
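-
-/*
- * Usage sketch (illustrative only; names are hypothetical): converting
- * float4 results to half with an explicit rounding mode.
- *
- *   __kernel void pack(__global const float4 *in, __global half *out) {
- *     size_t i = get_global_id(0);
- *     vstorea_half4_rtz(in[i], i, out); // round toward zero, store at out[i*4]
- *   }
- */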
-
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-/**
- * All work-items in a work-group executing the kernel
- * on a processor must execute this function before any
- * are allowed to continue execution beyond the barrier.
- * This function must be encountered by all work-items in
- * a work-group executing the kernel.
- * If barrier is inside a conditional statement, then all
- * work-items must enter the conditional if any work-item
- * enters the conditional statement and executes the
- * barrier.
- * If barrier is inside a loop, all work-items must execute
- * the barrier for each iteration of the loop before any are
- * allowed to continue execution beyond the barrier.
- * The barrier function also queues a memory fence
- * (reads and writes) to ensure correct ordering of
- * memory operations to local or global memory.
- * The flags argument specifies the memory address space
- * and can be set to a combination of the following literal
- * values.
- * CLK_LOCAL_MEM_FENCE - The barrier function
- * will either flush any variables stored in local memory
- * or queue a memory fence to ensure correct ordering of
- * memory operations to local memory.
- * CLK_GLOBAL_MEM_FENCE - The barrier function
- * will queue a memory fence to ensure correct ordering
- * of memory operations to global memory. This can be
- * useful when work-items, for example, write to buffer or
- * image objects and then want to read the updated data.
- */
-
-void __ovld __conv barrier(cl_mem_fence_flags flags);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
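-
-/*
- * Usage sketch (illustrative only; assumes a power-of-two local size and a
- * sufficiently sized __local buffer): a local-memory reduction in which
- * every work-item reaches each barrier.
- *
- *   __kernel void reduce(__global const float *in, __global float *out,
- *                        __local float *tmp) {
- *     size_t lid = get_local_id(0);
- *     tmp[lid] = in[get_global_id(0)];
- *     barrier(CLK_LOCAL_MEM_FENCE);      // all writes to tmp now visible
- *     for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
- *       if (lid < s)
- *         tmp[lid] += tmp[lid + s];
- *       barrier(CLK_LOCAL_MEM_FENCE);    // executed by every work-item
- *     }
- *     if (lid == 0)
- *       out[get_group_id(0)] = tmp[0];
- *   }
- */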
-
-// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
-
-/**
- * Orders loads and stores of a work-item
- * executing a kernel. This means that loads
- * and stores preceding the mem_fence will
- * be committed to memory before any loads
- * and stores following the mem_fence.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Read memory barrier that orders only
- * loads.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld read_mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Write memory barrier that orders only
- * stores.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld write_mem_fence(cl_mem_fence_flags flags);
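-
-/*
- * Usage sketch (illustrative only): ordering a flag-protected publish
- * within a single work-item. Note that fences only order this work-item's
- * memory operations; making the update observable to other work-items
- * generally also requires atomics on real devices.
- *
- *   data[i] = result;
- *   write_mem_fence(CLK_GLOBAL_MEM_FENCE); // stores above commit first
- *   flag[i] = 1;
- */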
-
-// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-
-#if defined(__opencl_c_generic_address_space)
-cl_mem_fence_flags __ovld get_fence(const void *ptr);
-cl_mem_fence_flags __ovld get_fence(void *ptr);
-
-/**
- * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
- * and checked in Sema since they should be declared as
- *   addr gentype* to_addr (gentype*);
- * where gentype is a builtin type or a user-defined type.
- */
-
-#endif //defined(__opencl_c_generic_address_space)
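-
-/*
- * Usage sketch (illustrative only; the helper below is hypothetical):
- * get_fence picks the fence flags matching whatever named address space a
- * generic pointer currently refers to.
- *
- *   void store_then_fence(int *p, int v) { // generic address space pointer
- *     *p = v;
- *     mem_fence(get_fence(p));             // fence for p's actual segment
- *   }
- */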
-
-// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
-
-/**
- * event_t async_work_group_copy (
- * __global gentype *dst,
- * const __local gentype *src,
- * size_t num_elements,
- * event_t event)
- * Perform an async copy of num_elements
- * gentype elements from src to dst. The async
- * copy is performed by all work-items in a work-group
- * and this built-in function must therefore
- * be encountered by all work-items in a work-group
- * executing the kernel with the same
- * argument values; otherwise the results are
- * undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the async_work_group_copy with
- * a previous async copy allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in that argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp16
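-
-/*
- * Usage sketch (illustrative only; TILE is a hypothetical compile-time
- * constant): staging a tile of global data into local memory, with all
- * work-items in the work-group encountering the copy and the wait.
- *
- *   __kernel void stage(__global const float *in, __global float *out) {
- *     __local float tile[TILE];
- *     event_t e = async_work_group_copy(tile, in + get_group_id(0) * TILE,
- *                                       TILE, 0);  // 0 = no prior event
- *     wait_group_events(1, &e);       // copy complete for the whole group
- *     out[get_global_id(0)] = tile[get_local_id(0)] * 2.0f;
- *   }
- */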
-
-/**
- * Perform an async gather of num_elements
- * gentype elements from src to dst. The
- * src_stride is the stride in elements for each
- * gentype element read from src. The dst_stride
- * is the stride in elements for each gentype
- * element written to dst. The async gather is
- * performed by all work-items in a work-group.
- * This built-in function must therefore be
- * encountered by all work-items in a work-group
- * executing the kernel with the same argument
- * values; otherwise the results are undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the
- * async_work_group_strided_copy with a
- * previous async copy allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in that argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp16
-
-/**
- * Wait for events that identify the
- * async_work_group_copy operations to
- * complete. The event objects specified in
- * event_list will be released after the wait is
- * performed.
- * This function must be encountered by all work-items
- * in a work-group executing the kernel with
- * the same num_events and event objects specified
- * in event_list; otherwise the results are undefined.
- */
-void __ovld wait_group_events(int num_events, event_t *event_list);
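-
-// Editor's usage sketch (not part of the upstream header; kernel and
-// parameter names are hypothetical): stage one tile of global data into
-// local memory, then block until the async copy has completed.
-__kernel void example_tile_scale(__global const float *in, __global float *out,
-                                 __local float *tile, uint tile_size) {
-  // Every work-item in the group must encounter both calls below.
-  event_t e = async_work_group_copy(tile, in + get_group_id(0) * tile_size,
-                                    tile_size, 0);
-  wait_group_events(1, &e); // e is released once the copy has finished
-  out[get_global_id(0)] = tile[get_local_id(0)] * 2.0f;
-}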
-
-/**
- * Prefetch num_elements * sizeof(gentype)
- * bytes into the global cache. The prefetch
- * instruction is applied to a work-item in a work-group
- * and does not affect the functional
- * behavior of the kernel.
- */
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
-#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
-#endif // cl_khr_fp16
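-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): hint the cache about the block the next iteration will
-// read. prefetch is a pure performance hint and never changes results.
-__kernel void example_blocked_sum(__global const float4 *in,
-                                  __global float *out,
-                                  uint nblocks, uint blk) {
-  float acc = 0.0f;
-  for (uint b = 0; b < nblocks; ++b) {
-    if (b + 1 < nblocks)
-      prefetch(in + (b + 1) * blk, blk); // warm the next block of float4s
-    for (uint i = 0; i < blk; ++i) {
-      float4 v = in[b * blk + i];
-      acc += v.x + v.y + v.z + v.w;
-    }
-  }
-  out[get_global_id(0)] = acc; // each work-item walks the whole array (sketch)
-}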
-
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old + val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
-#endif
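-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): build a 256-bin histogram by letting every work-item
-// bump its pixel's bin; the returned old count is not needed here.
-__kernel void example_histogram(__global const uchar *pixels,
-                                volatile __global int *bins) {
-  atomic_add(&bins[pixels[get_global_id(0)]], 1);
-}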
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location pointed
- * to by p. Compute (old - val) and store the result at the location pointed
- * to by p. The function returns old.
- */
-int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
-#endif
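-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): reserve `count` units from a shared budget; the old
-// value tells this work-item whether the reservation succeeded.
-__kernel void example_reserve(volatile __global int *budget,
-                              __global int *granted, int count) {
-  int old = atomic_sub(budget, count); // value before the subtraction
-  granted[get_global_id(0)] = (old >= count) ? 1 : 0;
-}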
-
-/**
- * Swap the value (referred to as old) stored at
- * the location pointed to by p with the new value
- * given by val. The function returns old.
- */
-int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile __global float *p, float val);
-float __ovld atomic_xchg(volatile __local float *p, float val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile float *p, float val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_xchg(volatile __global long *p, long val);
-long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
-#endif
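-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): push this work-item's index onto a shared index list
-// by swapping it into the head, assuming the list is only traversed
-// after the kernel completes.
-__kernel void example_push_front(volatile __global int *head,
-                                 __global int *next) {
-  int me = (int)get_global_id(0);
-  next[me] = atomic_xchg(head, me); // the old head becomes our successor
-}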
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old + 1) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
-#endif
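-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): stream compaction, where each surviving element claims
-// a unique output slot via the old value returned by atomic_inc.
-__kernel void example_compact(__global const int *in, __global int *out,
-                              volatile __global int *out_count) {
-  int v = in[get_global_id(0)];
-  if (v != 0)
-    out[atomic_inc(out_count)] = v; // old value == our unique slot
-}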
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old - 1) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
-#endif
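-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): count down a counter pre-initialized to the number of
-// work-groups so exactly one group observes the old value 1 and runs a
-// final step.
-__kernel void example_last_group(volatile __global int *remaining,
-                                 __global int *flag) {
-  if (get_local_id(0) == 0 && atomic_dec(remaining) == 1)
-    *flag = 1; // we were the last group to finish
-}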
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old == cmp) ? val : old and store the result
- * at the location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
-long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
-#endif
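-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): the classic compare-and-swap loop, here emulating an
-// atomic float add on top of the 32-bit integer cmpxchg declared above.
-void example_atomic_fadd(volatile __global int *p_bits, float val) {
-  int old_bits, new_bits;
-  do {
-    old_bits = *p_bits;                          // snapshot the current bits
-    new_bits = as_int(as_float(old_bits) + val); // reinterpret, add, pack
-  } while (atomic_cmpxchg(p_bits, old_bits, new_bits) != old_bits);
-}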
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * min(old, val) and store the minimum at the
- * location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
-#endif
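-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): a global minimum reduction; result must be pre-set to
-// INT_MAX and the returned old value is ignored.
-__kernel void example_global_min(__global const int *in,
-                                 volatile __global int *result) {
-  atomic_min(result, in[get_global_id(0)]);
-}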
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * max(old, val) and store the maximum at the
- * location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
-#endif
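-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): track a running maximum and record whether this
-// work-item raised it at the moment of its update.
-__kernel void example_global_max(__global const int *in,
-                                 volatile __global int *result,
-                                 __global int *raised) {
-  int v = in[get_global_id(0)];
-  if (atomic_max(result, v) < v) // old value below v => v raised the maximum
-    raised[get_global_id(0)] = 1;
-}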
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old & val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
-#endif
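-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): atomically clear this work-item's bit in a shared
-// 32-bit occupancy mask.
-__kernel void example_release_slot(volatile __global uint *mask) {
-  uint bit = 1u << (uint)(get_global_id(0) % 32);
-  atomic_and(mask, ~bit); // the old mask is returned; the bit is now clear
-}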
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old | val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
-#endif
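-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): set a flag bit and use the returned old mask to detect
-// the first work-item to claim it.
-__kernel void example_claim(volatile __global uint *mask,
-                            __global int *owner) {
-  uint bit = 1u << (uint)(get_global_id(0) % 32);
-  if ((atomic_or(mask, bit) & bit) == 0) // bit was clear => we claimed it
-    owner[get_global_id(0) % 32] = (int)get_global_id(0);
-}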
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old ^ val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
-#endif
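-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): flip a shared parity bit; an even number of toggles
-// leaves it at its initial value.
-__kernel void example_toggle_parity(volatile __global uint *parity) {
-  atomic_xor(parity, 1u); // returns the value before the flip
-}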
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
-#endif
-
-// OpenCL v2.0 s6.13.11 - Atomic Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-
-// atomic_init()
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_init(volatile __global atomic_int *object, int value);
-void __ovld atomic_init(volatile __local atomic_int *object, int value);
-void __ovld atomic_init(volatile __global atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __local atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __global atomic_float *object, float value);
-void __ovld atomic_init(volatile __local atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile __global atomic_long *object, long value);
-void __ovld atomic_init(volatile __local atomic_long *object, long value);
-void __ovld atomic_init(volatile __global atomic_ulong *object, ulong value);
-void __ovld atomic_init(volatile __local atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile __global atomic_double *object, double value);
-void __ovld atomic_init(volatile __local atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
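-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical, and it assumes the generic-address-space overloads
-// above): atomic_init performs a plain, non-atomic first store, so it
-// must be published with a barrier before concurrent use.
-__kernel void example_group_sum(__global const int *in, __global int *out) {
-  __local atomic_int group_sum;
-  if (get_local_id(0) == 0)
-    atomic_init(&group_sum, 0); // non-atomic store by a single work-item
-  barrier(CLK_LOCAL_MEM_FENCE); // make the initial value visible to all
-  atomic_fetch_add(&group_sum, in[get_global_id(0)]);
-  barrier(CLK_LOCAL_MEM_FENCE);
-  if (get_local_id(0) == 0)
-    out[get_group_id(0)] = atomic_load(&group_sum);
-}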
-
-// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
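-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): order a plain data store before a relaxed flag store,
-// release/acquire style, so a consumer that sees the flag also sees data.
-void example_publish(__global int *data,
-                     volatile __global atomic_int *flag, int value) {
-  *data = value;                               // plain store
-  atomic_work_item_fence(CLK_GLOBAL_MEM_FENCE, // flush before the flag
-                         memory_order_release, memory_scope_device);
-  atomic_store_explicit(flag, 1, memory_order_relaxed, memory_scope_device);
-}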
-
-// atomic_fetch()
-// OpenCL v2.0 s6.13.11.7.5:
-// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_add(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_sub(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_or(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_xor(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_and(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_min(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_max(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max(volatile __local atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_add(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_sub(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_sub(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_or(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_or(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_xor(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_xor(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_and(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_and(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_min(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_min(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_max(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_max(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *object, ulong operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
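-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): these C11-style fetch functions default to
-// memory_order_seq_cst at device scope, making this a portable spelling
-// of the legacy atomic_add further above.
-__kernel void example_fetch_add(volatile __global atomic_int *counter) {
-  int old = atomic_fetch_add(counter, 1); // seq_cst, device scope
-  (void)old;                              // old value unused in this sketch
-}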
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
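-
-// Editor's usage sketch (not part of the upstream header; names are
-// hypothetical): when only the final total matters, a relaxed fetch-add
-// avoids the cost of the default seq_cst ordering.
-__kernel void example_count_hits(volatile __global atomic_uint *hits,
-                                 __global const float *x) {
-  if (x[get_global_id(0)] > 0.0f)
-    atomic_fetch_add_explicit(hits, 1u, memory_order_relaxed);
-}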
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
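For orientation, a minimal caller of the 64-bit explicit overloads above: a sketch that assumes the device reports both cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics, with illustrative kernel and argument names.

    kernel void count_hits(volatile global atomic_long *counter,
                           global const long *hits) {
        // Relaxed ordering is enough for a pure counter; device scope makes
        // the updates visible to every work-group.
        atomic_fetch_add_explicit(counter, hits[get_global_id(0)],
                                  memory_order_relaxed, memory_scope_device);
    }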
-
-// The functionality added by the cl_ext_float_atomics extension
-#if defined(cl_ext_float_atomics)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
-void __ovld atomic_store(volatile __global atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __global atomic_half *object);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __global atomic_half *object,
-                            half operand);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile __local atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __local atomic_half *object);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __local atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store) &&                   \
-    defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order, memory_scope scope);
-half __ovld atomic_load(volatile atomic_half *object);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order, memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
-       // defined(__opencl_c_ext_fp16_local_atomic_load_store)
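A hedged sketch of the fp16 load/store surface above, assuming __opencl_c_ext_fp16_global_atomic_load_store and cl_khr_fp16; names are illustrative. Note that this feature alone provides only load, store and exchange on atomic_half, not the fetch-and-op forms.

    #pragma OPENCL EXTENSION cl_khr_fp16 : enable

    kernel void publish_half(volatile global atomic_half *cell,
                             global const half *src) {
        // Plain atomic store of a half value; no read-modify-write here.
        atomic_store_explicit(cell, src[0], memory_order_relaxed,
                              memory_scope_device);
    }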
-
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
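Combining the local fp16 min/max overloads with the local load/store ones gives a per-work-group maximum; a sketch assuming __opencl_c_ext_fp16_local_atomic_min_max, __opencl_c_ext_fp16_local_atomic_load_store and cl_khr_fp16 (kernel and argument names are illustrative).

    #pragma OPENCL EXTENSION cl_khr_fp16 : enable

    kernel void tile_peak(global const half *in, global half *out,
                          volatile local atomic_half *peak) {
        size_t gid = get_global_id(0);
        if (get_local_id(0) == 0)            // one work-item seeds the slot
            atomic_store_explicit(peak, in[gid], memory_order_relaxed,
                                  memory_scope_work_group);
        barrier(CLK_LOCAL_MEM_FENCE);
        atomic_fetch_max_explicit(peak, in[gid], memory_order_relaxed,
                                  memory_scope_work_group);
        barrier(CLK_LOCAL_MEM_FENCE);
        if (get_local_id(0) == 0)            // one work-item publishes it
            out[get_group_id(0)] = atomic_load_explicit(
                peak, memory_order_relaxed, memory_scope_work_group);
    }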
-
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_max(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp16_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
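The implicit forms default to seq_cst order and device scope, so a running min/max over a buffer needs no explicit arguments; a sketch assuming __opencl_c_ext_fp32_global_atomic_min_max, with illustrative names.

    kernel void running_bounds(volatile global atomic_float *lo,
                               volatile global atomic_float *hi,
                               global const float *data) {
        float v = data[get_global_id(0)];
        atomic_fetch_min(lo, v);   // seq_cst order, device scope by default
        atomic_fetch_max(hi, v);
    }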
-
-#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_max(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_max(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add)
-half __ovld atomic_fetch_add(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_sub(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add)
-float __ovld atomic_fetch_add(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
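A sketch of a one-pass accumulation over the fp32 add overloads above, assuming __opencl_c_ext_fp32_global_atomic_add; names are illustrative.

    kernel void dot_partial(global const float *a, global const float *b,
                            volatile global atomic_float *acc) {
        size_t i = get_global_id(0);
        // Float addition is not associative, so the total can differ from
        // run to run; fine for many reductions, not for bit-exact ones.
        atomic_fetch_add_explicit(acc, a[i] * b[i],
                                  memory_order_relaxed, memory_scope_device);
    }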
-
-#if defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_sub(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add)
-double __ovld atomic_fetch_add(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_sub(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#endif // cl_ext_float_atomics
-
-// atomic_store()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store(volatile __global atomic_int *object, int desired);
-void __ovld atomic_store(volatile __local atomic_int *object, int desired);
-void __ovld atomic_store(volatile __global atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __local atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __global atomic_float *object, float desired);
-void __ovld atomic_store(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_double *object, double desired);
-void __ovld atomic_store(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_long *object, long desired);
-void __ovld atomic_store(volatile __local atomic_long *object, long desired);
-void __ovld atomic_store(volatile __global atomic_ulong *object, ulong desired);
-void __ovld atomic_store(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
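The relationship between the implicit and explicit store forms above, as a sketch (assuming __opencl_c_atomic_order_seq_cst and __opencl_c_atomic_scope_device for the implicit form; names are illustrative):

    kernel void reset(volatile global atomic_int *ctr) {
        atomic_store(ctr, 0);          // implicit: seq_cst, device scope
        // ...the fully explicit spelling of the same store, for comparison:
        atomic_store_explicit(ctr, 0, memory_order_seq_cst,
                              memory_scope_device);
    }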
-
-// atomic_load()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load(volatile atomic_int *object);
-uint __ovld atomic_load(volatile atomic_uint *object);
-float __ovld atomic_load(volatile atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load(volatile __global atomic_int *object);
-int __ovld atomic_load(volatile __local atomic_int *object);
-uint __ovld atomic_load(volatile __global atomic_uint *object);
-uint __ovld atomic_load(volatile __local atomic_uint *object);
-float __ovld atomic_load(volatile __global atomic_float *object);
-float __ovld atomic_load(volatile __local atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile __global atomic_double *object);
-double __ovld atomic_load(volatile __local atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile __global atomic_long *object);
-long __ovld atomic_load(volatile __local atomic_long *object);
-ulong __ovld atomic_load(volatile __global atomic_ulong *object);
-ulong __ovld atomic_load(volatile __local atomic_ulong *object);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order, memory_scope scope);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order, memory_scope scope);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order, memory_scope scope);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
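A minimal load-side sketch, using the __global explicit overloads above with illustrative names:

    kernel void snapshot(volatile global atomic_uint *counter,
                         global uint *out) {
        // One coherent read of a value other work-items may be updating;
        // relaxed suffices when only the value itself matters.
        out[get_global_id(0)] = atomic_load_explicit(
            counter, memory_order_relaxed, memory_scope_device);
    }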
-
-// atomic_exchange()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange(volatile __global atomic_int *object, int desired);
-int __ovld atomic_exchange(volatile __local atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile __global atomic_uint *object, uint desired);
-uint __ovld atomic_exchange(volatile __local atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile __global atomic_float *object, float desired);
-float __ovld atomic_exchange(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile __global atomic_double *object, double desired);
-double __ovld atomic_exchange(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile __global atomic_long *object, long desired);
-long __ovld atomic_exchange(volatile __local atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile __global atomic_ulong *object, ulong desired);
-ulong __ovld atomic_exchange(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
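atomic_exchange() is the natural primitive for handing off a token exactly once; a sketch over the explicit overloads above, assuming __opencl_c_atomic_order_acq_rel is available (names are illustrative).

    kernel void claim_token(volatile global atomic_int *token,
                            global int *winner) {
        // Exactly one work-item swaps out the initial value 1; all others
        // read back 0 and fall through.
        if (atomic_exchange_explicit(token, 0, memory_order_acq_rel,
                                     memory_scope_device) == 1)
            winner[0] = (int)get_global_id(0);
    }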
-
-// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
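The canonical use of the weak form is a retry loop, since a weak compare-exchange may fail spuriously; a sketch against the generic-address-space overloads above, assuming __opencl_c_generic_address_space and the seq_cst/device defaults (kernel and argument names are illustrative).

    kernel void saturating_inc(volatile global atomic_int *ctr, int cap) {
        int cur = atomic_load(ctr);
        // On failure the CAS rewrites 'cur' with the value actually seen,
        // so the loop just re-tests the bound and retries.
        while (cur < cap &&
               !atomic_compare_exchange_weak(ctr, &cur, cur + 1))
            ;
    }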
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __private float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __private double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
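-/*
- * Usage sketch (not part of this header): a typical compare-and-swap
- * retry loop. On failure, atomic_compare_exchange_strong() writes the
- * value it found in *object back into *expected and returns false, so
- * the loop retries against a fresh snapshot. The kernel and argument
- * names below are illustrative only.
- *
- *   __kernel void atomic_max_float(volatile __global atomic_float *acc,
- *                                  float value) {
- *       float expected = atomic_load(acc);
- *       while (expected < value &&
- *              !atomic_compare_exchange_strong(acc, &expected, value)) {
- *           // expected now holds the latest *acc; loop and retry
- *       }
- *   }
- */
-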
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
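-/*
- * Usage sketch (not part of this header): the *_explicit forms take
- * separate memory orders for the success and failure paths. Per the
- * OpenCL C (and C11) rules, the failure order must not be
- * memory_order_release or memory_order_acq_rel, and must be no stronger
- * than the success order. Variable names below are illustrative only.
- *
- *   int expected = 0;
- *   bool won = atomic_compare_exchange_strong_explicit(
- *       &flag_word, &expected, 1,
- *       memory_order_acq_rel,   // order applied if the exchange succeeds
- *       memory_order_acquire);  // order applied if it fails
- */
-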
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
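-/*
- * Usage sketch (not part of this header): the widest *_explicit
- * overloads add a memory_scope argument, which limits how far the
- * ordering is enforced (e.g. memory_scope_work_group vs.
- * memory_scope_device). Names below are illustrative only.
- *
- *   bool ok = atomic_compare_exchange_weak_explicit(
- *       &counter, &expected, expected + 1,
- *       memory_order_relaxed, memory_order_relaxed,
- *       memory_scope_work_group);  // ordered only within this work-group
- */
-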
-// atomic_flag_test_and_set() and atomic_flag_clear()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *object);
-bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __global atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __local atomic_flag *object);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
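-/*
- * Usage sketch (not part of this header): atomic_flag is the classic
- * spin-lock primitive. atomic_flag_test_and_set() returns the previous
- * value, so the loop spins while another work-item holds the flag.
- * Busy-waiting across work-groups can deadlock on GPUs without forward-
- * progress guarantees, so treat this as illustrative only; 'lock' is a
- * hypothetical name.
- *
- *   while (atomic_flag_test_and_set(lock)) {
- *       // spin: the flag was already set
- *   }
- *   // ... critical section ...
- *   atomic_flag_clear(lock);
- */
-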
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
-
-/**
- * The shuffle and shuffle2 built-in functions construct a permutation of
- * elements from one or two input vectors, respectively, that are of the
- * same type, returning a vector with the same element type as the input
- * and the same length as the shuffle mask. The size of each element in
- * the mask must match the size of each element in the result. For
- * shuffle, only the ilogb(2m-1) least significant bits of each mask
- * element are considered. For shuffle2, only the ilogb(2m-1)+1 least
- * significant bits of each mask element are considered. Other bits in
- * the mask shall be ignored.
- * The elements of the input vectors are numbered from left to right
- * across one or both of the vectors. For this purpose, the number of
- * elements in a vector is given by vec_step(gentypem). The shuffle mask
- * operand specifies, for each element of the result vector, which
- * element of the one or two input vectors the result element gets.
- * Examples:
- *   uint4 mask = (uint4)(3, 2, 1, 0);
- *   float4 a;
- *   float4 r = shuffle(a, mask);
- *   // r.s0123 = a.wzyx
- *
- *   uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
- *   float4 a, b;
- *   float8 r = shuffle2(a, b, mask);
- *   // r.s0123 = a.xyzw
- *   // r.s4567 = b.xyzw
- *
- *   uint4 mask;
- *   float8 a;
- *   float4 b;
- *   b = shuffle(a, mask);
- * Examples that are not valid:
- *   uint8 mask;
- *   short16 a;
- *   short8 b;
- *   b = shuffle(a, mask); // invalid: the 4-byte mask elements do not
- *                         // match the 2-byte result elements
- */
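-
-/*
- * Worked example (not part of this header): reversing a float4 with
- * shuffle and interleaving two float4s with shuffle2. The kernel and
- * buffer names are illustrative only.
- *
- *   __kernel void demo(__global float4 *out) {
- *       float4 a = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
- *       float4 b = (float4)(5.0f, 6.0f, 7.0f, 8.0f);
- *       float4 rev = shuffle(a, (uint4)(3, 2, 1, 0));  // (4, 3, 2, 1)
- *       float8 zip = shuffle2(a, b, (uint8)(0, 4, 1, 5, 2, 6, 3, 7));
- *       // zip = (1, 5, 2, 6, 3, 7, 4, 8): mask indices 0..3 select
- *       // from a, 4..7 select from b
- *       out[0] = rev;
- *   }
- */
-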
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
-#endif //cl_khr_fp16
-
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
-#endif //cl_khr_fp16
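-
-/* Illustrative usage (a sketch, not part of the original header; kernel and
- * argument names are hypothetical): reversing one vector with shuffle and
- * interleaving two vectors with shuffle2. Note that the mask element size
- * (uint, 4 bytes) matches the element size of the float results.
- *
- *   __kernel void interleave(__global float4 *a, __global float4 *b,
- *                            __global float8 *out) {
- *     size_t i = get_global_id(0);
- *     float4 x = shuffle(a[i], (uint4)(3, 2, 1, 0));  // a[i].wzyx
- *     // indices 0..3 select from x, 4..7 select from b[i]
- *     out[i] = shuffle2(x, b[i], (uint8)(0, 4, 1, 5, 2, 6, 3, 7));
- *   }
- */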
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-/**
- * Use the coordinate (coord.xy) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (coord.x, coord.y, coord.z) to do
- * an element lookup in the 3D image object specified
- * by image. coord.w is ignored.
- *
- * Use the coordinate (coord.z) to index into the
- * 2D image array object specified by image_array
- * and (coord.x, coord.y) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (x) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.y) to index into the
- * 1D image array object specified by image_array
- * and (coord.x) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.xy) and sample to do an
- * element lookup in the 2D multi-sample image specified
- * by image.
- *
- * Use coord.xy and sample to do an element
- * lookup in the 2D multi-sample image layer
- * identified by index coord.z in the 2D multi-sample
- * image array specified by image.
- *
- * For mipmap images, use the mip-level specified by
- * the Level-of-Detail (lod) or use gradients for LOD
- * computation.
- *
- * read_imagef returns floating-point values in the
- * range [0.0 ... 1.0] for image objects created with
- * image_channel_data_type set to one of the predefined
- * packed formats or CL_UNORM_INT8, or
- * CL_UNORM_INT16.
- *
- * read_imagef returns floating-point values in the
- * range [-1.0 ... 1.0] for image objects created with
- * image_channel_data_type set to CL_SNORM_INT8,
- * or CL_SNORM_INT16.
- *
- * read_imagef returns floating-point values for image
- * objects created with image_channel_data_type set to
- * CL_HALF_FLOAT or CL_FLOAT.
- *
- * read_imagei and read_imageui return
- * unnormalized signed integer and unsigned integer
- * values respectively. Each channel will be stored in a
- * 32-bit integer.
- *
- * read_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imagei
- * are undefined.
- *
- * read_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imageui
- * are undefined.
- *
- * The read_image{i|ui} calls support a nearest filter
- * only. The filter_mode specified in sampler
- * must be set to CLK_FILTER_NEAREST; otherwise
- * the values returned are undefined.
- *
- * The read_image{f|i|ui} calls that take
- * integer coordinates must use a sampler with
- * normalized coordinates set to
- * CLK_NORMALIZED_COORDS_FALSE and
- * addressing mode set to
- * CLK_ADDRESS_CLAMP_TO_EDGE,
- * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
- * otherwise the values returned are undefined.
- *
- * Values returned by read_imagef for image objects
- * with image_channel_data_type values not specified
- * in the description above are undefined.
- */
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
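-
-/* Illustrative usage (a sketch, not part of the original header; kernel and
- * argument names are hypothetical): a sampler-based 2D read with integer
- * coordinates. Per the description above, integer-coordinate reads require
- * unnormalized coordinates, a clamping (or none) addressing mode, and a
- * nearest filter.
- *
- *   __constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
- *                              CLK_ADDRESS_CLAMP_TO_EDGE |
- *                              CLK_FILTER_NEAREST;
- *
- *   __kernel void copy_px(read_only image2d_t src, __global float4 *dst,
- *                         int width) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     dst[p.y * width + p.x] = read_imagef(src, smp, p);
- *   }
- */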
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
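-
-/* Illustrative usage (a sketch, not part of the original header; img, u, v
- * and lvl are hypothetical): selecting a mip level explicitly with the lod
- * overloads above when cl_khr_mipmap_image is supported. Mipmapped reads
- * take normalized float coordinates.
- *
- *   __constant sampler_t mip_smp = CLK_NORMALIZED_COORDS_TRUE |
- *                                  CLK_ADDRESS_CLAMP_TO_EDGE |
- *                                  CLK_FILTER_NEAREST;
- *
- *   float4 texel = read_imagef(img, mip_smp, (float2)(u, v), lvl);
- */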
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-/**
- * Sampler-less Image Access
- */
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
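-
-/* Illustrative usage (a sketch, not part of the original header; kernel and
- * argument names are hypothetical): sampler-less reads take integer
- * coordinates directly, with an implicit nearest filter and no addressing
- * mode, so coordinates must be in range.
- *
- *   __kernel void row_sum(read_only image2d_t img, __global float4 *out) {
- *     int y = get_global_id(0);
- *     float4 acc = (float4)(0.0f);
- *     for (int x = 0; x < get_image_width(img); ++x)
- *       acc += read_imagef(img, (int2)(x, y));
- *     out[y] = acc;
- *   }
- */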
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
-/**
- * Sampler-less Image Access
- */
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-#endif //cl_khr_fp16
-
-// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by image.
- * (coord.x, coord.y) are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by index
- * (coord.z) of the 2D image array object image_array.
- * (coord.x, coord.y) are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width
- * - 1, and 0 ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord) in the 1D image (buffer) object specified by image.
- * coord is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x) in the 1D image object specified by index
- * (coord.y) of the 1D image array object image_array.
- * coord.x is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
- * (coord.x, coord.y, coord.z) are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width - 1,
- * 0 ... image height - 1, and 0 ... image depth - 1.
- *
- * For mipmap images, use mip-level specified by lod.
- *
- * Appropriate data format conversion to the specified
- * image format is done before writing the color value.
- *
- * write_imagef can only be used with image objects
- * created with image_channel_data_type set to one of
- * the pre-defined packed formats or set to
- * CL_SNORM_INT8, CL_UNORM_INT8,
- * CL_SNORM_INT16, CL_UNORM_INT16,
- * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
- * format conversion will be done to convert channel
- * data from a floating-point value to actual data format
- * in which the channels are stored.
- *
- * write_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- *
- * write_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- *
- * The behavior of write_imagef, write_imagei and
- * write_imageui for image objects created with
- * image_channel_data_type values not specified in
- * the description above, or with coordinate values
- * that are outside the valid image dimensions, is
- * undefined.
- */
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
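-
-/* Illustrative usage (a sketch, not part of the original header; kernel and
- * argument names are hypothetical): the color value is converted to the
- * image's channel data type on write, per the description above.
- *
- *   __kernel void fill_rgba(write_only image2d_t dst, float4 value) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     write_imagef(dst, p, value);
- *   }
- */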
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //defined(cl_khr_mipmap_image_writes)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-
-// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions does not
-// have an access qualifier, which is assumed to be read_only by default. Image query builtin
-// functions with a write_only image argument should also be declared.
-
-/**
- * Return the image width in pixels.
- */
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
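-
-// Illustrative usage (editor's sketch): the width query works with any access
-// qualifier, so a copy kernel can size its loop from the image itself. Names
-// are hypothetical, and the samplerless read assumes OpenCL 1.2 or later.
-//
-//   kernel void copy_row(read_only image2d_t src, global float4 *dst) {
-//     int y = get_global_id(0);
-//     int w = get_image_width(src);
-//     for (int x = 0; x < w; ++x)
-//       dst[y * w + x] = read_imagef(src, (int2)(x, y));
-//   }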
-
-/**
- * Return the image height in pixels.
- */
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_height(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image depth in pixels.
- */
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
-
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
-#endif
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-/**
- * Return the number of mip levels in the image.
- */
-
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
-#endif
-
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
-
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
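-
-// Illustrative usage (editor's sketch): querying the length of the mip chain
-// under cl_khr_mipmap_image; names are hypothetical.
-//
-//   kernel void count_mips(read_only image2d_t img, global int *out) {
-//     *out = get_image_num_mip_levels(img);
-//   }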
-
-/**
- * Return the channel data type. Valid values are:
- * CLK_SNORM_INT8
- * CLK_SNORM_INT16
- * CLK_UNORM_INT8
- * CLK_UNORM_INT16
- * CLK_UNORM_SHORT_565
- * CLK_UNORM_SHORT_555
- * CLK_UNORM_SHORT_101010
- * CLK_SIGNED_INT8
- * CLK_SIGNED_INT16
- * CLK_SIGNED_INT32
- * CLK_UNSIGNED_INT8
- * CLK_UNSIGNED_INT16
- * CLK_UNSIGNED_INT32
- * CLK_HALF_FLOAT
- * CLK_FLOAT
- */
-
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
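-
-// Illustrative usage (editor's sketch): branching on the channel data type, e.g.
-// to decide between read_imagef and read_imageui; the CLK_* constants come from
-// opencl-c-base.h and the names below are hypothetical.
-//
-//   kernel void is_float_img(read_only image2d_t img, global int *out) {
-//     int dt = get_image_channel_data_type(img);
-//     *out = (dt == CLK_FLOAT) || (dt == CLK_HALF_FLOAT);
-//   }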
-
-/**
- * Return the image channel order. Valid values are:
- * CLK_A
- * CLK_R
- * CLK_Rx
- * CLK_RG
- * CLK_RGx
- * CLK_RA
- * CLK_RGB
- * CLK_RGBx
- * CLK_RGBA
- * CLK_ARGB
- * CLK_BGRA
- * CLK_INTENSITY
- * CLK_LUMINANCE
- */
-
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
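-
-// Illustrative usage (editor's sketch): detecting whether an image carries an
-// alpha channel from its channel order; names are hypothetical.
-//
-//   kernel void has_alpha(read_only image2d_t img, global int *out) {
-//     int order = get_image_channel_order(img);
-//     *out = (order == CLK_RGBA) || (order == CLK_BGRA) ||
-//            (order == CLK_ARGB) || (order == CLK_RA) || (order == CLK_A);
-//   }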
-
-/**
- * Return the 2D image width and height as an int2
- * type. The width is returned in the x component, and
- * the height in the y component.
- */
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the 3D image width, height, and depth as an
- * int4 type. The width is returned in the x
- * component, height in the y component, depth in the z
- * component and the w component is 0.
- */
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
-#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
-#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
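-
-// Illustrative usage (editor's sketch): reading both dimension forms. Per the
-// comments above, the 2D query packs (width, height) into an int2 and the 3D
-// query packs (width, height, depth, 0) into an int4. Names are hypothetical.
-//
-//   kernel void dims(read_only image2d_t a, read_only image3d_t b, global int4 *out) {
-//     int2 d2 = get_image_dim(a);
-//     out[0] = (int4)(d2.x, d2.y, 0, 0);
-//     out[1] = get_image_dim(b); // .w is always 0
-//   }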
-
-/**
- * Return the image array size.
- */
-
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
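-
-// Illustrative usage (editor's sketch): clamping a layer index against the
-// size_t element count of an image array; names are hypothetical and a
-// non-empty array is assumed.
-//
-//   kernel void clamp_layer(read_only image2d_array_t arr, global int *layer) {
-//     size_t n = get_image_array_size(arr);
-//     *layer = min(*layer, (int)(n - 1));
-//   }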
-
-/**
- * Return the number of samples associated with the image.
- */
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
-
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#endif //cl_khr_gl_msaa_sharing
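-
-// Illustrative usage (editor's sketch): reading the sample count of an MSAA
-// image shared from OpenGL; requires cl_khr_gl_msaa_sharing, and the names
-// are hypothetical.
-//
-//   kernel void samples(read_only image2d_msaa_t img, global int *out) {
-//     *out = get_image_num_samples(img);
-//   }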
-
-// OpenCL v2.0 s6.13.15 - Work-group Functions
-
-#if defined(__opencl_c_work_group_collective_functions)
-int __ovld __conv work_group_all(int predicate);
-int __ovld __conv work_group_any(int predicate);
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
-#endif
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
-#endif
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(__opencl_c_work_group_collective_functions)
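-
-// Illustrative usage (editor's sketch): a per-work-group sum via a collective
-// reduction; every work-item must reach the call, and each one receives the
-// same result. Names are hypothetical.
-//
-//   kernel void wg_sum(global const float *in, global float *out) {
-//     float total = work_group_reduce_add(in[get_global_id(0)]);
-//     if (get_local_id(0) == 0)
-//       out[get_group_id(0)] = total;
-//   }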
-
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__opencl_c_pipes)
-bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //defined(__opencl_c_pipes)
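-
-// Illustrative usage (editor's sketch): validating a pipe reservation before
-// committing it. reserve_write_pipe, write_pipe, and commit_write_pipe are
-// OpenCL C 2.0 builtins handled outside this excerpt; names are hypothetical.
-//
-//   kernel void producer(write_only pipe int p, int v) {
-//     reserve_id_t rid = reserve_write_pipe(p, 1);
-//     if (is_valid_reserve_id(rid)) {
-//       write_pipe(p, rid, 0, &v);
-//       commit_write_pipe(p, rid);
-//     }
-//   }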
-
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ndrange_t __ovld ndrange_1D(size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
-
-ndrange_t __ovld ndrange_2D(const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
-
-ndrange_t __ovld ndrange_3D(const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
-
-int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
-
-void __ovld retain_event(clk_event_t);
-
-void __ovld release_event(clk_event_t);
-
-clk_event_t __ovld create_user_event(void);
-
-void __ovld set_user_event_status(clk_event_t e, int state);
-
-bool __ovld is_valid_event(clk_event_t event);
-
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
-
-queue_t __ovld get_default_queue(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
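-
-// Illustrative usage (editor's sketch): enqueuing a child kernel on the default
-// device queue. enqueue_kernel, the CLK_ENQUEUE_FLAGS_* constants, and block
-// syntax are OpenCL C 2.0 features defined outside this excerpt; names are
-// hypothetical.
-//
-//   kernel void parent(global int *data) {
-//     queue_t q = get_default_queue();
-//     ndrange_t nd = ndrange_1D(64);
-//     enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, nd,
-//                    ^{ data[get_global_id(0)] += 1; });
-//   }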
-
-// OpenCL Extension v2.0 s9.17 - Sub-groups
-
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-// Shared Sub-group Functions
-uint    __ovld get_sub_group_size(void);
-uint    __ovld get_max_sub_group_size(void);
-uint    __ovld get_num_sub_groups(void);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_enqueued_num_sub_groups(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_sub_group_id(void);
-uint    __ovld get_sub_group_local_id(void);
-
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-int     __ovld __conv sub_group_all(int predicate);
-int     __ovld __conv sub_group_any(int predicate);
-
-int     __ovld __conv sub_group_broadcast(int   x, uint sub_group_local_id);
-uint    __ovld __conv sub_group_broadcast(uint  x, uint sub_group_local_id);
-long    __ovld __conv sub_group_broadcast(long  x, uint sub_group_local_id);
-ulong   __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float   __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int     __ovld __conv sub_group_reduce_add(int   x);
-uint    __ovld __conv sub_group_reduce_add(uint  x);
-long    __ovld __conv sub_group_reduce_add(long  x);
-ulong   __ovld __conv sub_group_reduce_add(ulong x);
-float   __ovld __conv sub_group_reduce_add(float x);
-int     __ovld __conv sub_group_reduce_min(int   x);
-uint    __ovld __conv sub_group_reduce_min(uint  x);
-long    __ovld __conv sub_group_reduce_min(long  x);
-ulong   __ovld __conv sub_group_reduce_min(ulong x);
-float   __ovld __conv sub_group_reduce_min(float x);
-int     __ovld __conv sub_group_reduce_max(int   x);
-uint    __ovld __conv sub_group_reduce_max(uint  x);
-long    __ovld __conv sub_group_reduce_max(long  x);
-ulong   __ovld __conv sub_group_reduce_max(ulong x);
-float   __ovld __conv sub_group_reduce_max(float x);
-
-int     __ovld __conv sub_group_scan_exclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_add(float x);
-int     __ovld __conv sub_group_scan_exclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_min(float x);
-int     __ovld __conv sub_group_scan_exclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int     __ovld __conv sub_group_scan_inclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_add(float x);
-int     __ovld __conv sub_group_scan_inclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_min(float x);
-int     __ovld __conv sub_group_scan_inclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_max(float x);
-
-#ifdef cl_khr_fp16
-half    __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half    __ovld __conv sub_group_reduce_add(half x);
-half    __ovld __conv sub_group_reduce_min(half x);
-half    __ovld __conv sub_group_reduce_max(half x);
-half    __ovld __conv sub_group_scan_exclusive_add(half x);
-half    __ovld __conv sub_group_scan_exclusive_min(half x);
-half    __ovld __conv sub_group_scan_exclusive_max(half x);
-half    __ovld __conv sub_group_scan_inclusive_add(half x);
-half    __ovld __conv sub_group_scan_inclusive_min(half x);
-half    __ovld __conv sub_group_scan_inclusive_max(half x);
-#endif //cl_khr_fp16
-
-#ifdef cl_khr_fp64
-double  __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double  __ovld __conv sub_group_reduce_add(double x);
-double  __ovld __conv sub_group_reduce_min(double x);
-double  __ovld __conv sub_group_reduce_max(double x);
-double  __ovld __conv sub_group_scan_exclusive_add(double x);
-double  __ovld __conv sub_group_scan_exclusive_min(double x);
-double  __ovld __conv sub_group_scan_exclusive_max(double x);
-double  __ovld __conv sub_group_scan_inclusive_add(double x);
-double  __ovld __conv sub_group_scan_inclusive_min(double x);
-double  __ovld __conv sub_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
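-
-// Illustrative usage (editor's sketch): a sub-group reduction plus a broadcast
-// from lane 0; every work-item in the sub-group receives the same values.
-// Names are hypothetical.
-//
-//   kernel void sg_demo(global const float *in, global float *out) {
-//     float v = in[get_global_id(0)];
-//     float lead = sub_group_broadcast(v, 0);  // value held by lane 0
-//     out[get_global_id(0)] = sub_group_reduce_add(v) - lead;
-//   }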
-
-#if defined(cl_khr_subgroup_extended_types)
-char __ovld __conv sub_group_broadcast( char value, uint index );
-char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
-char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
-char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
-char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
-char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
-
-uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
-uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
-uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
-uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
-uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
-uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
-
-short __ovld __conv sub_group_broadcast( short value, uint index );
-short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
-short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
-short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
-short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
-short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
-
-ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
-ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
-ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
-ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
-ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
-ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
-
-// scalar int broadcast is part of cl_khr_subgroups
-int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
-int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
-int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
-int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
-int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
-
-// scalar uint broadcast is part of cl_khr_subgroups
-uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
-uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
-uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
-uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
-uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
-
-// scalar long broadcast is part of cl_khr_subgroups
-long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
-long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
-long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
-long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
-long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
-
-// scalar ulong broadcast is part of cl_khr_subgroups
-ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
-ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
-ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
-ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
-ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
-
-// scalar float broadcast is part of cl_khr_subgroups
-float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
-float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
-float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
-float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
-float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
-
-char __ovld __conv sub_group_reduce_add( char value );
-uchar __ovld __conv sub_group_reduce_add( uchar value );
-short __ovld __conv sub_group_reduce_add( short value );
-ushort __ovld __conv sub_group_reduce_add( ushort value );
-
-char __ovld __conv sub_group_reduce_min( char value );
-uchar __ovld __conv sub_group_reduce_min( uchar value );
-short __ovld __conv sub_group_reduce_min( short value );
-ushort __ovld __conv sub_group_reduce_min( ushort value );
-
-char __ovld __conv sub_group_reduce_max( char value );
-uchar __ovld __conv sub_group_reduce_max( uchar value );
-short __ovld __conv sub_group_reduce_max( short value );
-ushort __ovld __conv sub_group_reduce_max( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_add( char value );
-uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
-short __ovld __conv sub_group_scan_inclusive_add( short value );
-ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_min( char value );
-uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
-short __ovld __conv sub_group_scan_inclusive_min( short value );
-ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_max( char value );
-uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
-short __ovld __conv sub_group_scan_inclusive_max( short value );
-ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_add( char value );
-uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
-short __ovld __conv sub_group_scan_exclusive_add( short value );
-ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_min( char value );
-uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
-short __ovld __conv sub_group_scan_exclusive_min( short value );
-ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_max( char value );
-uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
-short __ovld __conv sub_group_scan_exclusive_max( short value );
-ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
-
-#if defined(cl_khr_fp16)
-// scalar half broadcast is part of cl_khr_subgroups
-half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
-half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
-half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
-half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
-half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
-#endif  // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-// scalar double broadcast is part of cl_khr_subgroups
-double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
-double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
-double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
-double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
-double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
-#endif  // cl_khr_fp64
-
-#endif  // cl_khr_subgroup_extended_types
-
-#if defined(cl_khr_subgroup_non_uniform_vote)
-int     __ovld sub_group_elect(void);
-int     __ovld sub_group_non_uniform_all( int predicate );
-int     __ovld sub_group_non_uniform_any( int predicate );
-
-int     __ovld sub_group_non_uniform_all_equal( char value );
-int     __ovld sub_group_non_uniform_all_equal( uchar value );
-int     __ovld sub_group_non_uniform_all_equal( short value );
-int     __ovld sub_group_non_uniform_all_equal( ushort value );
-int     __ovld sub_group_non_uniform_all_equal( int value );
-int     __ovld sub_group_non_uniform_all_equal( uint value );
-int     __ovld sub_group_non_uniform_all_equal( long value );
-int     __ovld sub_group_non_uniform_all_equal( ulong value );
-int     __ovld sub_group_non_uniform_all_equal( float value );
-
-#if defined(cl_khr_fp16)
-int     __ovld sub_group_non_uniform_all_equal( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-int     __ovld sub_group_non_uniform_all_equal( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_vote
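-
-// Illustrative usage (editor's sketch): sub_group_elect returns true in exactly
-// one active work-item, which suits one-per-sub-group side effects; names are
-// hypothetical.
-//
-//   kernel void elect_demo(volatile global int *counter) {
-//     if (sub_group_elect())
-//       atomic_inc(counter); // one increment per sub-group
-//   }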
-
-#if defined(cl_khr_subgroup_ballot)
-char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
-char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
-char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
-char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
-char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
-char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
-
-uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
-uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
-uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
-uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
-uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
-uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
-
-short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
-short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
-short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
-short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
-short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
-short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
-
-ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
-ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
-ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
-ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
-ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
-ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
-
-int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
-int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
-int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
-int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
-int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
-int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
-
-uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
-uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
-uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
-uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
-uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
-uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
-
-long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
-long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
-long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
-long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
-long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
-long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
-
-ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
-ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
-ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
-ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
-ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
-ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
-
-float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
-float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
-float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
-float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
-float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
-float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
-
-char    __ovld sub_group_broadcast_first( char value );
-uchar   __ovld sub_group_broadcast_first( uchar value );
-short   __ovld sub_group_broadcast_first( short value );
-ushort  __ovld sub_group_broadcast_first( ushort value );
-int     __ovld sub_group_broadcast_first( int value );
-uint    __ovld sub_group_broadcast_first( uint value );
-long    __ovld sub_group_broadcast_first( long value );
-ulong   __ovld sub_group_broadcast_first( ulong value );
-float   __ovld sub_group_broadcast_first( float value );
-
-uint4   __ovld sub_group_ballot( int predicate );
-int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
-int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
-uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
-
-uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_find_lsb( uint4 value );
-uint    __ovld sub_group_ballot_find_msb( uint4 value );
-
-uint4   __ovld __cnfn get_sub_group_eq_mask(void);
-uint4   __ovld __cnfn get_sub_group_ge_mask(void);
-uint4   __ovld __cnfn get_sub_group_gt_mask(void);
-uint4   __ovld __cnfn get_sub_group_le_mask(void);
-uint4   __ovld __cnfn get_sub_group_lt_mask(void);
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
-half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
-half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
-half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
-half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
-half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
-
-half    __ovld sub_group_broadcast_first( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
-double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
-double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
-double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
-double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
-double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
-
-double   __ovld sub_group_broadcast_first( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_ballot
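-
-// Illustrative usage (editor's sketch): counting how many lanes satisfy a
-// predicate via a ballot mask; names are hypothetical.
-//
-//   kernel void ballot_demo(global const int *in, global uint *out) {
-//     uint4 mask = sub_group_ballot(in[get_global_id(0)] > 0);
-//     out[get_global_id(0)] = sub_group_ballot_bit_count(mask);
-//   }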
-
-#if defined(cl_khr_subgroup_non_uniform_arithmetic)
-char    __ovld sub_group_non_uniform_reduce_add( char value );
-uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
-short   __ovld sub_group_non_uniform_reduce_add( short value );
-ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
-int     __ovld sub_group_non_uniform_reduce_add( int value );
-uint    __ovld sub_group_non_uniform_reduce_add( uint value );
-long    __ovld sub_group_non_uniform_reduce_add( long value );
-ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
-float   __ovld sub_group_non_uniform_reduce_add( float value );
-
-char    __ovld sub_group_non_uniform_reduce_mul( char value );
-uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
-short   __ovld sub_group_non_uniform_reduce_mul( short value );
-ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
-int     __ovld sub_group_non_uniform_reduce_mul( int value );
-uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
-long    __ovld sub_group_non_uniform_reduce_mul( long value );
-ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
-float   __ovld sub_group_non_uniform_reduce_mul( float value );
-
-char    __ovld sub_group_non_uniform_reduce_min( char value );
-uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
-short   __ovld sub_group_non_uniform_reduce_min( short value );
-ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
-int     __ovld sub_group_non_uniform_reduce_min( int value );
-uint    __ovld sub_group_non_uniform_reduce_min( uint value );
-long    __ovld sub_group_non_uniform_reduce_min( long value );
-ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
-float   __ovld sub_group_non_uniform_reduce_min( float value );
-
-char    __ovld sub_group_non_uniform_reduce_max( char value );
-uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
-short   __ovld sub_group_non_uniform_reduce_max( short value );
-ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
-int     __ovld sub_group_non_uniform_reduce_max( int value );
-uint    __ovld sub_group_non_uniform_reduce_max( uint value );
-long    __ovld sub_group_non_uniform_reduce_max( long value );
-ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
-float   __ovld sub_group_non_uniform_reduce_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_reduce_and( char value );
-uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
-short   __ovld sub_group_non_uniform_reduce_and( short value );
-ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
-int     __ovld sub_group_non_uniform_reduce_and( int value );
-uint    __ovld sub_group_non_uniform_reduce_and( uint value );
-long    __ovld sub_group_non_uniform_reduce_and( long value );
-ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_or( char value );
-uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
-short   __ovld sub_group_non_uniform_reduce_or( short value );
-ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
-int     __ovld sub_group_non_uniform_reduce_or( int value );
-uint    __ovld sub_group_non_uniform_reduce_or( uint value );
-long    __ovld sub_group_non_uniform_reduce_or( long value );
-ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_xor( char value );
-uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
-short   __ovld sub_group_non_uniform_reduce_xor( short value );
-ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
-int     __ovld sub_group_non_uniform_reduce_xor( int value );
-uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
-long    __ovld sub_group_non_uniform_reduce_xor( long value );
-ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
-
-int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_reduce_add( half value );
-half    __ovld sub_group_non_uniform_reduce_mul( half value );
-half    __ovld sub_group_non_uniform_reduce_min( half value );
-half    __ovld sub_group_non_uniform_reduce_max( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_non_uniform_reduce_add( double value );
-double  __ovld sub_group_non_uniform_reduce_mul( double value );
-double  __ovld sub_group_non_uniform_reduce_min( double value );
-double  __ovld sub_group_non_uniform_reduce_max( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_arithmetic
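-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// the non-uniform builtins may be called from divergent control flow, and
-// only the active work-items take part. Here an inclusive scan of 1 ranks
-// the active lanes; the flat output index assumes a single sub-group.
-#if defined(cl_khr_subgroup_non_uniform_arithmetic)
-kernel void compact_positive(global const int* in, global int* out) {
-    int v = in[get_global_id(0)];
-    if (v > 0) {
-        // Rank of this lane among the lanes with v > 0.
-        int rank = sub_group_non_uniform_scan_inclusive_add(1) - 1;
-        out[rank] = v;
-    }
-}
-#endif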
-
-#if defined(cl_khr_subgroup_shuffle)
-char    __ovld sub_group_shuffle( char value, uint index );
-uchar   __ovld sub_group_shuffle( uchar value, uint index );
-short   __ovld sub_group_shuffle( short value, uint index );
-ushort  __ovld sub_group_shuffle( ushort value, uint index );
-int     __ovld sub_group_shuffle( int value, uint index );
-uint    __ovld sub_group_shuffle( uint value, uint index );
-long    __ovld sub_group_shuffle( long value, uint index );
-ulong   __ovld sub_group_shuffle( ulong value, uint index );
-float   __ovld sub_group_shuffle( float value, uint index );
-
-char    __ovld sub_group_shuffle_xor( char value, uint mask );
-uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
-short   __ovld sub_group_shuffle_xor( short value, uint mask );
-ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
-int     __ovld sub_group_shuffle_xor( int value, uint mask );
-uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
-long    __ovld sub_group_shuffle_xor( long value, uint mask );
-ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
-float   __ovld sub_group_shuffle_xor( float value, uint mask );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle( half value, uint index );
-half    __ovld sub_group_shuffle_xor( half value, uint mask );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle( double value, uint index );
-double  __ovld sub_group_shuffle_xor( double value, uint mask );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle
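-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// a butterfly reduction built from sub_group_shuffle_xor. Assumes a
-// power-of-two sub-group size; afterwards every lane holds the full sum.
-#if defined(cl_khr_subgroup_shuffle)
-kernel void butterfly_sum(global float* data) {
-    float v = data[get_global_id(0)];
-    for (uint mask = get_max_sub_group_size() / 2; mask > 0; mask /= 2)
-        v += sub_group_shuffle_xor(v, mask);
-    data[get_global_id(0)] = v;
-}
-#endif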
-
-#if defined(cl_khr_subgroup_shuffle_relative)
-char    __ovld sub_group_shuffle_up( char value, uint delta );
-uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
-short   __ovld sub_group_shuffle_up( short value, uint delta );
-ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
-int     __ovld sub_group_shuffle_up( int value, uint delta );
-uint    __ovld sub_group_shuffle_up( uint value, uint delta );
-long    __ovld sub_group_shuffle_up( long value, uint delta );
-ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
-float   __ovld sub_group_shuffle_up( float value, uint delta );
-
-char    __ovld sub_group_shuffle_down( char value, uint delta );
-uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
-short   __ovld sub_group_shuffle_down( short value, uint delta );
-ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
-int     __ovld sub_group_shuffle_down( int value, uint delta );
-uint    __ovld sub_group_shuffle_down( uint value, uint delta );
-long    __ovld sub_group_shuffle_down( long value, uint delta );
-ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
-float   __ovld sub_group_shuffle_down( float value, uint delta );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle_up( half value, uint delta );
-half    __ovld sub_group_shuffle_down( half value, uint delta );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle_up( double value, uint delta );
-double  __ovld sub_group_shuffle_down( double value, uint delta );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle_relative
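-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// a Hillis-Steele inclusive prefix sum built from sub_group_shuffle_up.
-// Lanes whose source index would be negative receive an undefined value,
-// so the shuffled operand is only accumulated where it is in range.
-#if defined(cl_khr_subgroup_shuffle_relative)
-kernel void prefix_sum(global int* data) {
-    uint lid = get_sub_group_local_id();
-    int v = data[get_global_id(0)];
-    for (uint d = 1; d < get_max_sub_group_size(); d *= 2) {
-        int up = sub_group_shuffle_up(v, d);
-        if (lid >= d)
-            v += up;
-    }
-    data[get_global_id(0)] = v;
-}
-#endif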
-
-#if defined(cl_khr_subgroup_clustered_reduce)
-char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
-
-int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_clustered_reduce
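-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// a clustered reduction sums every group of four neighbouring lanes. The
-// cluster size must be a power of two, uniform across the sub-group, and
-// no larger than the sub-group size.
-#if defined(cl_khr_subgroup_clustered_reduce)
-kernel void average4(global const float* in, global float* out) {
-    float v = in[get_global_id(0)];
-    // Each cluster of 4 lanes receives the sum over its own 4 values.
-    out[get_global_id(0)] = sub_group_clustered_reduce_add(v, 4u) * 0.25f;
-}
-#endif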
-
-#if defined(cl_khr_extended_bit_ops)
-char __ovld __cnfn bitfield_insert(char, char, uint, uint);
-uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
-short __ovld __cnfn bitfield_insert(short, short, uint, uint);
-ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
-int __ovld __cnfn bitfield_insert(int, int, uint, uint);
-uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
-long __ovld __cnfn bitfield_insert(long, long, uint, uint);
-ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
-char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
-uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
-ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
-uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
-long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
-ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
-uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
-ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
-uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
-long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
-ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
-uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
-ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
-uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
-long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
-ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
-uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
-ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
-uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
-long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
-ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
-uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
-ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
-uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
-long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
-ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
-
-char __ovld __cnfn bit_reverse(char);
-uchar __ovld __cnfn bit_reverse(uchar);
-short __ovld __cnfn bit_reverse(short);
-ushort __ovld __cnfn bit_reverse(ushort);
-int __ovld __cnfn bit_reverse(int);
-uint __ovld __cnfn bit_reverse(uint);
-long __ovld __cnfn bit_reverse(long);
-ulong __ovld __cnfn bit_reverse(ulong);
-char2 __ovld __cnfn bit_reverse(char2);
-uchar2 __ovld __cnfn bit_reverse(uchar2);
-short2 __ovld __cnfn bit_reverse(short2);
-ushort2 __ovld __cnfn bit_reverse(ushort2);
-int2 __ovld __cnfn bit_reverse(int2);
-uint2 __ovld __cnfn bit_reverse(uint2);
-long2 __ovld __cnfn bit_reverse(long2);
-ulong2 __ovld __cnfn bit_reverse(ulong2);
-char3 __ovld __cnfn bit_reverse(char3);
-uchar3 __ovld __cnfn bit_reverse(uchar3);
-short3 __ovld __cnfn bit_reverse(short3);
-ushort3 __ovld __cnfn bit_reverse(ushort3);
-int3 __ovld __cnfn bit_reverse(int3);
-uint3 __ovld __cnfn bit_reverse(uint3);
-long3 __ovld __cnfn bit_reverse(long3);
-ulong3 __ovld __cnfn bit_reverse(ulong3);
-char4 __ovld __cnfn bit_reverse(char4);
-uchar4 __ovld __cnfn bit_reverse(uchar4);
-short4 __ovld __cnfn bit_reverse(short4);
-ushort4 __ovld __cnfn bit_reverse(ushort4);
-int4 __ovld __cnfn bit_reverse(int4);
-uint4 __ovld __cnfn bit_reverse(uint4);
-long4 __ovld __cnfn bit_reverse(long4);
-ulong4 __ovld __cnfn bit_reverse(ulong4);
-char8 __ovld __cnfn bit_reverse(char8);
-uchar8 __ovld __cnfn bit_reverse(uchar8);
-short8 __ovld __cnfn bit_reverse(short8);
-ushort8 __ovld __cnfn bit_reverse(ushort8);
-int8 __ovld __cnfn bit_reverse(int8);
-uint8 __ovld __cnfn bit_reverse(uint8);
-long8 __ovld __cnfn bit_reverse(long8);
-ulong8 __ovld __cnfn bit_reverse(ulong8);
-char16 __ovld __cnfn bit_reverse(char16);
-uchar16 __ovld __cnfn bit_reverse(uchar16);
-short16 __ovld __cnfn bit_reverse(short16);
-ushort16 __ovld __cnfn bit_reverse(ushort16);
-int16 __ovld __cnfn bit_reverse(int16);
-uint16 __ovld __cnfn bit_reverse(uint16);
-long16 __ovld __cnfn bit_reverse(long16);
-ulong16 __ovld __cnfn bit_reverse(ulong16);
-#endif // cl_khr_extended_bit_ops
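-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// bitfield_insert(base, insert, offset, count) writes the low `count` bits
-// of `insert` into `base` at bit `offset`; the extract forms read a field
-// back, zero- or sign-extended.
-#if defined(cl_khr_extended_bit_ops)
-kernel void pack_nibble(global const uint* words, global uint* out) {
-    uint w = words[get_global_id(0)];
-    // Insert the value 0xA into bits [8, 12) of w ...
-    uint packed = bitfield_insert(w, 0xAu, 8u, 4u);
-    // ... and read the 4-bit field back, zero-extended (yields 0xA).
-    out[get_global_id(0)] = bitfield_extract_unsigned(packed, 8u, 4u);
-}
-#endif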
-
-#if defined(__opencl_c_integer_dot_product_input_4x8bit)
-uint __ovld __cnfn dot(uchar4, uchar4);
-int __ovld __cnfn dot(char4, char4);
-int __ovld __cnfn dot(uchar4, char4);
-int __ovld __cnfn dot(char4, uchar4);
-
-uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
-int __ovld __cnfn dot_acc_sat(char4, char4, int);
-int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
-int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit
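-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// dot_acc_sat performs a 4x8-bit dot product with a saturating accumulate,
-// the inner step of quantized GEMM loops.
-#if defined(__opencl_c_integer_dot_product_input_4x8bit)
-kernel void dot8(global const char4* a, global const char4* b,
-                 global int* acc) {
-    size_t i = get_global_id(0);
-    // acc[i] += dot(a[i], b[i]), saturating on the accumulate.
-    acc[i] = dot_acc_sat(a[i], b[i], acc[i]);
-}
-#endif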
-
-#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
-uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
-int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
-
-uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
-int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
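-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// the packed forms take four 8-bit lanes pre-packed into a uint, avoiding
-// vector loads; the _us_ suffix treats the first operand as unsigned and
-// the second as signed.
-#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
-kernel void dot8_packed(global const uint* a, global const uint* b,
-                        global int* out) {
-    size_t i = get_global_id(0);
-    out[i] = dot_4x8packed_us_int(a[i], b[i]);
-}
-#endif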
-
-#if defined(cl_intel_subgroups)
-// Intel-Specific Sub Group Functions
-float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_down( long cur, long next, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_down( ulong cur, ulong next, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_xor( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_xor( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_xor( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
-
-uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
-uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
-uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
-uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
-
-void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
-void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
-void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
-void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
-
-#ifdef cl_khr_fp16
-half    __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half    __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
-#endif
-
-#if defined(cl_khr_fp64)
-double  __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double  __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
-#endif
-
-#endif //cl_intel_subgroups
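-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// one Intel block read/write moves one element per work-item of the
-// sub-group from a sub-group-uniform address. For simplicity this assumes
-// the work-group size equals the sub-group size.
-#if defined(cl_intel_subgroups)
-kernel void block_copy(const global uint* src, global uint* dst) {
-    size_t base = get_group_id(0) * get_local_size(0);
-    // The pointer must be identical for every work-item in the sub-group.
-    uint v = intel_sub_group_block_read(src + base);
-    intel_sub_group_block_write(dst + base, v);
-}
-#endif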
-
-#if defined(cl_intel_subgroups_short)
-short       __ovld __conv intel_sub_group_broadcast( short  x, uint sub_group_local_id );
-short2      __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3      __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4      __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8      __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort      __ovld __conv intel_sub_group_broadcast( ushort  x, uint sub_group_local_id );
-ushort2     __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3     __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4     __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8     __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short       __ovld __conv intel_sub_group_shuffle( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_up( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_up( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_up( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_up( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_up( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_xor( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_xor( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_xor( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_xor( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_xor( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_reduce_add( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_min( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_max( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
-
-uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
-
-void       __ovld __conv intel_sub_group_block_write_ui( write_only image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( write_only image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( write_only image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( write_only image2d_t image, int2 byte_coord, uint8 data );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
-
-ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
-
-void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
-void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
-void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
-void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
-#endif // cl_intel_subgroups_short
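-
-// Usage sketch (editorial illustration, not part of the upstream header):
-// the _us block forms move 16-bit data the same way. As above, this
-// assumes the work-group size equals the sub-group size.
-#if defined(cl_intel_subgroups_short)
-kernel void halve_row_us(const global ushort* src, global ushort* dst) {
-    size_t base = get_group_id(0) * get_local_size(0);
-    ushort v = intel_sub_group_block_read_us(src + base);
-    intel_sub_group_block_write_us(dst + base, (ushort)(v >> 1));
-}
-#endif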
-
-#ifdef cl_intel_device_side_avc_motion_estimation
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
-
-// MCE built-in functions
-uchar __ovld
-intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
-    uchar slice_type, uchar qp);
-ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
-    uchar slice_type, uchar qp);
-uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
-    uchar slice_type, uchar qp);
-uint2 __ovld
-intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
-    uchar slice_type, uchar qp);
-
-uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
-uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
-uchar __ovld
-intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
-
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_shape_penalty(
-    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_ac_only_haar(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_mce_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
-    intel_sub_group_avc_mce_result_t result);
-uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld
-intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_mce_result_t result);
-
-// IME built-in functions
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_initialize(
-    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference(
-    short2 ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference(
-    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_max_motion_vector_count(
-    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_unidirectional_mix_disable(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_early_search_termination_threshold(
-    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_weighted_sad(
-    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
-
-__attribute__((deprecated("If you use the latest Intel driver, please use "
-                          "intel_sub_group_avc_ime_ref_window_size instead",
-                          "intel_sub_group_avc_ime_ref_window_size")))
-ushort2 __ovld
-intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
-ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
-    uchar search_window_config, char dual_ref);
-short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
-    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
-    ushort2 image_size);
-
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-
-intel_sub_group_avc_ime_single_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_single_reference_streamin(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_dual_reference_streamin(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_single_reference_streamout(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_dual_reference_streamout(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-
-uchar __ovld intel_sub_group_avc_ime_get_border_reached(
-    uchar image_select, intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
-    intel_sub_group_avc_ime_result_t result);
-
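Taken together, the IME entry points follow an initialize / configure / evaluate / extract pattern; the REF and SIC families below follow the same shape. A minimal device-side sketch of that flow, assuming the CLK_AVC_ME_* constants from the extension's base header and a host-supplied VME media sampler; this is an illustration, not a tuned search:

#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : enable

// Hypothetical kernel: one 16x16 single-reference search per subgroup.
__kernel __attribute__((intel_reqd_sub_group_size(16)))
void ime_search(read_only image2d_t src, read_only image2d_t ref,
                sampler_t vme_sampler, ushort2 mb_coord,
                __global ulong *mvs, __global ushort *sads) {
  intel_sub_group_avc_ime_payload_t p = intel_sub_group_avc_ime_initialize(
      mb_coord, CLK_AVC_ME_PARTITION_MASK_ALL_INTEL,   // assumed enum names
      CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);
  p = intel_sub_group_avc_ime_set_single_reference(
      (short2)(0, 0), CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL, p);
  intel_sub_group_avc_ime_result_t r =
      intel_sub_group_avc_ime_evaluate_with_single_reference(
          src, ref, vme_sampler, p);
  // Each lane of the subgroup receives its slice of the packed results.
  uint lane = get_sub_group_local_id();
  mvs[lane]  = intel_sub_group_avc_ime_get_motion_vectors(r);
  sads[lane] = intel_sub_group_avc_ime_get_inter_distortions(r);
}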
-// REF built-in functions
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_fme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar sad_adjustment);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_bme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar bidirectional_weight, uchar sad_adjustment);
-
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bidirectional_mix_disable(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bilinear_filter_enable(
-    intel_sub_group_avc_ref_payload_t payload);
-
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-
-// SIC built-in functions
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_initialize(
-    ushort2 src_coord);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_skc(
-    uint skip_block_partition_type, uint skip_motion_vector_mask,
-    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
-    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-uint __ovld
-intel_sub_group_avc_sic_get_motion_vector_mask(
-    uint skip_block_partition_type, uchar direction);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
-    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
-    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
-    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
-    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_forward_transform_enable(
-    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
-    uchar block_based_skip_type,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_ipe(
-    read_only image2d_t src_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-
-uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
-    intel_sub_group_avc_sic_result_t result);
-uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
-    intel_sub_group_avc_sic_result_t result);
-uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
-    intel_sub_group_avc_sic_result_t result);
-
-// Wrappers
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_ac_only_haar(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_ac_only_haar(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_ac_only_haar(
-    intel_sub_group_avc_sic_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
-    intel_sub_group_avc_ime_result_t result);
-ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
-    intel_sub_group_avc_ref_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
-    intel_sub_group_avc_ref_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
-    intel_sub_group_avc_sic_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
-    intel_sub_group_avc_ref_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
-    intel_sub_group_avc_ref_result_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld
-intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ref_result_t result);
-
-// Type conversion functions
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_payload(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_payload(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_payload(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_result(
-    intel_sub_group_avc_ime_result_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_result(
-    intel_sub_group_avc_ref_result_t result);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_result(
-    intel_sub_group_avc_sic_result_t result);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_result(
-    intel_sub_group_avc_mce_result_t result);
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
-#endif // cl_intel_device_side_avc_motion_estimation
-
-#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
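Both alignment builtins select a 32-bit window from the 64-bit concatenation (a:b): amd_bitalign shifts by bits, amd_bytealign by bytes. A scalar C model of the per-component semantics as I read the cl_amd_media_ops specification; the exact masking of the shift operand is the detail worth double-checking:

#include <stdint.h>

/* Reference model: shift the 64-bit value (a:b) right, keep the low 32 bits. */
uint32_t ref_bitalign(uint32_t a, uint32_t b, uint32_t c) {
  uint64_t ab = ((uint64_t)a << 32) | b;
  return (uint32_t)(ab >> (c & 31u));        /* shift counted in bits */
}
uint32_t ref_bytealign(uint32_t a, uint32_t b, uint32_t c) {
  uint64_t ab = ((uint64_t)a << 32) | b;
  return (uint32_t)(ab >> ((c & 3u) * 8));   /* shift counted in bytes */
}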
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
-
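amd_lerp averages corresponding bytes of a and b, with the low bit of each byte of c acting as the rounding carry; this is my reading of the cl_amd_media_ops specification, so treat the model below as a hedged sketch:

#include <stdint.h>

uint32_t ref_lerp(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t r = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t ai = (a >> (8 * i)) & 0xffu;
    uint32_t bi = (b >> (8 * i)) & 0xffu;
    uint32_t ci = (c >> (8 * i)) & 1u;   /* per-byte rounding bit */
    r |= ((ai + bi + ci) >> 1) << (8 * i);
  }
  return r;
}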
-uint __ovld amd_pack(float4 v);
-
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
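The SAD builtins accumulate byte-wise absolute differences into the third operand; amd_sadhi accumulates the sum into the high 16 bits instead, and amd_sad4 extends the same idea across all 16 byte pairs of two uint4 values. A scalar sketch of the per-component behaviour, again a hedged reading of the spec:

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences of the four bytes of a and b, added to c. */
uint32_t ref_sad(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t acc = c;
  for (int i = 0; i < 4; ++i) {
    int32_t ai = (int32_t)((a >> (8 * i)) & 0xffu);
    int32_t bi = (int32_t)((b >> (8 * i)) & 0xffu);
    acc += (uint32_t)abs(ai - bi);
  }
  return acc;
}
/* amd_sadhi: the same sum, accumulated into the upper half-word of c. */
uint32_t ref_sadhi(uint32_t a, uint32_t b, uint32_t c) {
  return c + (ref_sad(a, b, 0) << 16);
}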
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
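amd_pack (declared above) converts four floats to bytes with unsigned saturation and packs them into a uint; amd_unpackN extracts byte N back as a float. A scalar sketch; the rounding mode applied by amd_pack is an assumption here (round-to-nearest):

#include <stdint.h>
#include <math.h>

uint32_t ref_pack(const float v[4]) {
  uint32_t r = 0;
  for (int i = 0; i < 4; ++i) {
    float x = v[i] < 0.0f ? 0.0f : (v[i] > 255.0f ? 255.0f : v[i]);
    r |= ((uint32_t)lrintf(x) & 0xffu) << (8 * i);  /* assumed rounding */
  }
  return r;
}
float ref_unpack(uint32_t a, int n) {  /* n in 0..3: byte n as a float */
  return (float)((a >> (8 * n)) & 0xffu);
}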
-#endif // cl_amd_media_ops
-
-#ifdef cl_amd_media_ops2
-int __ovld amd_bfe(int src0, uint src1, uint src2);
-int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
-int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
-int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
-int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
-int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfe(uint src0, uint src1, uint src2);
-uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfm(uint src0, uint src1);
-uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
-uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
-uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
-uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
-uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
-
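amd_bfe extracts a src2-bit field starting at bit src1 (the int overloads sign-extend the result), and amd_bfm builds a mask of src0 ones shifted left by src1. A scalar sketch of the unsigned forms, assuming the mod-32 operand wrapping typical of the underlying hardware instructions (an assumption worth verifying against the spec):

#include <stdint.h>

uint32_t ref_bfe_u(uint32_t src0, uint32_t offset, uint32_t width) {
  offset &= 31u; width &= 31u;   /* assumed mod-32 wrapping */
  if (width == 0) return 0;
  return (src0 >> offset) & ((1u << width) - 1u);
}
uint32_t ref_bfm(uint32_t width, uint32_t offset) {
  return ((1u << (width & 31u)) - 1u) << (offset & 31u);
}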
-float __ovld amd_max3(float src0, float src1, float src2);
-float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_max3(int src0, int src1, int src2);
-int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_max3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_median3(float src0, float src1, float src2);
-float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_median3(int src0, int src1, int src2);
-int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_median3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
-
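A three-input median needs no sorting network: it reduces to min/max pairs, which is what makes a dedicated med3 instruction cheap. A scalar illustration of the identity:

#include <stdint.h>

/* median3(a, b, c) == max(min(a, b), min(max(a, b), c)) */
int32_t ref_median3(int32_t a, int32_t b, int32_t c) {
  int32_t mn = a < b ? a : b;
  int32_t mx = a < b ? b : a;
  int32_t t  = mx < c ? mx : c;
  return mn > t ? mn : t;
}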
-float __ovld amd_min3(float src0, float src1, float src2);
-float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_min3(int src0, int src1, int src2);
-int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_min3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
-
-ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-uint __ovld amd_msad(uint src0, uint src1, uint src2);
-uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadd(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadw(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
-#endif // cl_amd_media_ops2
-
-#if defined(cl_arm_integer_dot_product_int8)
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
-#endif // defined(cl_arm_integer_dot_product_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int8)
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int16)
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
-
-#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
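The Arm dot-product builtins multiply corresponding 8-bit (or 16-bit) lanes and sum the products, with the _acc variants adding an accumulator and the _acc_sat variants saturating that addition. A scalar reference for the uchar4 forms; the saturating add is sketched with a manual overflow check:

#include <stdint.h>

uint32_t ref_arm_dot(const uint8_t a[4], const uint8_t b[4]) {
  uint32_t s = 0;
  for (int i = 0; i < 4; ++i) s += (uint32_t)a[i] * b[i];
  return s;
}
uint32_t ref_arm_dot_acc_sat(const uint8_t a[4], const uint8_t b[4],
                             uint32_t c) {
  uint32_t d = ref_arm_dot(a, b);
  return d > UINT32_MAX - c ? UINT32_MAX : d + c;  /* clamp on overflow */
}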
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#undef __cnfn
-#undef __ovld
-#endif //_OPENCL_H_
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
deleted file mode 100644
index 82a7178..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
+++ /dev/null
@@ -1,2325 +0,0 @@
-/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header file is to help porting code using Intel intrinsics
-   explicitly from x86_64 to powerpc64/powerpc64le.
-
-   Since the X86 SSE2 intrinsics mainly handle the __m128i and __m128d
-   types, the PowerPC VMX/VSX ISA is a good match for vector float SIMD
-   operations. However, scalar float operations in vector (XMM) registers
-   require the POWER8 VSX ISA (2.07) level. There are differences in the
-   data format and placement of float scalars in the vector register, which
-   require extra steps to match SSE2 scalar float semantics on POWER.
-
-   Note that X86_64's MXCSR and PowerISA's FPSCR/VSCR registers differ
-   substantially. It's recommended to use the portable <fenv.h> interface
-   instead of accessing the MXCSR directly.
-
-   Most SSE2 scalar float intrinsic operations can be performed more
-   efficiently as C language float scalar operations or optimized to
-   use vector SIMD operations. We recommend this for new applications.
-*/
-#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
-
-#ifndef EMMINTRIN_H_
-#define EMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-
-/* We need definitions from the SSE header files.  */
-#include <xmmintrin.h>
-
-/* SSE2 */
-typedef __vector double __v2df;
-typedef __vector long long __v2di;
-typedef __vector unsigned long long __v2du;
-typedef __vector int __v4si;
-typedef __vector unsigned int __v4su;
-typedef __vector short __v8hi;
-typedef __vector unsigned short __v8hu;
-typedef __vector signed char __v16qi;
-typedef __vector unsigned char __v16qu;
-
-/* The Intel API is flexible enough that we must allow aliasing with other
-   vector types, and their scalar components.  */
-typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
-typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
-
-/* Unaligned versions of the same types.  */
-typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-
-/* Define a two-value permute mask.  */
-#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
-
-/* Create a vector with element 0 as F and the rest zero.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_sd (double __F)
-{
-  return __extension__ (__m128d){ __F, 0.0 };
-}
-
-/* Create a vector with both elements equal to F.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_pd (double __F)
-{
-  return __extension__ (__m128d){ __F, __F };
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd1 (double __F)
-{
-  return _mm_set1_pd (__F);
-}
-
-/* Create a vector with the lower value X and upper value W.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd (double __W, double __X)
-{
-  return __extension__ (__m128d){ __X, __W };
-}
-
-/* Create a vector with the lower value W and upper value X.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_pd (double __W, double __X)
-{
-  return __extension__ (__m128d){ __W, __X };
-}
-
-/* Create an undefined vector.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_pd (void)
-{
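-  /* Intentional self-initialization: the value is still undefined, but the
-     idiom keeps the compiler from warning about an uninitialized use.  */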
-  __m128d __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_pd (void)
-{
-  return (__m128d) vec_splats (0);
-}
-
-/* Sets the low DPFP value of A from the low value of B.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_sd (__m128d __A, __m128d __B)
-{
-  __v2df result = (__v2df) __A;
-  result [0] = ((__v2df) __B)[0];
-  return (__m128d) result;
-}
-
-/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd (double const *__P)
-{
-  return ((__m128d)vec_ld(0, (__v16qu*)__P));
-}
-
-/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_pd (double const *__P)
-{
-  return (vec_vsx_ld(0, __P));
-}
-
-/* Create a vector with both elements equal to *P.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_pd (double const *__P)
-{
-  return (vec_splats (*__P));
-}
-
-/* Create a vector with element 0 as *P and the rest zero.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_sd (double const *__P)
-{
-  return _mm_set_sd (*__P);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd1 (double const *__P)
-{
-  return _mm_load1_pd (__P);
-}
-
-/* Load two DPFP values in reverse order.  The address must be aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_pd (double const *__P)
-{
-  __v2df __tmp = _mm_load_pd (__P);
-  return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
-}
-
-/* Store two DPFP values.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd (double *__P, __m128d __A)
-{
-  vec_st((__v16qu)__A, 0, (__v16qu*)__P);
-}
-
-/* Store two DPFP values.  The address need not be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_pd (double *__P, __m128d __A)
-{
-  *(__m128d_u *)__P = __A;
-}
-
-/* Stores the lower DPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_sd (double *__P, __m128d __A)
-{
-  *__P = ((__v2df)__A)[0];
-}
-
-extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_f64 (__m128d __A)
-{
-  return ((__v2df)__A)[0];
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pd (double *__P, __m128d __A)
-{
-  _mm_store_sd (__P, __A);
-}
-
-/* Stores the upper DPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pd (double *__P, __m128d __A)
-{
-  *__P = ((__v2df)__A)[1];
-}
-
-/* Store the lower DPFP value across two words.
-   The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_pd (double *__P, __m128d __A)
-{
-  _mm_store_pd (__P, vec_splat (__A, 0));
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd1 (double *__P, __m128d __A)
-{
-  _mm_store1_pd (__P, __A);
-}
-
-/* Store two DPFP values in reverse order.  The address must be aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_pd (double *__P, __m128d __A)
-{
-  _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64 (__m128i __A)
-{
-  return ((__v2di)__A)[0];
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64x (__m128i __A)
-{
-  return ((__v2di)__A)[0];
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A + (__v2df)__B);
-}
-
-/* Add the lower double-precision (64-bit) floating-point element in
-   a and b, store the result in the lower element of dst, and copy
-   the upper element from a to the upper element of dst. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] + __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A - (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] - __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A * (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] * __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A / (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] / __B[0];
-  return (__A);
-}
-
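Because __m128d is a native vector type here, the *_sd forms above can use C array indexing directly: they operate on element 0 and pass element 1 of __A through unchanged. A small host-side usage sketch, assuming a powerpc64le build with -DNO_WARN_X86_INTRINSICS so this wrapper compiles quietly:

#include <stdio.h>
#include <emmintrin.h>

int main(void) {
  __m128d a = _mm_set_pd(9.0, 1.0);  /* upper = 9.0, lower = 1.0 */
  __m128d b = _mm_set_pd(7.0, 2.0);
  __m128d r = _mm_add_sd(a, b);      /* lower = 1.0 + 2.0, upper copied from a */
  double hi;
  _mm_storeh_pd(&hi, r);
  printf("lo=%f hi=%f\n", _mm_cvtsd_f64(r), hi);  /* lo=3.000000 hi=9.000000 */
  return 0;
}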
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_pd (__m128d __A)
-{
-  return (vec_sqrt (__A));
-}
-
-/* Return pair {sqrt (B[0]), A[1]}.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_sd (__m128d __A, __m128d __B)
-{
-  __v2df c;
-  c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pd (__m128d __A, __m128d __B)
-{
-  return (vec_min (__A, __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = vec_min (a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pd (__m128d __A, __m128d __B)
-{
-  return (vec_max (__A, __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = vec_max (a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_pd (__m128d __A, __m128d __B)
-{
-  __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
-  return ((__m128d)vec_nor (temp, temp));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
-  __v2du c, d;
-  /* Comparing a value against itself returns false (0's) for NANs.  */
-  c = (__v2du)vec_cmpeq (__A, __A);
-  d = (__v2du)vec_cmpeq (__B, __B);
-#else
-  __v2du a, b;
-  __v2du c, d;
-  const __v2du double_exp_mask  = {0x7ff0000000000000, 0x7ff0000000000000};
-  a = (__v2du)vec_abs ((__v2df)__A);
-  b = (__v2du)vec_abs ((__v2df)__B);
-  c = (__v2du)vec_cmpgt (double_exp_mask, a);
-  d = (__v2du)vec_cmpgt (double_exp_mask, b);
-#endif
-  /* A != NAN and B != NAN.  */
-  return ((__m128d)vec_and(c, d));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
-  __v2du c, d;
-  /* Comparing a value against itself returns false (0's) for NANs.  */
-  c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
-  d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
-  /* A == NAN OR B == NAN converts to:
-     NOT(A != NAN) OR NOT(B != NAN).  */
-  c = vec_nor (c, c);
-  return ((__m128d)vec_orc(c, d));
-#else
-  __v2du c, d;
-  /* Comparing a value against itself returns false (0's) for NANs.  */
-  c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
-  d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
-  /* Invert so that true ('1's) marks a NAN.  */
-  c = vec_nor (c, c);
-  d = vec_nor (d, d);
-  return ((__m128d)vec_or(c, d));
-#endif
-}
-
-extern __inline  __m128d  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_sd(__m128d  __A, __m128d  __B)
-{
-  __v2df a, b, c;
-  /* PowerISA VSX does not allow partial (lower-double-only)
-     results. So to ensure we don't generate spurious exceptions
-     (from the upper double values) we splat the lower double
-     before we do the operation. */
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpeq(a, b);
-  /* Then we merge the lower double result with the original upper
-     double from __A.  */
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmplt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmple(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpgt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpge(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpeq(a, b);
-  c = vec_nor (c, c);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not less than is just greater than or equal.  */
-  c = (__v2df) vec_cmpge(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not less than or equal is just greater than.  */
-  c = (__v2df) vec_cmpgt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not greater than is just less than or equal.  */
-  c = (__v2df) vec_cmple(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not greater than or equal is just less than.  */
-  c = (__v2df) vec_cmplt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_sd (__m128d __A, __m128d __B)
-{
-  __v2df r;
-  r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
-  return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_sd (__m128d __A, __m128d __B)
-{
-  __v2df r;
-  r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
-  return (__m128d) _mm_setr_pd (r[0], __A[1]);
-}
-
-/* FIXME
-   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
-   exactly the same because GCC for PowerPC only generates unordered
-   compares (scalar and vector).
-   Technically _mm_comieq_sd et al. should be using the ordered
-   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
-   be OK.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-/* Create a vector of Qi, where i is the element number.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64x (long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64 (__m64 __q1,  __m64 __q0)
-{
-  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
-{
-  return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
-	       short __q3, short __q2, short __q1, short __q0)
-{
-  return __extension__ (__m128i)(__v8hi){
-    __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
-	      char __q11, char __q10, char __q09, char __q08,
-	      char __q07, char __q06, char __q05, char __q04,
-	      char __q03, char __q02, char __q01, char __q00)
-{
-  return __extension__ (__m128i)(__v16qi){
-    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
-    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
-  };
-}
-
-/* Set all of the elements of the vector to A.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64x (long long __A)
-{
-  return _mm_set_epi64x (__A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64 (__m64 __A)
-{
-  return _mm_set_epi64 (__A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi32 (int __A)
-{
-  return _mm_set_epi32 (__A, __A, __A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi16 (short __A)
-{
-  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi8 (char __A)
-{
-  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
-		       __A, __A, __A, __A, __A, __A, __A, __A);
-}
-
-/* Create a vector of Qi, where i is the element number.
-   The parameter order is reversed from the _mm_set_epi* functions.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi64 (__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64 (__q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
-{
-  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
-	        short __q4, short __q5, short __q6, short __q7)
-{
-  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
-	       char __q04, char __q05, char __q06, char __q07,
-	       char __q08, char __q09, char __q10, char __q11,
-	       char __q12, char __q13, char __q14, char __q15)
-{
-  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
-		       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
-}
-
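The _mm_setr_* variants take their arguments in memory (element) order, while the _mm_set_* variants take them highest-element-first. A quick equivalence check (in the same translation unit as this header):

#include <string.h>

static int set_matches_setr(void) {
  __m128i x = _mm_set_epi32 (4, 3, 2, 1);  /* element 0 = 1, element 3 = 4 */
  __m128i y = _mm_setr_epi32(1, 2, 3, 4);
  return memcmp(&x, &y, sizeof x) == 0;    /* returns 1: identical vectors */
}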
-/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_si128 (__m128i const *__P)
-{
-  return *__P;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_si128 (__m128i_u const *__P)
-{
-  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_epi64 (__m128i_u const *__P)
-{
-  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_si128 (__m128i *__P, __m128i __B)
-{
-  vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
-{
-  *__P = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
-{
-  *(long long *)__P = ((__v2di)__B)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movepi64_pi64 (__m128i_u __B)
-{
-  return (__m64) ((__v2di)__B)[0];
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movpi64_epi64 (__m64 __A)
-{
-  return _mm_set_epi64 ((__m64)0LL, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_epi64 (__m128i __A)
-{
-  return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
-}
-
-/* Create an undefined vector.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_si128 (void)
-{
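-  /* Same intentional self-initialization idiom as _mm_undefined_pd above.  */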
-  __m128i __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_si128 (void)
-{
-  return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_pd (__m128i __A)
-{
-  __v2di val;
-  /* For LE we need a Vector Unpack Low Signed Word, which is
-     generated from vec_unpackh.  */
-  val = (__v2di)vec_unpackh ((__v4si)__A);
-
-  return (__m128d)vec_ctf (val, 0);
-}
-#endif
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_ps (__m128i __A)
-{
-  return ((__m128)vec_ctf((__v4si)__A, 0));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_epi32 (__m128d __A)
-{
-  __v2df rounded = vec_rint (__A);
-  __v4si result, temp;
-  const __v4si vzero =
-    { 0, 0, 0, 0 };
-
-  /* VSX Vector truncate Double-Precision to integer and Convert to
-   Signed Integer Word format with Saturate.  */
-  __asm__(
-      "xvcvdpsxws %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (rounded)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4si) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-  return (__m128i) result;
-}
-
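-/* Usage sketch for the conversion above (a hypothetical helper, not part
-   of the original header): under the default round-to-nearest-even mode,
-   vec_rint rounds ties to even, so 1.5 and 2.5 both convert to 2, and
-   the two high lanes are zeroed.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_cvtpd_epi32 (void)
-{
-  __v4si __r = (__v4si) _mm_cvtpd_epi32 (_mm_set_pd (2.5, 1.5));
-  return __r[0] == 2 && __r[1] == 2 && __r[2] == 0 && __r[3] == 0;
-}
-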
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_pi32 (__m128d __A)
-{
-  __m128i result = _mm_cvtpd_epi32(__A);
-
-  return (__m64) result[0];
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_ps (__m128d __A)
-{
-  __v4sf result;
-  __v4si temp;
-  const __v4si vzero = { 0, 0, 0, 0 };
-
-  __asm__(
-      "xvcvdpsp %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (__A)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4sf) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-  return ((__m128)result);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_epi32 (__m128d __A)
-{
-  __v4si result;
-  __v4si temp;
-  const __v4si vzero = { 0, 0, 0, 0 };
-
-  /* VSX Vector truncate Double-Precision to integer and Convert to
-   Signed Integer Word format with Saturate.  */
-  __asm__(
-      "xvcvdpsxws %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (__A)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4si) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-
-  return ((__m128i) result);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_pi32 (__m128d __A)
-{
-  __m128i result = _mm_cvttpd_epi32 (__A);
-
-  return (__m64) result[0];
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si32 (__m128i __A)
-{
-  return ((__v4si)__A)[0];
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_pd (__m64 __A)
-{
-  __v4si temp;
-  __v2di tmp2;
-  __v2df result;
-
-  temp = (__v4si)vec_splats (__A);
-  tmp2 = (__v2di)vec_unpackl (temp);
-  result = vec_ctf ((__vector signed long long) tmp2, 0);
-  return (__m128d)result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_epi32 (__m128 __A)
-{
-  __v4sf rounded;
-  __v4si result;
-
-  rounded = vec_rint((__v4sf) __A);
-  result = vec_cts (rounded, 0);
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_epi32 (__m128 __A)
-{
-  __v4si result;
-
-  result = vec_cts ((__v4sf) __A, 0);
-  return (__m128i) result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pd (__m128 __A)
-{
-  /* Check if vec_doubleh is defined by <altivec.h>. If so use that. */
-#ifdef vec_doubleh
-  return (__m128d) vec_doubleh ((__v4sf)__A);
-#else
-  /* Otherwise the compiler is too old, so we need to generate the
-     equivalent code inline.  */
-  __v4sf a = (__v4sf)__A;
-  __v4sf temp;
-  __v2df result;
-#ifdef __LITTLE_ENDIAN__
-  /* The input float values are in elements {[0], [1]}, but the convert
-     instruction needs them in elements {[1], [3]}, so we use two
-     shift-left-double vector-word immediates to get the elements
-     lined up.  */
-  temp = __builtin_vsx_xxsldwi (a, a, 3);
-  temp = __builtin_vsx_xxsldwi (a, temp, 2);
-#else
-  /* The input float values are in elements {[0], [1]}, but the convert
-     instruction needs them in elements {[0], [2]}, so we merge the
-     vector high words to get the elements lined up.  */
-  temp = vec_vmrghw (a, a);
-#endif
-  __asm__(
-      " xvcvspdp %x0,%x1"
-      : "=wa" (result)
-      : "wa" (temp)
-      : );
-  return (__m128d) result;
-#endif
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si32 (__m128d __A)
-{
-  __v2df rounded = vec_rint((__v2df) __A);
-  int result = ((__v2df)rounded)[0];
-
-  return result;
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64 (__m128d __A)
-{
-  __v2df rounded = vec_rint ((__v2df) __A );
-  long long result = ((__v2df) rounded)[0];
-
-  return result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64x (__m128d __A)
-{
-  return _mm_cvtsd_si64 (__A);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si32 (__m128d __A)
-{
-  int result = ((__v2df)__A)[0];
-
-  return result;
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64 (__m128d __A)
-{
-  long long result = ((__v2df)__A)[0];
-
-  return result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64x (__m128d __A)
-{
-  return _mm_cvttsd_si64 (__A);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_ss (__m128 __A, __m128d __B)
-{
-  __v4sf result = (__v4sf)__A;
-
-#ifdef __LITTLE_ENDIAN__
-  __v4sf temp_s;
-  /* Copy double element[0] to element [1] for conversion.  */
-  __v2df temp_b = vec_splat((__v2df)__B, 0);
-
-  /* Pre-rotate __A left 3 (logically right 1) elements.  */
-  result = __builtin_vsx_xxsldwi (result, result, 3);
-  /* Convert double to single float scalar in a vector.  */
-  __asm__(
-      "xscvdpsp %x0,%x1"
-      : "=wa" (temp_s)
-      : "wa" (temp_b)
-      : );
-  /* Shift the resulting scalar into vector element [0].  */
-  result = __builtin_vsx_xxsldwi (result, temp_s, 1);
-#else
-  result [0] = ((__v2df)__B)[0];
-#endif
-  return (__m128) result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_sd (__m128d __A, int __B)
-{
-  __v2df result = (__v2df)__A;
-  double db = __B;
-  result [0] = db;
-  return (__m128d)result;
-}
-
-/* Intel intrinsic.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_sd (__m128d __A, long long __B)
-{
-  __v2df result = (__v2df)__A;
-  double db = __B;
-  result [0] = db;
-  return (__m128d)result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_sd (__m128d __A, long long __B)
-{
-  return _mm_cvtsi64_sd (__A, __B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_sd (__m128d __A, __m128 __B)
-{
-#ifdef __LITTLE_ENDIAN__
-  /* Use splat to move element [0] into position for the convert. */
-  __v4sf temp = vec_splat ((__v4sf)__B, 0);
-  __v2df res;
-  /* Convert single float scalar to double in a vector.  */
-  __asm__(
-      "xscvspdp %x0,%x1"
-      : "=wa" (res)
-      : "wa" (temp)
-      : );
-  return (__m128d) vec_mergel (res, (__v2df)__A);
-#else
-  __v2df res = (__v2df)__A;
-  res [0] = ((__v4sf)__B) [0];
-  return (__m128d) res;
-#endif
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
-{
-  __vector double result;
-  const int litmsk = __mask & 0x3;
-
-  if (litmsk == 0)
-    result = vec_mergeh (__A, __B);
-#if __GNUC__ < 6
-  else if (litmsk == 1)
-    result = vec_xxpermdi (__B, __A, 2);
-  else if (litmsk == 2)
-    result = vec_xxpermdi (__B, __A, 1);
-#else
-  else if (litmsk == 1)
-    result = vec_xxpermdi (__A, __B, 2);
-  else if (litmsk == 2)
-    result = vec_xxpermdi (__A, __B, 1);
-#endif
-  else
-    result = vec_mergel (__A, __B);
-
-  return result;
-}
-
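-/* Usage sketch (a hypothetical helper, not part of the original header):
-   bit 0 of the mask selects the element taken from __A and bit 1 the
-   element taken from __B, so _MM_SHUFFLE2 (1, 0) (mask 2) yields
-   { __A[0], __B[1] }.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_shuffle_pd (__m128d __A, __m128d __B)
-{
-  return _mm_shuffle_pd (__A, __B, _MM_SHUFFLE2 (1, 0));
-}
-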
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pd (__m128d __A, double const *__B)
-{
-  __v2df result = (__v2df)__A;
-  result [1] = *__B;
-  return (__m128d)result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pd (__m128d __A, double const *__B)
-{
-  __v2df result = (__v2df)__A;
-  result [0] = *__B;
-  return (__m128d)result;
-}
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 2-bit mask from the most significant bits of the DPFP values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pd (__m128d  __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned int perm_mask =
-    {
-#ifdef __LITTLE_ENDIAN__
-	0x80800040, 0x80808080, 0x80808080, 0x80808080
-#else
-      0x80808080, 0x80808080, 0x80808080, 0x80804000
-#endif
-    };
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
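-
-/* Usage sketch (a hypothetical helper, not part of the original header):
-   bit 0 of the result is the sign bit of element 0 and bit 1 that of
-   element 1, so a value of 3 means both sign bits are set.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_both_signs_set (__m128d __A)
-{
-  return _mm_movemask_pd (__A) == 3;
-}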
-#endif /* _ARCH_PWR8 */
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packus_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__vector long long) __A,
-			       (__vector long long) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__vector long long) __A,
-			       (__vector long long) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v16qu)__A + (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v8hu)__A + (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v4su)__A + (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v2du)__A + (__v2du)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v16qu)__A - (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v8hu)__A - (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v4su)__A - (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v2du)__A - (__v2du)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_madd_epi16 (__m128i __A, __m128i __B)
-{
-  __vector signed int zero = {0, 0, 0, 0};
-
-  return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epi16 (__m128i __A, __m128i __B)
-{
-  __vector signed int w0, w1;
-
-  __vector unsigned char xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x08, 0x09, 0x18, 0x19,  0x0C, 0x0D, 0x1C, 0x1D
-#endif
-    };
-
-  w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
-  w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
-  return (__m128i) vec_perm (w0, w1, xform1);
-}
-
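-/* Scalar model of the even/odd multiply-and-merge above (a hypothetical
-   helper, not part of the original header): result lane i is the high
-   16 bits of the signed 32-bit product of the operands' lane i.  */
-extern __inline short __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_mulhi_lane (short __a, short __b)
-{
-  return (short) (((int) __a * (int) __b) >> 16);
-}
-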
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mullo_epi16 (__m128i __A, __m128i __B)
-{
-    return (__m128i) ((__v8hi)__A * (__v8hi)__B);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_su32 (__m64 __A, __m64 __B)
-{
-  unsigned int a = __A;
-  unsigned int b = __B;
-
-  return ((__m64)a * (__m64)b);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_epu32 (__m128i __A, __m128i __B)
-{
-#if __GNUC__ < 8
-  __v2du result;
-
-#ifdef __LITTLE_ENDIAN__
-  /* VMX Vector Multiply Odd Unsigned Word.  */
-  __asm__(
-      "vmulouw %0,%1,%2"
-      : "=v" (result)
-      : "v" (__A), "v" (__B)
-      : );
-#else
-  /* VMX Vector Multiply Even Unsigned Word.  */
-  __asm__(
-      "vmuleuw %0,%1,%2"
-      : "=v" (result)
-      : "v" (__A), "v" (__B)
-      : );
-#endif
-  return (__m128i) result;
-#else
-  return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
-#endif
-}
-
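-/* Usage sketch (a hypothetical helper, not part of the original header):
-   lanes 0 and 2 of each operand are multiplied as unsigned 32-bit values
-   into two unsigned 64-bit products; lanes 1 and 3 are ignored.  */
-extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_mul_epu32_low (__m128i __A, __m128i __B)
-{
-  return ((__v2du) _mm_mul_epu32 (__A, __B))[0];
-}
-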
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi16 (__m128i __A, int __B)
-{
-  __v8hu lshift;
-  __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__B >= 0 && __B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	lshift = (__v8hu) vec_splat_s16(__B);
-      else
-	lshift = vec_splats ((unsigned short) __B);
-
-      result = vec_sl ((__v8hi) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-
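-/* Usage sketch (a hypothetical helper, not part of the original header):
-   as on x86, shift counts outside 0..15 produce zero rather than a
-   modulo-16 shift, so this always returns 1.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_slli_epi16_oob (__m128i __A)
-{
-  __m128i __r = _mm_slli_epi16 (__A, 16);
-  return ((__v2du) __r)[0] == 0 && ((__v2du) __r)[1] == 0;
-}
-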
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi32 (__m128i __A, int __B)
-{
-  __v4su lshift;
-  __v4si result = { 0, 0, 0, 0 };
-
-  if (__B >= 0 && __B < 32)
-    {
-      if (__builtin_constant_p(__B) && __B < 16)
-	lshift = (__v4su) vec_splat_s32(__B);
-      else
-	lshift = vec_splats ((unsigned int) __B);
-
-      result = vec_sl ((__v4si) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi64 (__m128i __A, int __B)
-{
-  __v2du lshift;
-  __v2di result = { 0, 0 };
-
-  if (__B >= 0 && __B < 64)
-    {
-      if (__builtin_constant_p(__B) && __B < 16)
-	lshift = (__v2du) vec_splat_s32(__B);
-      else
-	lshift = (__v2du) vec_splats ((unsigned int) __B);
-
-      result = vec_sl ((__v2di) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi16 (__m128i __A, int __B)
-{
-  __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hi result;
-
-  if (__B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	rshift = (__v8hu) vec_splat_s16(__B);
-      else
-	rshift = vec_splats ((unsigned short) __B);
-    }
-  result = vec_sra ((__v8hi) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi32 (__m128i __A, int __B)
-{
-  __v4su rshift = { 31, 31, 31, 31 };
-  __v4si result;
-
-  if (__B < 32)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	    rshift = (__v4su) vec_splat_s32(__B);
-	  else
-	    rshift = (__v4su) vec_splats((unsigned int)__B);
-	}
-      else
-	rshift = vec_splats ((unsigned int) __B);
-    }
-  result = vec_sra ((__v4si) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bslli_si128 (__m128i __A, const int __N)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__N < 16)
-    result = vec_sld ((__v16qu) __A, zeros, __N);
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bsrli_si128 (__m128i __A, const int __N)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__N < 16)
-#ifdef __LITTLE_ENDIAN__
-    if (__builtin_constant_p(__N))
-      /* Use Vector Shift Left Double by Octet Immediate here to get
-	 the immediate form and avoid loading the __N * 8 value into
-	 a separate VR.  */
-      result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
-    else
-#endif
-      {
-	__v16qu shift = vec_splats((unsigned char)(__N*8));
-#ifdef __LITTLE_ENDIAN__
-	result = vec_sro ((__v16qu)__A, shift);
-#else
-	result = vec_slo ((__v16qu)__A, shift);
-#endif
-      }
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
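-/* Usage sketch (a hypothetical helper, not part of the original header):
-   the shift is in whole bytes, so shifting right by eight moves the high
-   doubleword of __A into lane 0, a cheap way to extract it.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_high_qword (__m128i __A)
-{
-  return ((__v2di) _mm_bsrli_si128 (__A, 8))[0];
-}
-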
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_si128 (__m128i __A, const int __N)
-{
-  return _mm_bsrli_si128 (__A, __N);
-}
-
-extern __inline  __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_si128 (__m128i __A, const int _imm5)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (_imm5 < 16)
-#ifdef __LITTLE_ENDIAN__
-    result = vec_sld ((__v16qu) __A, zeros, _imm5);
-#else
-    result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
-#endif
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi16 (__m128i __A, int __B)
-{
-  __v8hu rshift;
-  __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	rshift = (__v8hu) vec_splat_s16(__B);
-      else
-	rshift = vec_splats ((unsigned short) __B);
-
-      result = vec_sr ((__v8hi) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi32 (__m128i __A, int __B)
-{
-  __v4su rshift;
-  __v4si result = { 0, 0, 0, 0 };
-
-  if (__B < 32)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	    rshift = (__v4su) vec_splat_s32(__B);
-	  else
-	    rshift = (__v4su) vec_splats((unsigned int)__B);
-	}
-      else
-	rshift = vec_splats ((unsigned int) __B);
-
-      result = vec_sr ((__v4si) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi64 (__m128i __A, int __B)
-{
-  __v2du rshift;
-  __v2di result = { 0, 0 };
-
-  if (__B < 64)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	    rshift = (__v2du) vec_splat_s32(__B);
-	  else
-	    rshift = (__v2du) vec_splats((unsigned long long)__B);
-	}
-      else
-	rshift = (__v2du) vec_splats ((unsigned int) __B);
-
-      result = vec_sr ((__v2di) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hu lshift;
-  __vector __bool short shmask;
-  const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu result;
-
-#ifdef __LITTLE_ENDIAN__
-  lshift = vec_splat ((__v8hu) __B, 0);
-#else
-  lshift = vec_splat ((__v8hu) __B, 3);
-#endif
-  shmask = vec_cmple (lshift, shmax);
-  result = vec_sl ((__v8hu) __A, lshift);
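-  /* Lanes whose count exceeds 15 get an all-zero shmask, so the select
-     below forces those lanes to zero, matching x86 semantics.  */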
-  result = vec_sel ((__v8hu) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi32 (__m128i __A, __m128i __B)
-{
-  __v4su lshift;
-  __vector __bool int shmask;
-  const __v4su shmax = { 32, 32, 32, 32 };
-  __v4su result;
-#ifdef __LITTLE_ENDIAN__
-  lshift = vec_splat ((__v4su) __B, 0);
-#else
-  lshift = vec_splat ((__v4su) __B, 1);
-#endif
-  shmask = vec_cmplt (lshift, shmax);
-  result = vec_sl ((__v4su) __A, lshift);
-  result = vec_sel ((__v4su) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi64 (__m128i __A, __m128i __B)
-{
-  __v2du lshift;
-  __vector __bool long long shmask;
-  const __v2du shmax = { 64, 64 };
-  __v2du result;
-
-  lshift = vec_splat ((__v2du) __B, 0);
-  shmask = vec_cmplt (lshift, shmax);
-  result = vec_sl ((__v2du) __A, lshift);
-  result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi16 (__m128i __A, __m128i __B)
-{
-  const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu rshift;
-  __v8hi result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v8hu)__B, 0);
-#else
-  rshift = vec_splat ((__v8hu)__B, 3);
-#endif
-  rshift = vec_min (rshift, rshmax);
-  result = vec_sra ((__v8hi) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi32 (__m128i __A, __m128i __B)
-{
-  const __v4su rshmax = { 31, 31, 31, 31 };
-  __v4su rshift;
-  __v4si result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v4su)__B, 0);
-#else
-  rshift = vec_splat ((__v4su)__B, 1);
-#endif
-  rshift = vec_min (rshift, rshmax);
-  result = vec_sra ((__v4si) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hu rshift;
-  __vector __bool short shmask;
-  const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v8hu) __B, 0);
-#else
-  rshift = vec_splat ((__v8hu) __B, 3);
-#endif
-  shmask = vec_cmple (rshift, shmax);
-  result = vec_sr ((__v8hu) __A, rshift);
-  result = vec_sel ((__v8hu) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi32 (__m128i __A, __m128i __B)
-{
-  __v4su rshift;
-  __vector __bool int shmask;
-  const __v4su shmax = { 32, 32, 32, 32 };
-  __v4su result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v4su) __B, 0);
-#else
-  rshift = vec_splat ((__v4su) __B, 1);
-#endif
-  shmask = vec_cmplt (rshift, shmax);
-  result = vec_sr ((__v4su) __A, rshift);
-  result = vec_sel ((__v4su) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi64 (__m128i __A, __m128i __B)
-{
-  __v2du rshift;
-  __vector __bool long long shmask;
-  const __v2du shmax = { 64, 64 };
-  __v2du result;
-
-  rshift = vec_splat ((__v2du) __B, 0);
-  shmask = vec_cmplt (rshift, shmax);
-  result = vec_sr ((__v2du) __A, rshift);
-  result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_pd (__m128d __A, __m128d __B)
-{
-  return (vec_and ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_pd (__m128d __A, __m128d __B)
-{
-  return (vec_andc ((__v2df) __B, (__v2df) __A));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_pd (__m128d __A, __m128d __B)
-{
-  return (vec_or ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_pd (__m128d __A, __m128d __B)
-{
-  return (vec_xor ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_epi16 (__m128i const __A, int const __N)
-{
-  return (unsigned short) ((__v8hi)__A)[__N & 7];
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
-{
-  __v8hi result = (__v8hi)__A;
-
-  result [(__N & 7)] = __D;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
-}
-
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 16-bit mask from the most significant bits of the 16 bytes.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_epi8 (__m128i __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned char perm_mask =
-    {
-	0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
-	0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
-    };
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
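-
-/* Usage sketch (a hypothetical helper, not part of the original header):
-   combined with _mm_cmpeq_epi8 this is the usual x86 idiom for testing
-   two vectors for whole-register equality.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_vectors_equal (__m128i __A, __m128i __B)
-{
-  return _mm_movemask_epi8 (_mm_cmpeq_epi8 (__A, __B)) == 0xffff;
-}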
-#endif /* _ARCH_PWR8 */
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epu16 (__m128i __A, __m128i __B)
-{
-  __v4su w0, w1;
-  __v16qu xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x08, 0x09, 0x18, 0x19,  0x0C, 0x0D, 0x1C, 0x1D
-#endif
-    };
-
-  w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
-  w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
-  return (__m128i) vec_perm (w0, w1, xform1);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflehi_epi16 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_98 = __mask & 0x03;
-  unsigned long element_selector_BA = (__mask >> 2) & 0x03;
-  unsigned long element_selector_DC = (__mask >> 4) & 0x03;
-  unsigned long element_selector_FE = (__mask >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
-#else
-	      0x0809, 0x0A0B, 0x0C0D, 0x0E0F
-#endif
-    };
-  __v2du pmask =
-#ifdef __LITTLE_ENDIAN__
-      { 0x1716151413121110UL,  0UL};
-#else
-      { 0x1011121314151617UL,  0UL};
-#endif
-  __m64_union t;
-  __v2du a, r;
-
-  t.as_short[0] = permute_selectors[element_selector_98];
-  t.as_short[1] = permute_selectors[element_selector_BA];
-  t.as_short[2] = permute_selectors[element_selector_DC];
-  t.as_short[3] = permute_selectors[element_selector_FE];
-  pmask[1] = t.as_m64;
-  a = (__v2du)__A;
-  r = vec_perm (a, a, (__vector unsigned char)pmask);
-  return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflelo_epi16 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	      0x0100, 0x0302, 0x0504, 0x0706
-#else
-	      0x0001, 0x0203, 0x0405, 0x0607
-#endif
-    };
-  __v2du pmask =
-#ifdef __LITTLE_ENDIAN__
-                 { 0UL,  0x1f1e1d1c1b1a1918UL};
-#else
-                 { 0UL,  0x18191a1b1c1d1e1fUL};
-#endif
-  __m64_union t;
-  __v2du a, r;
-  t.as_short[0] = permute_selectors[element_selector_10];
-  t.as_short[1] = permute_selectors[element_selector_32];
-  t.as_short[2] = permute_selectors[element_selector_54];
-  t.as_short[3] = permute_selectors[element_selector_76];
-  pmask[0] = t.as_m64;
-  a = (__v2du)__A;
-  r = vec_perm (a, a, (__vector unsigned char)pmask);
-  return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi32 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned int permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
-#else
-      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
-#endif
-    };
-  __v4su t;
-
-  t[0] = permute_selectors[element_selector_10];
-  t[1] = permute_selectors[element_selector_32];
-  t[2] = permute_selectors[element_selector_54] + 0x10101010;
-  t[3] = permute_selectors[element_selector_76] + 0x10101010;
-  return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
-}
-
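-/* Usage sketch (a hypothetical helper, not part of the original header):
-   each 2-bit field of the mask selects one source lane, so a mask of 0
-   replicates lane 0, the common "splat" idiom.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_splat_lane0 (__m128i __A)
-{
-  return _mm_shuffle_epi32 (__A, 0x00);
-}
-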
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
-{
-  __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
-  __v16qu mask, tmp;
-  __m128i_u *p = (__m128i_u*)__C;
-
-  tmp = (__v16qu)_mm_loadu_si128(p);
-  mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
-  tmp = vec_sel (tmp, (__v16qu)__A, mask);
-  _mm_storeu_si128 (p, (__m128i)tmp);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
-}
-
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_epu8 (__m128i __A, __m128i __B)
-{
-  __v16qu a, b;
-  __v16qu vmin, vmax, vabsdiff;
-  __v4si vsum;
-  const __v4su zero = { 0, 0, 0, 0 };
-  __v4si result;
-
-  a = (__v16qu) __A;
-  b = (__v16qu) __B;
-  vmin = vec_min (a, b);
-  vmax = vec_max (a, b);
-  vabsdiff = vec_sub (vmax, vmin);
-  /* Sum four groups of bytes into integers.  */
-  vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
-  /* Sum across four integers with two integer results.  */
-  result = vec_sum2s (vsum, (__vector signed int) zero);
-  /* Rotate the sums into the correct position.  */
-#ifdef __LITTLE_ENDIAN__
-  result = vec_sld (result, result, 4);
-#else
-  result = vec_sld (result, result, 6);
-#endif
-  return (__m128i) result;
-}
-
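-/* Usage sketch (a hypothetical helper, not part of the original header):
-   the two 16-bit partial sums land in 32-bit lanes 0 and 2, so the full
-   sum of absolute differences over all 16 bytes reads both lanes.  */
-extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_total_sad (__m128i __A, __m128i __B)
-{
-  __m128i __s = _mm_sad_epu8 (__A, __B);
-  return ((__v4su) __s)[0] + ((__v4su) __s)[2];
-}
-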
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si32 (int *__A, int __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si64 (long long int *__A, long long int __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si128 (__m128i *__A, __m128i __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pd (double *__A, __m128d __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *(__m128d*)__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_clflush (void const *__A)
-{
-  /* Use the data cache block flush.  */
-  __asm__ (
-    "dcbf 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lfence (void)
-{
-  /* Use light weight sync for load to load ordering.  */
-  __atomic_thread_fence (__ATOMIC_RELEASE);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mfence (void)
-{
-  /* Use heavy weight sync for any to any ordering.  */
-  __atomic_thread_fence (__ATOMIC_SEQ_CST);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_si128 (int __A)
-{
-  return _mm_set_epi32 (0, 0, 0, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_si128 (long long __A)
-{
-  return __extension__ (__m128i)(__v2di){ __A, 0LL };
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_si128 (long long __A)
-{
-  return __extension__ (__m128i)(__v2di){ __A, 0LL };
-}
-
-/* Casts between various SP, DP, INT vector types.  Note that these do no
-   conversion of values, they just change the type.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_ps(__m128d __A)
-{
-  return (__m128) __A;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_si128(__m128d __A)
-{
-  return (__m128i) __A;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_pd(__m128 __A)
-{
-  return (__m128d) __A;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_si128(__m128 __A)
-{
-  return (__m128i) __A;
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_ps(__m128i __A)
-{
-  return (__m128) __A;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_pd(__m128i __A)
-{
-  return (__m128d) __A;
-}
-
-#else
-#include_next <emmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* EMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
deleted file mode 100644
index 86cf1a0..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef _MM_MALLOC_H_INCLUDED
-#define _MM_MALLOC_H_INCLUDED
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <stdlib.h>
-
-/* We can't depend on <stdlib.h> since the prototype of posix_memalign
-   may not be visible.  */
-#ifndef __cplusplus
-extern int posix_memalign (void **, size_t, size_t);
-#else
-extern "C" int posix_memalign (void **, size_t, size_t) throw ();
-#endif
-
-static __inline void *
-_mm_malloc (size_t size, size_t alignment)
-{
-  /* PowerPC64 ELF V2 ABI requires quadword alignment.  */
-  size_t vec_align = sizeof (__vector float);
-  void *ptr;
-
-  if (alignment < vec_align)
-    alignment = vec_align;
-  if (posix_memalign (&ptr, alignment, size) == 0)
-    return ptr;
-  else
-    return NULL;
-}
-
-static __inline void
-_mm_free (void * ptr)
-{
-  free (ptr);
-}
-
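-/* Usage sketch (a hypothetical helper, not part of the original header):
-   request at least 16-byte alignment for vector data, and pair every
-   _mm_malloc with _mm_free rather than plain free.  */
-static __inline void *
-__example_alloc_floats (size_t __n)
-{
-  return _mm_malloc (__n * sizeof (float), 16);
-}
-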
-#else
-#include_next <mm_malloc.h>
-#endif
-
-#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
deleted file mode 100644
index 8d4046b..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64le.
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.
-
-   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
-   is a good match for most SIMD operations.  However, the horizontal
-   add/sub operations require the data pairs to be permuted into
-   separate registers with vertical even/odd alignment, and the addsub
-   operation requires that the sign of only the even-numbered elements
-   be flipped (xored with -0.0).
-   For larger blocks of code using these intrinsic implementations,
-   the compiler should be able to schedule instructions to avoid
-   additional latency.
-
-   In the specific case of the monitor and mwait instructions, there is
-   no direct equivalent in the PowerISA at this time.  So those
-   intrinsics are not implemented.  */
-#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this warning."
-#endif
-
-#ifndef PMMINTRIN_H_
-#define PMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-/* We need definitions from the SSE2 and SSE header files.  */
-#include <emmintrin.h>
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_ps (__m128 __X, __m128 __Y)
-{
-  const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
-  __v4sf even_neg_Y = vec_xor(__Y, even_n0);
-  return (__m128) vec_add (__X, even_neg_Y);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_pd (__m128d __X, __m128d __Y)
-{
-  const __v2df even_n0 = {-0.0, 0.0};
-  __v2df even_neg_Y = vec_xor(__Y, even_n0);
-  return (__m128d) vec_add (__X, even_neg_Y);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_ps (__m128 __X, __m128 __Y)
-{
-  __vector unsigned char xform2 = {
-      0x00, 0x01, 0x02, 0x03,
-      0x08, 0x09, 0x0A, 0x0B,
-      0x10, 0x11, 0x12, 0x13,
-      0x18, 0x19, 0x1A, 0x1B
-    };
-  __vector unsigned char xform1 = {
-      0x04, 0x05, 0x06, 0x07,
-      0x0C, 0x0D, 0x0E, 0x0F,
-      0x14, 0x15, 0x16, 0x17,
-      0x1C, 0x1D, 0x1E, 0x1F
-    };
-  return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
-			   vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
-}
-
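-/* Usage sketch (a hypothetical helper, not part of the original header):
-   two rounds of horizontal add reduce four lanes to the full sum,
-   replicated into every lane.  */
-extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_hsum4 (__m128 __X)
-{
-  __m128 __t = _mm_hadd_ps (__X, __X);  /* { x0+x1, x2+x3, x0+x1, x2+x3 } */
-  __t = _mm_hadd_ps (__t, __t);         /* every lane holds x0+x1+x2+x3 */
-  return ((__v4sf) __t)[0];
-}
-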
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_ps (__m128 __X, __m128 __Y)
-{
-  __vector unsigned char xform2 = {
-      0x00, 0x01, 0x02, 0x03,
-      0x08, 0x09, 0x0A, 0x0B,
-      0x10, 0x11, 0x12, 0x13,
-      0x18, 0x19, 0x1A, 0x1B
-    };
-  __vector unsigned char xform1 = {
-      0x04, 0x05, 0x06, 0x07,
-      0x0C, 0x0D, 0x0E, 0x0F,
-      0x14, 0x15, 0x16, 0x17,
-      0x1C, 0x1D, 0x1E, 0x1F
-    };
-  return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
-			   vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pd (__m128d __X, __m128d __Y)
-{
-  return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
-				  vec_mergel ((__v2df) __X, (__v2df)__Y));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pd (__m128d __X, __m128d __Y)
-{
-  return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
-			    vec_mergel ((__v2df) __X, (__v2df)__Y));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehdup_ps (__m128 __X)
-{
-  return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_moveldup_ps (__m128 __X)
-{
-  return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loaddup_pd (double const *__P)
-{
-  return (__m128d) vec_splats (*__P);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movedup_pd (__m128d __X)
-{
-  return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lddqu_si128 (__m128i const *__P)
-{
-  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
-}
-
-/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_mwait.  */
-
-#else
-#include_next <pmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* PMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
deleted file mode 100644
index 6747032..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.
-
-   NOTE: This is NOT a complete implementation of the SSE4 intrinsics!  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
-
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.  */
-#error                                                                         \
-    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
-
-#ifndef SMMINTRIN_H_
-#define SMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-#include <tmmintrin.h>
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi8(__m128i __X, const int __N) {
-  return (unsigned char)((__v16qi)__X)[__N & 15];
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi32(__m128i __X, const int __N) {
-  return ((__v4si)__X)[__N & 3];
-}
-
-extern __inline long long
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi64(__m128i __X, const int __N) {
-  return ((__v2di)__X)[__N & 1];
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_ps(__m128 __X, const int __N) {
-  return ((__v4si)__X)[__N & 3];
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
-  __v16qi __charmask = vec_splats((signed char)__imm8);
-  __charmask = vec_gb(__charmask);
-  __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
-#ifdef __BIG_ENDIAN__
-  __shortmask = vec_reve(__shortmask);
-#endif
-  return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
-}
-
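-/* Usage sketch (a hypothetical helper, not part of the original header):
-   bit i of __imm8 selects halfword lane i from __B, so 0xaa takes the
-   odd lanes from __B and the even lanes from __A.  */
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    __example_blend_odd(__m128i __A, __m128i __B) {
-  return _mm_blend_epi16(__A, __B, 0xaa);
-}
-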
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
-  const __v16qu __seven = vec_splats((unsigned char)0x07);
-  __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
-  return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
-  __v16qi result = (__v16qi)__A;
-  result[__N & 0xf] = __D;
-  return (__m128i)result;
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
-  __v4si result = (__v4si)__A;
-  result[__N & 3] = __D;
-  return (__m128i)result;
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
-  __v2di result = (__v2di)__A;
-  result[__N & 1] = __D;
-  return (__m128i)result;
-}
-
-#else
-#include_next <smmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* _SMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
deleted file mode 100644
index ebef7b8..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
+++ /dev/null
@@ -1,496 +0,0 @@
-/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64le.
-
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.  */
-#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
-
-#ifndef TMMINTRIN_H_
-#define TMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-
-/* We need definitions from the SSE header files.  */
-#include <pmmintrin.h>
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi16 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v8hi) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi32 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v4si) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi8 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v16qi) __A);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi16 (__m64 __A)
-{
-  __v8hi __B = (__v8hi) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi32 (__m64 __A)
-{
-  __v4si __B = (__v4si) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi8 (__m64 __A)
-{
-  __v16qi __B = (__v16qi) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
-{
-  if (__builtin_constant_p (__count) && __count < 16)
-    {
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_reve ((__v16qu) __A);
-      __B = (__m128i) vec_reve ((__v16qu) __B);
-#endif
-      __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_reve ((__v16qu) __A);
-#endif
-      return __A;
-    }
-
-  if (__count == 0)
-    return __B;
-
-  if (__count >= 16)
-    {
-      if (__count >= 32)
-	{
-	  const __v16qu zero = { 0 };
-	  return (__m128i) zero;
-	}
-      else
-	{
-	  const __v16qu __shift =
-	    vec_splats ((unsigned char) ((__count - 16) * 8));
-#ifdef __LITTLE_ENDIAN__
-	  return (__m128i) vec_sro ((__v16qu) __A, __shift);
-#else
-	  return (__m128i) vec_slo ((__v16qu) __A, __shift);
-#endif
-	}
-    }
-  else
-    {
-      const __v16qu __shiftA =
-	vec_splats ((unsigned char) ((16 - __count) * 8));
-      const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
-      __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
-#else
-      __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
-      __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
-#endif
-      return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
-    }
-}
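For reference, the concatenate-and-shift behaviour implemented above can be sketched in scalar C (a hypothetical helper, not part of this header): result byte i is byte i + __count of the 32-byte value formed by __B in the low half and __A in the high half, and counts of 32 or more yield zero.

static inline void
alignr_ref (unsigned char dst[16], const unsigned char a[16],
            const unsigned char b[16], unsigned int count)
{
  for (unsigned int i = 0; i < 16; i++)
    {
      unsigned int j = i + count;  /* byte index into the pair [b:a] */
      if (j < 16)
        dst[i] = b[j];
      else if (j < 32)
        dst[i] = a[j - 16];
      else
        dst[i] = 0;                /* shifted past both sources */
    }
}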
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
-{
-  if (__count < 16)
-    {
-      __v2du __C = { __B, __A };
-#ifdef __LITTLE_ENDIAN__
-      const __v4su __shift = { __count << 3, 0, 0, 0 };
-      __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
-#else
-      const __v4su __shift = { 0, 0, 0, __count << 3 };
-      __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
-#endif
-      return (__m64) __C[0];
-    }
-  else
-    {
-      const __m64 __zero = { 0 };
-      return __zero;
-    }
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_add (__C, __D);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi32 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
-  __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
-  __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
-  return (__m128i) vec_add (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi16 (__m64 __A, __m64 __B)
-{
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_add (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi32 (__m64 __A, __m64 __B)
-{
-  __v4si __C = (__v4si) (__v2du) { __A, __B };
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11,  0,  1,  2,  3,  8,  9, 10, 11 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15,  4,  5,  6,  7, 12, 13, 14, 15 };
-  __v4si __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_add (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_epi16 (__m128i __A, __m128i __B)
-{
-  __v4si __C = { 0 }, __D = { 0 };
-  __C = vec_sum4s ((__v8hi) __A, __C);
-  __D = vec_sum4s ((__v8hi) __B, __D);
-  __C = (__v4si) vec_packs (__C, __D);
-  return (__m128i) __C;
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_pi16 (__m64 __A, __m64 __B)
-{
-  const __v4si __zero = { 0 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v4si __D = vec_sum4s (__C, __zero);
-  __C = vec_packs (__D, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_sub (__C, __D);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi32 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
-  __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
-  __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
-  return (__m128i) vec_sub (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi16 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v8hi __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_sub (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi32 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11,  0,  1,  2,  3,  8,  9, 10, 11 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15,  4,  5,  6,  7, 12, 13, 14, 15 };
-  __v4si __C = (__v4si) (__v2du) { __A, __B };
-  __v4si __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_sub (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_subs (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_pi16 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v8hi __D = vec_perm (__C, __C, __P);
-  __v8hi __E = vec_perm (__C, __C, __Q);
-  __C = vec_subs (__D, __E);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi8 (__m128i __A, __m128i __B)
-{
-  const __v16qi __zero = { 0 };
-  __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero);
-  __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B);
-  return (__m128i) vec_sel (__C, __zero, __select);
-}
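For reference, the pshufb semantics implemented above can be sketched in scalar C (a hypothetical helper, not part of this header): each result byte is zero when the control byte's sign bit is set, otherwise a byte of __A selected by the low four control bits.

static inline void
shuffle_epi8_ref (unsigned char dst[16], const unsigned char a[16],
                  const signed char ctl[16])
{
  for (int i = 0; i < 16; i++)
    dst[i] = (ctl[i] < 0) ? 0 : a[ctl[i] & 0x0F];
}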
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi8 (__m64 __A, __m64 __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __C = (__v16qi) (__v2du) { __A, __A };
-  __v16qi __D = (__v16qi) (__v2du) { __B, __B };
-  __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero);
-  __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D);
-  __C = vec_sel (__C, __zero, __select);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi8 (__m128i __A, __m128i __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero);
-  __v16qi __selectpos =
-    (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero));
-  __v16qi __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi16 (__m128i __A, __m128i __B)
-{
-  const __v8hi __zero = { 0 };
-  __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero);
-  __v8hi __selectpos =
-    (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero));
-  __v8hi __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi32 (__m128i __A, __m128i __B)
-{
-  const __v4si __zero = { 0 };
-  __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero);
-  __v4si __selectpos =
-    (__v4si) vec_neg ((__v4si) vec_cmpgt ((__v4si) __B, __zero));
-  __v4si __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv);
-}
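For reference, the per-element rule shared by the _mm_sign_* implementations above can be sketched in scalar C (a hypothetical helper, not part of this header); the select-negative and select-positive masks combine into a multiplier of -1, 0, or +1.

static inline int
sign_ref (int a, int b)
{
  return (b < 0) ? -a : (b > 0) ? a : 0;
}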
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi8 (__m64 __A, __m64 __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __C = (__v16qi) (__v2du) { __A, __A };
-  __v16qi __D = (__v16qi) (__v2du) { __B, __B };
-  __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi16 (__m64 __A, __m64 __B)
-{
-  const __v8hi __zero = { 0 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __A };
-  __v8hi __D = (__v8hi) (__v2du) { __B, __B };
-  __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi32 (__m64 __A, __m64 __B)
-{
-  const __v4si __zero = { 0 };
-  __v4si __C = (__v4si) (__v2du) { __A, __A };
-  __v4si __D = (__v4si) (__v2du) { __B, __B };
-  __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
-  __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned);
-  __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned);
-  __v8hi __E = vec_unpackh ((__v16qi) __B);
-  __v8hi __F = vec_unpackl ((__v16qi) __B);
-  __C = vec_mul (__C, __E);
-  __D = vec_mul (__D, __F);
-  const __v16qu __odds  =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __evens =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __E = vec_perm (__C, __D, __odds);
-  __F = vec_perm (__C, __D, __evens);
-  return (__m128i) vec_adds (__E, __F);
-}
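For reference, a single result lane of the implementation above can be sketched in scalar C (a hypothetical helper, not part of this header): unsigned bytes of __A are multiplied by the corresponding signed bytes of __B, and adjacent products are added with signed saturation.

static inline short
maddubs_pair_ref (unsigned char a0, signed char b0,
                  unsigned char a1, signed char b1)
{
  int sum = (int) a0 * b0 + (int) a1 * b1;
  if (sum > 32767) sum = 32767;    /* saturate to the short range */
  if (sum < -32768) sum = -32768;
  return (short) sum;
}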
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_pi16 (__m64 __A, __m64 __B)
-{
-  __v8hi __C = (__v8hi) (__v2du) { __A, __A };
-  __C = vec_unpackl ((__v16qi) __C);
-  const __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
-  __C = vec_and (__C, __unsigned);
-  __v8hi __D = (__v8hi) (__v2du) { __B, __B };
-  __D = vec_unpackl ((__v16qi) __D);
-  __D = vec_mul (__C, __D);
-  const __v16qu __odds  =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __evens =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __C = vec_perm (__D, __D, __odds);
-  __D = vec_perm (__D, __D, __evens);
-  __C = vec_adds (__C, __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_epi16 (__m128i __A, __m128i __B)
-{
-  __v4si __C = vec_unpackh ((__v8hi) __A);
-  __v4si __D = vec_unpackh ((__v8hi) __B);
-  __C = vec_mul (__C, __D);
-  __D = vec_unpackl ((__v8hi) __A);
-  __v4si __E = vec_unpackl ((__v8hi) __B);
-  __D = vec_mul (__D, __E);
-  const __v4su __shift = vec_splats ((unsigned int) 14);
-  __C = vec_sr (__C, __shift);
-  __D = vec_sr (__D, __shift);
-  const __v4si __ones = vec_splats ((signed int) 1);
-  __C = vec_add (__C, __ones);
-  __C = vec_sr (__C, (__v4su) __ones);
-  __D = vec_add (__D, __ones);
-  __D = vec_sr (__D, (__v4su) __ones);
-  return (__m128i) vec_pack (__C, __D);
-}
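For reference, the widen, shift-right-14, add-one, shift-right-1 sequence above is the usual round-to-nearest form of a Q15 multiply; a scalar equivalent (a hypothetical helper, not part of this header) is:

static inline short
mulhrs_ref (short a, short b)
{
  /* ((a * b) >> 15) rounded to nearest by adding half an ULP first.  */
  return (short) ((((int) a * b) + (1 << 14)) >> 15);
}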
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_pi16 (__m64 __A, __m64 __B)
-{
-  __v4si __C = (__v4si) (__v2du) { __A, __A };
-  __C = vec_unpackh ((__v8hi) __C);
-  __v4si __D = (__v4si) (__v2du) { __B, __B };
-  __D = vec_unpackh ((__v8hi) __D);
-  __C = vec_mul (__C, __D);
-  const __v4su __shift = vec_splats ((unsigned int) 14);
-  __C = vec_sr (__C, __shift);
-  const __v4si __ones = vec_splats ((signed int) 1);
-  __C = vec_add (__C, __ones);
-  __C = vec_sr (__C, (__v4su) __ones);
-  __v8hi __E = vec_pack (__C, __D);
-  return (__m64) ((__v2du) (__E))[0];
-}
-
-#else
-#include_next <tmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* TMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
deleted file mode 100644
index 956603d..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
+++ /dev/null
@@ -1,1844 +0,0 @@
-/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header file is to help port code that uses Intel intrinsics
-   explicitly from x86_64 to powerpc64/powerpc64le.
-
-   Since the X86 SSE intrinsics mainly handle the __m128 type, the
-   PowerPC VMX/VSX ISA is a good match for vector float SIMD
-   operations.  However, scalar float operations in vector (XMM)
-   registers require the POWER8 VSX ISA (2.07) level.  There are
-   differences in data format and in the placement of float scalars
-   within the vector register that require extra steps to match SSE
-   scalar float semantics on POWER.
-
-   Note that the x86_64 MXCSR and the PowerISA FPSCR/VSCR registers
-   differ significantly.  It is recommended to use the portable
-   <fenv.h> interfaces instead of accessing the MXCSR directly.
-
-   Most SSE scalar float intrinsic operations can be performed more
-   efficiently as C language float scalar operations or optimized to
-   use vector SIMD operations.  We recommend this for new applications. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
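A minimal sketch of the <fenv.h> recommendation above (hypothetical usage, not part of this header): rounding-mode control through the portable floating-point environment rather than direct MXCSR or FPSCR access.

#include <fenv.h>
#include <math.h>

/* Round x toward zero via the portable environment, restoring the
   caller's rounding mode afterwards; returns -1 if the mode is not
   supported.  Strictly conforming code would also enable
   "#pragma STDC FENV_ACCESS ON".  */
static inline int
lrint_toward_zero (double x, long *out)
{
  int old = fegetround ();
  if (fesetround (FE_TOWARDZERO) != 0)
    return -1;
  *out = lrint (x);
  fesetround (old);
  return 0;
}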
-
-#ifndef _XMMINTRIN_H_INCLUDED
-#define _XMMINTRIN_H_INCLUDED
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-/* Define a permute mask from four element selectors.  */
-#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
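A worked example of the packing above (a hypothetical compile-time check, not part of this header): selectors w=3, x=2, y=1, z=0 pack to (3 << 6) | (2 << 4) | (1 << 2) | 0 = 0xE4, the identity shuffle.

_Static_assert (_MM_SHUFFLE (3, 2, 1, 0) == 0xE4,
                "identity shuffle mask packs to 0xE4");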
-
-#include <altivec.h>
-
-/* Avoid collisions between altivec.h and strict adherence to C++ and
-   C11 standards.  This should eventually be done inside altivec.h itself,
-   but only after testing a full distro build.  */
-#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
-				 (defined(__STDC_VERSION__) &&	\
-				  __STDC_VERSION__ >= 201112L))
-#undef vector
-#undef pixel
-#undef bool
-#endif
-
-/* We need type definitions from the MMX header file.  */
-#include <mmintrin.h>
-
-/* Get _mm_malloc () and _mm_free ().  */
-#if __STDC_HOSTED__
-#include <mm_malloc.h>
-#endif
-
-/* The Intel API is flexible enough that we must allow aliasing with other
-   vector types, and their scalar components.  */
-typedef vector float __m128 __attribute__((__may_alias__));
-
-/* Unaligned version of the same type.  */
-typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
-
-/* Internal data types for implementing the intrinsics.  */
-typedef vector float __v4sf;
-
-/* Create an undefined vector.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_ps (void)
-{
-  __m128 __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_ps (void)
-{
-  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
-}
-
-/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps (float const *__P)
-{
-  return ((__m128)vec_ld(0, (__v4sf*)__P));
-}
-
-/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_ps (float const *__P)
-{
-  return (vec_vsx_ld(0, __P));
-}
-
-/* Load four SPFP values in reverse order.  The address must be aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_ps (float const *__P)
-{
-  __v4sf   __tmp;
-  __m128 result;
-  static const __vector unsigned char permute_vector =
-    { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
-	0x17, 0x10, 0x11, 0x12, 0x13 };
-
-  __tmp = vec_ld (0, (__v4sf *) __P);
-  result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
-  return result;
-}
-
-/* Create a vector with all four elements equal to F.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_ps (float __F)
-{
-  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps1 (float __F)
-{
-  return _mm_set1_ps (__F);
-}
-
-/* Create the vector [Z Y X W].  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
-{
-  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
-}
-
-/* Create the vector [W X Y Z].  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_ps (float __Z, float __Y, float __X, float __W)
-{
-  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
-}
-
-/* Store four SPFP values.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps (float *__P, __m128 __A)
-{
-  vec_st((__v4sf)__A, 0, (__v4sf*)__P);
-}
-
-/* Store four SPFP values.  The address need not be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_ps (float *__P, __m128 __A)
-{
-  *(__m128_u *)__P = __A;
-}
-
-/* Store four SPFP values in reverse order.  The address must be aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_ps (float *__P, __m128 __A)
-{
-  __v4sf   __tmp;
-  static const __vector unsigned char permute_vector =
-    { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
-	0x17, 0x10, 0x11, 0x12, 0x13 };
-
-  __tmp = (__m128) vec_perm (__A, __A, permute_vector);
-
-  _mm_store_ps (__P, __tmp);
-}
-
-/* Store the lower SPFP value across four words.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_ps (float *__P, __m128 __A)
-{
-  __v4sf __va = vec_splat((__v4sf)__A, 0);
-  _mm_store_ps (__P, __va);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps1 (float *__P, __m128 __A)
-{
-  _mm_store1_ps (__P, __A);
-}
-
-/* Create a vector with element 0 as F and the rest zero.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ss (float __F)
-{
-  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
-}
-
-/* Sets the low SPFP value of A from the low value of B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-
-  return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
-}
-
-/* Create a vector with element 0 as *P and the rest zero.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ss (float const *__P)
-{
-  return _mm_set_ss (*__P);
-}
-
-/* Stores the lower SPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ss (float *__P, __m128 __A)
-{
-  *__P = ((__v4sf)__A)[0];
-}
-
-/* Perform the respective operation on the lower SPFP (single-precision
-   floating-point) values of A and B; the upper three SPFP values are
-   passed through from A.  */
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results. So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a + b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] + __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results. So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a - b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] - __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results. So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a * b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] * __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results. So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a / b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] / __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = vec_sqrt (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-/* Perform the respective operation on the four SPFP values in A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A + (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A - (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A * (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A / (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ps (__m128 __A)
-{
-  return (vec_sqrt ((__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ps (__m128 __A)
-{
-  return (vec_re ((__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ps (__m128 __A)
-{
-  return (vec_rsqrte (__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = _mm_rcp_ps (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = vec_rsqrte (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ss (__m128 __A, __m128 __B)
-{
-  __v4sf a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf)__A, 0);
-  b = vec_splat ((__v4sf)__B, 0);
-  c = vec_min (a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ss (__m128 __A, __m128 __B)
-{
-  __v4sf a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = vec_max (a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ps (__m128 __A, __m128 __B)
-{
-  __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
-  return vec_sel (__B, __A, m);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ps (__m128 __A, __m128 __B)
-{
-  __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
-  return vec_sel (__B, __A, m);
-}
-
-/* Perform logical bit-wise operations on 128-bit values.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
-//  return __builtin_ia32_andps (__A, __B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
-}
-
-/* Perform a comparison on the four SPFP values of A and B.  For each
-   element, if the comparison is true, place a mask of all ones in the
-   result, otherwise a mask of zeros.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ps (__m128  __A, __m128  __B)
-{
-  __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
-  return ((__m128)vec_nor (temp, temp));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ps (__m128  __A, __m128  __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
-  d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
-  return ((__m128 ) vec_and (c, d));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ps (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
-  d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
-  return ((__m128 ) vec_or (c, d));
-}
-
-/* Perform a comparison on the lower SPFP values of A and B.  If the
-   comparison is true, place a mask of all ones in the result, otherwise a
-   mask of zeros.  The upper three SPFP values are passed through from A.  */
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ss (__m128  __A, __m128  __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpeq(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmplt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmple(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpgt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpge(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpeq(a, b);
-  c = vec_nor (c, c);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpge(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpgt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmple(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results. So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmplt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ss (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
-  d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
-  c = vec_and (c, d);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ss (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
-  d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
-  c = vec_or (c, d);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-/* Compare the lower SPFP values of A and B and return 1 if true
-   and 0 if false.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-/* FIXME
- * The _mm_ucomi??_ss implementations below are exactly the same as
- * _mm_comi??_ss because GCC for PowerPC only generates unordered
- * compares (scalar and vector).
- * Technically _mm_comieq_ss et al. should be using the ordered
- * compare and signal for QNaNs.
- * The _mm_ucomieq_ss et al. should be OK as is.
- */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_f32 (__m128 __A)
-{
-  return ((__v4sf)__A)[0];
-}
-
-/* Convert the lower SPFP value to a 32-bit integer according to the current
-   rounding mode.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si32 (__m128 __A)
-{
-  __m64 res = 0;
-#ifdef _ARCH_PWR8
-  double dtmp;
-  __asm__(
-#ifdef __LITTLE_ENDIAN__
-      "xxsldwi %x0,%x0,%x0,3;\n"
-#endif
-      "xscvspdp %x2,%x0;\n"
-      "fctiw  %2,%2;\n"
-      "mfvsrd  %1,%x2;\n"
-      : "+wa" (__A),
-        "=r" (res),
-        "=f" (dtmp)
-      : );
-#else
-  res = __builtin_rint(__A[0]);
-#endif
-  return (res);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ss2si (__m128 __A)
-{
-  return _mm_cvtss_si32 (__A);
-}
-
-/* Convert the lower SPFP value to a 64-bit integer according to the
-   current rounding mode.  */
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64 (__m128 __A)
-{
-  __m64 res = 0;
-#ifdef _ARCH_PWR8
-  double dtmp;
-  __asm__(
-#ifdef __LITTLE_ENDIAN__
-      "xxsldwi %x0,%x0,%x0,3;\n"
-#endif
-      "xscvspdp %x2,%x0;\n"
-      "fctid  %2,%2;\n"
-      "mfvsrd  %1,%x2;\n"
-      : "+wa" (__A),
-        "=r" (res),
-        "=f" (dtmp)
-      : );
-#else
-  res = __builtin_llrint(__A[0]);
-#endif
-  return (res);
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64x (__m128 __A)
-{
-  return _mm_cvtss_si64 ((__v4sf) __A);
-}
-
-/* Constants for use with _mm_prefetch.  */
-enum _mm_hint
-{
-  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
-  _MM_HINT_ET0 = 7,
-  _MM_HINT_ET1 = 6,
-  _MM_HINT_T0 = 3,
-  _MM_HINT_T1 = 2,
-  _MM_HINT_T2 = 1,
-  _MM_HINT_NTA = 0
-};
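The relationship noted in the comment above can be written as a compile-time check (hypothetical, not part of this header): each ET hint is the corresponding T hint with the 3rd bit (value 4) set.

_Static_assert (_MM_HINT_ET0 == (_MM_HINT_T0 | 4), "ET0 is T0 plus bit 2");
_Static_assert (_MM_HINT_ET1 == (_MM_HINT_T1 | 4), "ET1 is T1 plus bit 2");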
-
-/* Loads one cache line from address P to a location "closer" to the
-   processor.  The selector I specifies the type of prefetch operation.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_prefetch (const void *__P, enum _mm_hint __I)
-{
-  /* Current PowerPC implementations ignore the hint parameter.  */
-  __builtin_prefetch (__P);
-}
-
-/* Convert the two lower SPFP values to 32-bit integers according to the
-   current rounding mode.  Return the integers in packed form.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi32 (__m128 __A)
-{
-  __v4sf temp, rounded;
-  __vector unsigned long long result;
-
-  /* Splat two lower SPFP values to both halves.  */
-  temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
-  rounded = vec_rint(temp);
-  result = (__vector unsigned long long) vec_cts (rounded, 0);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ps2pi (__m128 __A)
-{
-  return _mm_cvtps_pi32 (__A);
-}
-
-/* Truncate the lower SPFP value to a 32-bit integer.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si32 (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 32-bit integer and return.  */
-  return temp;
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ss2si (__m128 __A)
-{
-  return _mm_cvttss_si32 (__A);
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64 (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 64-bit integer and return.  */
-  return temp;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64x (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 64-bit integer and return.  */
-  return temp;
-}
-
-/* Truncate the two lower SPFP values to 32-bit integers.  Return the
-   integers in packed form.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_pi32 (__m128 __A)
-{
-  __v4sf temp;
-  __vector unsigned long long result;
-
-  /* Splat two lower SPFP values to both halves.  */
-  temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
-  result = (__vector unsigned long long) vec_cts (temp, 0);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ps2pi (__m128 __A)
-{
-  return _mm_cvttps_pi32 (__A);
-}
-
-/* Convert B to a SPFP value and insert it as element zero in A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_ss (__m128 __A, int __B)
-{
-  float temp = __B;
-  __A[0] = temp;
-
-  return __A;
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_si2ss (__m128 __A, int __B)
-{
-  return _mm_cvtsi32_ss (__A, __B);
-}
-
-/* Convert B to a SPFP value and insert it as element zero in A.  */
-/* Intel intrinsic.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_ss (__m128 __A, long long __B)
-{
-  float temp = __B;
-  __A[0] = temp;
-
-  return __A;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_ss (__m128 __A, long long __B)
-{
-  return _mm_cvtsi64_ss (__A, __B);
-}
-
-/* Convert the two 32-bit values in B to SPFP form and insert them
-   as the two lower elements in A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_ps (__m128        __A, __m64        __B)
-{
-  __vector signed int vm1;
-  __vector float vf1;
-
-  vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
-  vf1 = (__vector float) vec_ctf (vm1, 0);
-
-  return ((__m128) (__vector unsigned long long)
-    { ((__vector unsigned long long)vf1) [0],
-	((__vector unsigned long long)__A) [1]});
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_pi2ps (__m128 __A, __m64 __B)
-{
-  return _mm_cvtpi32_ps (__A, __B);
-}
-
-/* Convert the four signed 16-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi16_ps (__m64 __A)
-{
-  __vector signed short vs8;
-  __vector signed int vi4;
-  __vector float vf1;
-
-  vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
-  vi4 = vec_vupklsh (vs8);
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the four unsigned 16-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu16_ps (__m64 __A)
-{
-  const __vector unsigned short zero =
-    { 0, 0, 0, 0, 0, 0, 0, 0 };
-  __vector unsigned short vs8;
-  __vector unsigned int vi4;
-  __vector float vf1;
-
-  vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
-  vi4 = (__vector unsigned int) vec_mergel
-#ifdef __LITTLE_ENDIAN__
-                                           (vs8, zero);
-#else
-                                           (zero, vs8);
-#endif
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the low four signed 8-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi8_ps (__m64 __A)
-{
-  __vector signed char vc16;
-  __vector signed short vs8;
-  __vector signed int vi4;
-  __vector float vf1;
-
-  vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
-  vs8 = vec_vupkhsb (vc16);
-  vi4 = vec_vupkhsh (vs8);
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu8_ps (__m64 __A)
-{
-  const __vector unsigned char zero =
-    { 0, 0, 0, 0, 0, 0, 0, 0 };
-  __vector unsigned char vc16;
-  __vector unsigned short vs8;
-  __vector unsigned int vi4;
-  __vector float vf1;
-
-  vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
-#ifdef __LITTLE_ENDIAN__
-  vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
-  vi4 = (__vector unsigned int) vec_mergeh (vs8,
-					    (__vector unsigned short) zero);
-#else
-  vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
-  vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
-                                            vs8);
-#endif
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the four signed 32-bit values in A and B to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
-{
-  __vector signed int vi4;
-  __vector float vf4;
-
-  vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
-  vf4 = (__vector float) vec_ctf (vi4, 0);
-  return (__m128) vf4;
-}
-
-/* Convert the four SPFP values in A to four signed 16-bit integers.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16 (__m128 __A)
-{
-  __v4sf rounded;
-  __vector signed int temp;
-  __vector unsigned long long result;
-
-  rounded = vec_rint(__A);
-  temp = vec_cts (rounded, 0);
-  result = (__vector unsigned long long) vec_pack (temp, temp);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-/* Convert the four SPFP values in A to four signed 8-bit integers.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8 (__m128 __A)
-{
-  __v4sf rounded;
-  __vector signed int tmp_i;
-  static const __vector signed int zero = {0, 0, 0, 0};
-  __vector signed short tmp_s;
-  __vector signed char res_v;
-
-  rounded = vec_rint(__A);
-  tmp_i = vec_cts (rounded, 0);
-  tmp_s = vec_pack (tmp_i, zero);
-  res_v = vec_pack (tmp_s, tmp_s);
-  return (__m64) ((__vector long long) res_v)[0];
-}
-
-/* Selects four specific SPFP values from A and B based on MASK.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned int permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
-#else
-      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
-#endif
-    };
-  __vector unsigned int t;
-
-  t[0] = permute_selectors[element_selector_10];
-  t[1] = permute_selectors[element_selector_32];
-  t[2] = permute_selectors[element_selector_54] + 0x10101010;
-  t[3] = permute_selectors[element_selector_76] + 0x10101010;
-  return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
-}
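-
-/* Usage sketch (illustration only; the helper name is hypothetical):
-   a mask of 0x1B (_MM_SHUFFLE (0, 1, 2, 3)) picks A[3], A[2] for the
-   two lower result lanes and B[1], B[0] for the two upper ones, so
-   passing the same vector as A and B reverses its four lanes.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-__example_reverse_ps (__m128 __v)
-{
-  return _mm_shuffle_ps (__v, __v, 0x1B);
-}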
-
-/* Selects and interleaves the upper two SPFP values from A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
-}
-
-/* Selects and interleaves the lower two SPFP values from A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
-}
-
-/* Sets the upper two SPFP values with 64-bits of data loaded from P;
-   the lower two values are passed through from A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pi (__m128 __A, __m64 const *__P)
-{
-  __vector unsigned long long __a = (__vector unsigned long long)__A;
-  __vector unsigned long long __p = vec_splats(*__P);
-  __a [1] = __p [1];
-
-  return (__m128)__a;
-}
-
-/* Stores the upper two SPFP values of A into P.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pi (__m64 *__P, __m128 __A)
-{
-  __vector unsigned long long __a = (__vector unsigned long long) __A;
-
-  *__P = __a[1];
-}
-
-/* Moves the upper two values of B into the lower two values of A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehl_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_mergel ((__vector unsigned long long)__B,
-			      (__vector unsigned long long)__A);
-}
-
-/* Moves the lower two values of B into the upper two values of A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movelh_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_mergeh ((__vector unsigned long long)__A,
-			      (__vector unsigned long long)__B);
-}
-
-/* Sets the lower two SPFP values with 64-bits of data loaded from P;
-   the upper two values are passed through from A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pi (__m128 __A, __m64 const *__P)
-{
-  __vector unsigned long long __a = (__vector unsigned long long)__A;
-  __vector unsigned long long __p = vec_splats(*__P);
-  __a [0] = __p [0];
-
-  return (__m128)__a;
-}
-
-/* Stores the lower two SPFP values of A into P.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pi (__m64 *__P, __m128 __A)
-{
-  __vector unsigned long long __a = (__vector unsigned long long) __A;
-
-  *__P = __a[0];
-}
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_ps (__m128  __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned int perm_mask =
-    {
-#ifdef __LITTLE_ENDIAN__
-      0x00204060, 0x80808080, 0x80808080, 0x80808080
-#else
-      0x80808080, 0x80808080, 0x80808080, 0x00204060
-#endif
-    };
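-
-  /* Each byte of perm_mask is a vbpermq bit index in MSB0 numbering
-     (bit 0 is the most significant bit of the vector): 0x00, 0x20,
-     0x40 and 0x60 select the sign bit of each 32-bit lane, while
-     0x80 bytes contribute zero bits.  */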
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
-#endif /* _ARCH_PWR8 */
-
-/* Create a vector with all four elements equal to *P.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_ps (float const *__P)
-{
-  return _mm_set1_ps (*__P);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps1 (float const *__P)
-{
-  return _mm_load1_ps (__P);
-}
-
-/* Extracts one of the four words of A.  The selector N must be immediate.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_pi16 (__m64 const __A, int const __N)
-{
-  unsigned int shiftr = __N & 3;
-#ifdef __BIG_ENDIAN__
-  shiftr = 3 - shiftr;
-#endif
-
-  return ((__A >> (shiftr * 16)) & 0xffff);
-}
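-
-/* Worked example (illustration only): after
-     __m64 __t = _mm_set_pi16 (4, 3, 2, 1);
-   _mm_extract_pi16 (__t, 2) returns 3.  The big-endian adjustment of
-   the shift count above keeps element numbering identical to the x86
-   behaviour.  */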
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pextrw (__m64 const __A, int const __N)
-{
-  return _mm_extract_pi16 (__A, __N);
-}
-
-/* Inserts word D into one of four words of A.  The selector N must be
-   immediate.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
-{
-  const int shiftl = (__N & 3) * 16;
-  const __m64 shiftD = (const __m64) __D << shiftl;
-  const __m64 mask = 0xffffUL << shiftl;
-  __m64 result = (__A & (~mask)) | (shiftD & mask);
-
-  return (result);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pinsrw (__m64 const __A, int const __D, int const __N)
-{
-  return _mm_insert_pi16 (__A, __D, __N);
-}
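-
-/* Worked example (illustration only): _mm_insert_pi16 (__t, 0x7777, 1)
-   computes shiftl = 16, clears bits [31:16] of __t via ~mask, and ORs
-   in 0x7777 << 16, leaving the other three words untouched.  */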
-
-/* Compute the element-wise maximum of signed 16-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pi16 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector signed short a, b, r;
-  __vector __bool short c;
-
-  a = (__vector signed short)vec_splats (__A);
-  b = (__vector signed short)vec_splats (__B);
-  c = (__vector __bool short)vec_cmpgt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  res.as_short[0] =
-      (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
-  res.as_short[1] =
-      (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
-  res.as_short[2] =
-      (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
-  res.as_short[3] =
-      (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxsw (__m64 __A, __m64 __B)
-{
-  return _mm_max_pi16 (__A, __B);
-}
-
-/* Compute the element-wise maximum of unsigned 8-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pu8 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector unsigned char a, b, r;
-  __vector __bool char c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = (__vector __bool char)vec_cmpgt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-  long i;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  for (i = 0; i < 8; i++)
-    res.as_char[i] =
-      ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
-        m1.as_char[i] : m2.as_char[i];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxub (__m64 __A, __m64 __B)
-{
-  return _mm_max_pu8 (__A, __B);
-}
-
-/* Compute the element-wise minimum of signed 16-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pi16 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector signed short a, b, r;
-  __vector __bool short c;
-
-  a = (__vector signed short)vec_splats (__A);
-  b = (__vector signed short)vec_splats (__B);
-  c = (__vector __bool short)vec_cmplt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  res.as_short[0] =
-      (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
-  res.as_short[1] =
-      (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
-  res.as_short[2] =
-      (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
-  res.as_short[3] =
-      (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminsw (__m64 __A, __m64 __B)
-{
-  return _mm_min_pi16 (__A, __B);
-}
-
-/* Compute the element-wise minimum of unsigned 8-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pu8 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector unsigned char a, b, r;
-  __vector __bool char c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = (__vector __bool char)vec_cmplt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-  long i;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  for (i = 0; i < 8; i++)
-    res.as_char[i] =
-      ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
-        m1.as_char[i] : m2.as_char[i];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminub (__m64 __A, __m64 __B)
-{
-  return _mm_min_pu8 (__A, __B);
-}
-
-/* Create an 8-bit mask of the signs of 8-bit values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pi8 (__m64 __A)
-{
-  unsigned long long p =
-#ifdef __LITTLE_ENDIAN__
-                         0x0008101820283038UL; // permute control for sign bits
-#else
-                         0x3830282018100800UL; // permute control for sign bits
-#endif
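-  /* Each byte of p is a bit index in MSB0 numbering (bit 0 is the
-     most significant bit), so 0x00, 0x08, ..., 0x38 select the sign
-     bit of each byte of __A; __builtin_bpermd gathers the eight
-     selected bits into the low byte of the result.  */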
-  return __builtin_bpermd (p, __A);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmovmskb (__m64 __A)
-{
-  return _mm_movemask_pi8 (__A);
-}
-
-/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
-   in B and produce the high 16 bits of the 32-bit results.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_pu16 (__m64 __A, __m64 __B)
-{
-  __vector unsigned short a, b;
-  __vector unsigned short c;
-  __vector unsigned int w0, w1;
-  __vector unsigned char xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15
-#endif
-    };
-
-  a = (__vector unsigned short)vec_splats (__A);
-  b = (__vector unsigned short)vec_splats (__B);
-
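-  /* vec_vmuleuh and vec_vmulouh form the full 32-bit products of
-     alternating halfword lanes; xform1 then selects the high 16 bits
-     of each product and restores element order (hence the selector
-     differs by endianness).  */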
-  w0 = vec_vmuleuh (a, b);
-  w1 = vec_vmulouh (a, b);
-  c = (__vector unsigned short)vec_perm (w0, w1, xform1);
-
-  return (__m64) ((__vector long long) c)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmulhuw (__m64 __A, __m64 __B)
-{
-  return _mm_mulhi_pu16 (__A, __B);
-}
-
-/* Return a combination of the four 16-bit values in A.  The selector
-   must be an immediate.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi16 (__m64 __A, int const __N)
-{
-  unsigned long element_selector_10 = __N & 0x03;
-  unsigned long element_selector_32 = (__N >> 2) & 0x03;
-  unsigned long element_selector_54 = (__N >> 4) & 0x03;
-  unsigned long element_selector_76 = (__N >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
-#else
-      0x0607, 0x0405, 0x0203, 0x0001
-#endif
-    };
-  __m64_union t;
-  __vector unsigned long long a, p, r;
-
-#ifdef __LITTLE_ENDIAN__
-  t.as_short[0] = permute_selectors[element_selector_10];
-  t.as_short[1] = permute_selectors[element_selector_32];
-  t.as_short[2] = permute_selectors[element_selector_54];
-  t.as_short[3] = permute_selectors[element_selector_76];
-#else
-  t.as_short[3] = permute_selectors[element_selector_10];
-  t.as_short[2] = permute_selectors[element_selector_32];
-  t.as_short[1] = permute_selectors[element_selector_54];
-  t.as_short[0] = permute_selectors[element_selector_76];
-#endif
-  p = vec_splats (t.as_m64);
-  a = vec_splats (__A);
-  r = vec_perm (a, a, (__vector unsigned char)p);
-  return (__m64) ((__vector long long) r)[0];
-}
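-
-/* Usage sketch (illustration only): a selector of 0x00 makes all four
-   element selectors zero, so _mm_shuffle_pi16 (__t, 0x00) broadcasts
-   element 0 of __t to every 16-bit lane.  */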
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pshufw (__m64 __A, int const __N)
-{
-  return _mm_shuffle_pi16 (__A, __N);
-}
-
-/* Conditionally store byte elements of A into P.  The high bit of each
-   byte in the selector N determines whether the corresponding byte from
-   A is stored.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
-{
-  __m64 hibit = 0x8080808080808080UL;
-  __m64 mask, tmp;
-  __m64 *p = (__m64*)__P;
-
-  tmp = *p;
-  mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
-  tmp = (tmp & (~mask)) | (__A & mask);
-  *p = tmp;
-}
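-
-/* Usage sketch (illustration only; __dst names eight writable bytes):
-     __m64 __sel = (__m64) 0x0080008000800080UL;
-     _mm_maskmove_si64 (__a, __sel, __dst);
-   Only the bytes whose selector byte has its high bit set (bytes 0,
-   2, 4 and 6 of the 64-bit value here) are overwritten in *__dst.  */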
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_maskmovq (__m64 __A, __m64 __N, char *__P)
-{
-  _mm_maskmove_si64 (__A, __N, __P);
-}
-
-/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu8 (__m64 __A, __m64 __B)
-{
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = vec_avg (a, b);
-  return (__m64) ((__vector long long) c)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgb (__m64 __A, __m64 __B)
-{
-  return _mm_avg_pu8 (__A, __B);
-}
-
-/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu16 (__m64 __A, __m64 __B)
-{
-  __vector unsigned short a, b, c;
-
-  a = (__vector unsigned short)vec_splats (__A);
-  b = (__vector unsigned short)vec_splats (__B);
-  c = vec_avg (a, b);
-  return (__m64) ((__vector long long) c)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgw (__m64 __A, __m64 __B)
-{
-  return _mm_avg_pu16 (__A, __B);
-}
-
-/* Compute the sum of the absolute differences of the unsigned 8-bit
-   values in A and B.  Return the value in the lower 16-bit word; the
-   upper words are cleared.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_pu8 (__m64 __A, __m64 __B)
-{
-  __vector unsigned char a, b;
-  __vector unsigned char vmin, vmax, vabsdiff;
-  __vector signed int vsum;
-  const __vector unsigned int zero =
-    { 0, 0, 0, 0 };
-  __m64_union result = {0};
-
-  a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
-  b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
-  vmin = vec_min (a, b);
-  vmax = vec_max (a, b);
-  vabsdiff = vec_sub (vmax, vmin);
-  /* Sum four groups of bytes into integers.  */
-  vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
-  /* Sum across four integers with integer result.  */
-  vsum = vec_sums (vsum, (__vector signed int) zero);
-  /* The sum is in the rightmost 32 bits of the vector result.
-     Transfer it to a GPR and truncate to 16 bits.  */
-  result.as_short[0] = vsum[3];
-  return result.as_m64;
-}
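-
-/* Worked example (illustration only): for bytes {1, 2, 3, 4, 5, 6, 7, 8}
-   in __A and {8, 7, 6, 5, 4, 3, 2, 1} in __B, the absolute differences
-   are {7, 5, 3, 1, 1, 3, 5, 7}, so _mm_sad_pu8 returns 32 in the low
-   16-bit word.  */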
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_psadbw (__m64 __A, __m64 __B)
-{
-  return _mm_sad_pu8 (__A, __B);
-}
-
-/* Stores the data in A to the address P without polluting the caches.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pi (__m64 *__P, __m64 __A)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__P)
-    : "memory"
-  );
-  *__P = __A;
-}
-
-/* Likewise.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_ps (float *__P, __m128 __A)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__P)
-    : "memory"
-  );
-  _mm_store_ps (__P, __A);
-}
-
-/* Guarantees that every preceding store is globally visible before
-   any subsequent store.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sfence (void)
-{
-  /* Generate a lightweight sync.  */
-  __atomic_thread_fence (__ATOMIC_RELEASE);
-}
-
-/* The execution of the next instruction is delayed by an
-   implementation-specific amount of time.  The instruction does not
-   modify the architectural state.  This is after the pop_options
-   pragma because it does not require SSE support in the processor;
-   the encoding is a nop on processors that do not support it.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_pause (void)
-{
-  /* There is no exact match with this construct, but the following is
-     close to the desired effect.  */
-#if _ARCH_PWR8
-  /* On power8 and later processors we can depend on Program Priority
-     (PRI) and the associated "very low" PPI setting.  Since we don't
-     know what PPI this thread is running at, we: 1) save the current
-     PRI from the PPR SPR into a local GPR, 2) set the PRI to "very
-     low" via the special or 31,31,31 encoding, 3) issue an "isync" to
-     ensure the PRI change takes effect before we execute any more
-     instructions.
-     Now we can execute a lwsync (release barrier) while we execute
-     this thread at "very low" PRI.  Finally we restore the original
-     PRI and continue execution.  */
-  unsigned long __PPR;
-
-  __asm__ volatile (
-    "	mfppr	%0;"
-    "   or 31,31,31;"
-    "   isync;"
-    "   lwsync;"
-    "   isync;"
-    "   mtppr	%0;"
-    : "=r" (__PPR)
-    :
-    : "memory"
-  );
-#else
-  /* For older processors, where we may not even have Program Priority
-     controls, we can only depend on a heavyweight sync.  */
-  __atomic_thread_fence (__ATOMIC_SEQ_CST);
-#endif
-}
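-
-/* Typical use (illustration only): calling _mm_pause () in the body of
-   a spin-wait loop lowers the spinning thread's priority so it takes
-   fewer pipeline resources away from the lock holder.  */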
-
-/* Transpose the 4x4 matrix composed of row[0-3].  */
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
-do {									\
-  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
-  __v4sf __t0 = vec_vmrghw (__r0, __r1);			\
-  __v4sf __t1 = vec_vmrghw (__r2, __r3);			\
-  __v4sf __t2 = vec_vmrglw (__r0, __r1);			\
-  __v4sf __t3 = vec_vmrglw (__r2, __r3);			\
-  (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, 	\
-			       (__vector long long)__t1);	\
-  (row1) = (__v4sf)vec_mergel ((__vector long long)__t0,	\
-			       (__vector long long)__t1);	\
-  (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2,	\
-			       (__vector long long)__t3);	\
-  (row3) = (__v4sf)vec_mergel ((__vector long long)__t2,	\
-			       (__vector long long)__t3);	\
-} while (0)
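-
-/* Usage sketch (illustration only; __row0..__row3 are hypothetical
-   __v4sf variables already holding the matrix rows):
-     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
-   Afterwards __row0..__row3 hold the columns of the original 4x4
-   matrix.  */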
-
-/* For backward source compatibility.  */
-//# include <emmintrin.h>
-
-#else
-#include_next <xmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* _XMMINTRIN_H_INCLUDED */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdnoreturn.h b/darwin-x86/lib64/clang/14.0.2/include/stdnoreturn.h
deleted file mode 100644
index e83cd81..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/stdnoreturn.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __STDNORETURN_H
-#define __STDNORETURN_H
-
-#define noreturn _Noreturn
-#define __noreturn_is_defined 1
-
-#endif /* __STDNORETURN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/x86gprintrin.h b/darwin-x86/lib64/clang/14.0.2/include/x86gprintrin.h
deleted file mode 100644
index 01e741f..0000000
--- a/darwin-x86/lib64/clang/14.0.2/include/x86gprintrin.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86GPRINTRIN_H
-#define __X86GPRINTRIN_H
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__HRESET__)
-#include <hresetintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__UINTR__)
-#include <uintrintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CRC32__)
-#include <crc32intrin.h>
-#endif
-
-#define __SSC_MARK(Tag)                                                        \
-  __asm__ __volatile__("mov {%%ebx, %%eax|eax, ebx}; "                      \
-                       "mov {%0, %%ebx|ebx, %0}; "                          \
-                       ".byte 0x64, 0x67, 0x90; "                              \
-                       "mov {%%eax, %%ebx|ebx, eax};" ::"i"(Tag)            \
-                       : "%eax");
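-
-/* Usage sketch (illustration only): __SSC_MARK emits a magic no-op
-   sequence (.byte 0x64, 0x67, 0x90 with Tag in %ebx) recognized by
-   binary instrumentation tools such as Intel SDE; by convention,
-   __SSC_MARK (0x111) starts and __SSC_MARK (0x222) stops a region of
-   interest.  */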
-
-#endif /* __X86GPRINTRIN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_builtin_vars.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_complex_builtins.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_device_functions.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
index e0875bb..b87413e 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
@@ -71,8 +71,8 @@
   }                                                                            \
   inline __device__ unsigned long long __FnName(                               \
       unsigned long long __val, __Type __offset, int __width = warpSize) {     \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        static_cast<unsigned long long>(__val), __offset, __width));           \
+    return static_cast<unsigned long long>(                                    \
+        ::__FnName(static_cast<long long>(__val), __offset, __width));         \
   }                                                                            \
   inline __device__ double __FnName(double __val, __Type __offset,             \
                                     int __width = warpSize) {                  \
@@ -139,8 +139,8 @@
   inline __device__ unsigned long long __FnName(                               \
       unsigned int __mask, unsigned long long __val, __Type __offset,          \
       int __width = warpSize) {                                                \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        __mask, static_cast<unsigned long long>(__val), __offset, __width));   \
+    return static_cast<unsigned long long>(                                    \
+        ::__FnName(__mask, static_cast<long long>(__val), __offset, __width)); \
   }                                                                            \
   inline __device__ long __FnName(unsigned int __mask, long __val,             \
                                   __Type __offset, int __width = warpSize) {   \
@@ -234,7 +234,7 @@
   return __nvvm_match_any_sync_i32(mask, value);
 }
 
-inline __device__ unsigned long long
+inline __device__ unsigned int
 __match64_any_sync(unsigned int mask, unsigned long long value) {
   return __nvvm_match_any_sync_i64(mask, value);
 }
@@ -244,7 +244,7 @@
   return __nvvm_match_all_sync_i32p(mask, value, pred);
 }
 
-inline __device__ unsigned long long
+inline __device__ unsigned int
 __match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
   return __nvvm_match_all_sync_i64p(mask, value, pred);
 }
@@ -509,7 +509,7 @@
 __device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {
   return (void *)(void __attribute__((address_space(5))) *)__ptr;
 }
-__device__ inline uint32_t __nvvm_get_smem_pointer(void *__ptr) {
+__device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
   return __nv_cvta_generic_to_shared_impl(__ptr);
 }
 } // extern "C"
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
index 538556f..e447590 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
@@ -345,4 +345,4 @@
 #pragma pop_macro("__DEVICE_VOID__")
 #pragma pop_macro("__FAST_OR_SLOW")
 
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_math_forward_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_runtime_wrapper.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_texture_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_cuda_texture_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_hip_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_hip_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_hip_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_hip_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_math.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_hip_math.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_math.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_hip_math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/darwin-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
similarity index 80%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
rename to darwin-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
index 73021d2..10cec58 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
@@ -50,6 +50,9 @@
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
 #else
 typedef __SIZE_TYPE__ size_t;
 // Define macros which are needed to declare HIP device APIs without standard
@@ -74,25 +77,35 @@
 extern "C" {
 #endif //__cplusplus
 
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+  return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#else  // HIP version check
 #if __HIP_ENABLE_DEVICE_MALLOC__
 __device__ void *__hip_malloc(__hip_size_t __size);
 __device__ void *__hip_free(void *__ptr);
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   return __hip_malloc(__size);
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __hip_free(__ptr);
 }
 #else
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   __builtin_trap();
   return (void *)0;
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
   __builtin_trap();
-  return (void *)0;
 }
 #endif
+#endif // HIP version check
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h b/darwin-x86/lib64/clang/16.0.2/include/__stddef_max_align_t.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h
rename to darwin-x86/lib64/clang/16.0.2/include/__stddef_max_align_t.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h b/darwin-x86/lib64/clang/16.0.2/include/__wmmintrin_aes.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h
rename to darwin-x86/lib64/clang/16.0.2/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h b/darwin-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
similarity index 76%
rename from darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
rename to darwin-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
index fef4b93..c9a6d50 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
@@ -22,23 +22,23 @@
 /// \headerfile <x86intrin.h>
 ///
 /// \code
-/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);
 /// \endcode
 ///
 /// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
 ///
-/// \param __X
+/// \param X
 ///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __Y
+/// \param Y
 ///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __I
+/// \param I
 ///    An immediate value specifying which 64-bit values to select from the
-///    operands. Bit 0 is used to select a value from operand \a __X, and bit
-///    4 is used to select a value from operand \a __Y: \n
-///    Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
-///    Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
-///    Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
-///    Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
+///    operands. Bit 0 is used to select a value from operand \a X, and bit
+///    4 is used to select a value from operand \a Y: \n
+///    Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n
+///    Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n
+///    Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n
+///    Bit[4]=1 indicates that bits[127:64] of operand \a Y are used.
 /// \returns The 128-bit integer vector containing the result of the carry-less
 ///    multiplication of the selected 64-bit values.
 #define _mm_clmulepi64_si128(X, Y, I) \
diff --git a/darwin-x86/lib64/clang/14.0.2/include/adxintrin.h b/darwin-x86/lib64/clang/16.0.2/include/adxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/adxintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/altivec.h b/darwin-x86/lib64/clang/16.0.2/include/altivec.h
similarity index 95%
rename from darwin-x86/lib64/clang/14.0.2/include/altivec.h
rename to darwin-x86/lib64/clang/16.0.2/include/altivec.h
index 55195b0..0b1e76e 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/altivec.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/altivec.h
@@ -311,7 +311,7 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vadduqm(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vadduqm(__a, __b);
 }
 #elif defined(__VSX__)
 static __inline__ vector signed long long __ATTRS_o_ai
@@ -325,9 +325,9 @@
       (vector unsigned int)__a + (vector unsigned int)__b;
   vector unsigned int __carry = __builtin_altivec_vaddcuw(
       (vector unsigned int)__a, (vector unsigned int)__b);
-  __carry = __builtin_shufflevector((vector unsigned char)__carry,
-                                    (vector unsigned char)__carry, 0, 0, 0, 7,
-                                    0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
+  __carry = (vector unsigned int)__builtin_shufflevector(
+      (vector unsigned char)__carry, (vector unsigned char)__carry, 0, 0, 0, 7,
+      0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
   return (vector signed long long)(__res + __carry);
 #endif
 }
@@ -358,7 +358,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_adde(vector signed __int128 __a, vector signed __int128 __b,
          vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -371,7 +373,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vaddeuqm_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 #endif
 
@@ -398,7 +402,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addec(vector signed __int128 __a, vector signed __int128 __b,
           vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -411,7 +417,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
                vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vaddecuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 #ifdef __powerpc64__
@@ -600,7 +608,8 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vaddcuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b);
 }
 #endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 
@@ -824,7 +833,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -837,7 +848,8 @@
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vaddcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -850,7 +862,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -1806,18 +1820,19 @@
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
   return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector bool __int128 __a, vector bool  __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpequq(__a, __b);
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 #endif
 
@@ -1887,19 +1902,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector bool __int128 __a, vector bool __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(__a, __b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 #endif
 
@@ -1944,16 +1960,16 @@
 static __inline__ signed int __ATTRS_o_ai
 vec_cntlz_lsbb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #else
-  return __builtin_altivec_vclzlsbb(__a);
+  return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
 #endif
 }
 
 static __inline__ signed int __ATTRS_o_ai
 vec_cntlz_lsbb(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #else
   return __builtin_altivec_vclzlsbb(__a);
 #endif
@@ -1962,9 +1978,9 @@
 static __inline__ signed int __ATTRS_o_ai
 vec_cnttz_lsbb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
+  return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
 #else
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #endif
 }
 
@@ -1984,7 +2000,7 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
+  return __builtin_altivec_vprtybw((vector unsigned int)__a);
 }
 
 #ifdef __SIZEOF_INT128__
@@ -1995,7 +2011,7 @@
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
+  return __builtin_altivec_vprtybq((vector unsigned __int128)__a);
 }
 #endif
 
@@ -2006,7 +2022,7 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
+  return __builtin_altivec_vprtybd((vector unsigned long long)__a);
 }
 
 #else
@@ -2212,14 +2228,12 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtsq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+  return (vector bool __int128)__builtin_altivec_vcmpgtsq(__a, __b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtuq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+  return (vector bool __int128)__builtin_altivec_vcmpgtuq(__a, __b);
 }
 #endif
 
@@ -2488,7 +2502,8 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
+  return (vector unsigned char)__builtin_altivec_vpopcntb(
+      (vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_popcnt(vector unsigned char __a) {
@@ -2496,7 +2511,8 @@
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
+  return (vector unsigned short)__builtin_altivec_vpopcnth(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_popcnt(vector unsigned short __a) {
@@ -2504,7 +2520,7 @@
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
+  return __builtin_altivec_vpopcntw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_popcnt(vector unsigned int __a) {
@@ -2512,7 +2528,7 @@
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
+  return __builtin_altivec_vpopcntd((vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_popcnt(vector unsigned long long __a) {
@@ -2524,7 +2540,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
+  return (vector signed char)__builtin_altivec_vclzb((vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_cntlz(vector unsigned char __a) {
@@ -2532,7 +2548,8 @@
 }
 static __inline__ vector signed short __ATTRS_o_ai
 vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
+  return (vector signed short)__builtin_altivec_vclzh(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_cntlz(vector unsigned short __a) {
@@ -2540,7 +2557,7 @@
 }
 static __inline__ vector signed int __ATTRS_o_ai
 vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
+  return (vector signed int)__builtin_altivec_vclzw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_cntlz(vector unsigned int __a) {
@@ -2548,7 +2565,8 @@
 }
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
+  return (vector signed long long)__builtin_altivec_vclzd(
+      (vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_cntlz(vector unsigned long long __a) {
@@ -2562,7 +2580,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
+  return (vector signed char)__builtin_altivec_vctzb((vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_cnttz(vector unsigned char __a) {
@@ -2570,7 +2588,8 @@
 }
 static __inline__ vector signed short __ATTRS_o_ai
 vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
+  return (vector signed short)__builtin_altivec_vctzh(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_cnttz(vector unsigned short __a) {
@@ -2578,7 +2597,7 @@
 }
 static __inline__ vector signed int __ATTRS_o_ai
 vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
+  return (vector signed int)__builtin_altivec_vctzw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_cnttz(vector unsigned int __a) {
@@ -2586,7 +2605,8 @@
 }
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
+  return (vector signed long long)__builtin_altivec_vctzd(
+      (vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_cnttz(vector unsigned long long __a) {
@@ -3144,7 +3164,8 @@
   vector unsigned char __mask =
       (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
   vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+      (vector unsigned char)__builtin_altivec_vperm_4si(
+          (vector int)__a, (vector int)__a, __mask);
   return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
 }
 #endif
@@ -3181,34 +3202,40 @@
 // the XL-compatible signatures are used for those functions.
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) *      \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)),          \
-             vector signed long long                                           \
-           : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) *        \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+  _Generic(                                                                    \
+      (__a), vector int                                                        \
+      : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),       \
+        vector unsigned int                                                    \
+      : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a),      \
+                                              (__b)),                          \
+        vector unsigned long long                                              \
+      : (vector float)(__builtin_vsx_xvcvuxdsp(                                \
+                           (vector unsigned long long)(__a)) *                 \
+                       (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
+        vector signed long long                                                \
+      : (vector float)(__builtin_vsx_xvcvsxdsp(                                \
+                           (vector signed long long)(__a)) *                   \
+                       (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
+  _Generic(                                                                    \
+      (__a), vector int                                                        \
+      : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),       \
+        vector unsigned int                                                    \
+      : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a),      \
+                                              (__b)),                          \
+        vector unsigned long long                                              \
+      : (vector float)(__builtin_convertvector(                                \
+                           (vector unsigned long long)(__a), vector double) *  \
+                       (vector double)(vector unsigned long long)((0x3ffULL -  \
+                                                                   (__b))      \
+                                                                  << 52)),     \
+        vector signed long long                                                \
+      : (vector float)(__builtin_convertvector((vector signed long long)(__a), \
+                                               vector double) *                \
+                       (vector double)(vector unsigned long long)((0x3ffULL -  \
+                                                                   (__b))      \
+                                                                  << 52)))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
 #define vec_ctf(__a, __b)                                                      \
@@ -3255,26 +3282,29 @@
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+           : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a),  \
+                                                         (__b)),               \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
-             __builtin_vsx_xvcvdpsxws(__ret);                                  \
+             (vector signed long long)__builtin_vsx_xvcvdpsxws(__ret);         \
            }))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+           : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a),  \
+                                                         (__b)),               \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
+             (vector signed long long)__builtin_convertvector(                 \
+                 __ret, vector signed long long);                              \
            }))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
@@ -3291,26 +3321,29 @@
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+           : (vector unsigned int)__builtin_altivec_vctuxs(                    \
+                 (vector float)(__a), (__b)),                                  \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
-             __builtin_vsx_xvcvdpuxws(__ret);                                  \
+             (vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret);       \
            }))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+           : (vector unsigned int)__builtin_altivec_vctuxs(                    \
+                 (vector float)(__a), (__b)),                                  \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
+             (vector unsigned long long)__builtin_convertvector(               \
+                 __ret, vector unsigned long long);                            \
            }))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
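
The scaling constants in the vec_ctf/vec_cts/vec_ctu branches above come straight from the IEEE-754 binary64 layout: writing the biased exponent 0x3ff + b (or 0x3ff - b) into bits 62:52 of a 64-bit pattern yields exactly 2^b (or 2^-b), so a single vector multiply rescales the operand around the conversion. A minimal scalar sketch of the same trick, illustrative only and assuming IEEE-754 doubles (pow2_bits is a name invented here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build 2^b by writing the biased exponent (0x3ff + b) into bits 62:52
   of a binary64 bit pattern, the same constant the macros above
   multiply by. Type-pun via memcpy to stay well-defined. */
static double pow2_bits(int b) {
  uint64_t bits = (0x3ffULL + (uint64_t)b) << 52;
  double d;
  memcpy(&d, &bits, sizeof d);
  return d;
}

int main(void) {
  printf("%g %g\n", pow2_bits(5), pow2_bits(0)); /* prints: 32 1 */
  return 0;
}
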
@@ -6491,12 +6524,12 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed char)__b);
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
+  return (vector signed char)~(__a & (vector bool char)__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -6506,12 +6539,12 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned char)__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
+  return (vector unsigned char)~(__a & (vector bool char)__b);
 }
 
 static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
@@ -6526,12 +6559,12 @@
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed short)__b);
 }
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
+  return (vector signed short)~(__a & (vector bool short)__b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -6541,7 +6574,7 @@
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned short)__b);
 }
 
 static __inline__ vector bool short __ATTRS_o_ai
@@ -6556,12 +6589,12 @@
 
 static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
                                                           vector bool int __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed int)__b);
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
 vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
+  return (vector signed int)~(__a & (vector bool int)__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -6571,12 +6604,12 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned int)__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
+  return (vector unsigned int)~(__a & (vector bool int)__b);
 }
 
 static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
@@ -6597,12 +6630,12 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed long long)__b);
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
+  return (vector signed long long)~(__a & (vector bool long long)__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -6612,12 +6645,12 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned long long)__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
+  return (vector unsigned long long)~(__a & (vector bool long long)__b);
 }
 
 static __inline__ vector bool long long __ATTRS_o_ai
@@ -7005,12 +7038,12 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
+  return __a | (vector signed char)~__b;
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
+  return (vector signed char)(__a | (vector bool char)~__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -7020,12 +7053,12 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned char)~__b;
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
+  return (vector unsigned char)(__a | (vector bool char)~__b);
 }
 
 static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
@@ -7040,12 +7073,12 @@
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
+  return __a | (vector signed short)~__b;
 }
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
+  return (vector signed short)(__a | (vector bool short)~__b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -7055,12 +7088,12 @@
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned short)~__b;
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
+  return (vector unsigned short)(__a | (vector bool short)~__b);
 }
 
 static __inline__ vector bool short __ATTRS_o_ai
@@ -7075,12 +7108,12 @@
 
 static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
                                                          vector bool int __b) {
-  return __a | ~__b;
+  return __a | (vector signed int)~__b;
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
 vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
+  return (vector signed int)(__a | (vector bool int)~__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -7090,12 +7123,12 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned int)~__b;
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
+  return (vector unsigned int)(__a | (vector bool int)~__b);
 }
 
 static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
@@ -7105,12 +7138,12 @@
 
 static __inline__ vector float __ATTRS_o_ai
 vec_orc(vector bool int __a, vector float __b) {
- return (vector float)(__a | ~(vector unsigned int)__b);
+  return (vector float)(__a | ~(vector bool int)__b);
 }
 
 static __inline__ vector float __ATTRS_o_ai
 vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
+  return (vector float)((vector bool int)__a | ~__b);
 }
 
 static __inline__ vector float __ATTRS_o_ai vec_orc(vector float __a,
@@ -7125,12 +7158,12 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
+  return __a | (vector signed long long)~__b;
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
+  return (vector signed long long)(__a | (vector bool long long)~__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -7140,12 +7173,12 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned long long)~__b;
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
+  return (vector unsigned long long)(__a | (vector bool long long)~__b);
 }
 
 static __inline__ vector bool long long __ATTRS_o_ai
@@ -7155,17 +7188,17 @@
 
 static __inline__ vector double __ATTRS_o_ai
 vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
+  return (vector double)((vector bool long long)__a | ~__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
+  return (vector double)(__a | ~(vector bool long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_orc(vector double __a,
                                                      vector double __b) {
-  return (vector double)((vector bool long long)__a |
+  return (vector double)((vector unsigned long long)__a |
                          ~(vector unsigned long long)__b);
 }
 #endif
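
The float and double vec_orc overloads route through integer vectors because ~ and | are not defined on floating-point vectors; the operation is purely on bit patterns. A standalone sketch of that detour using the generic vector extension (v4f/v4u and orc_f32 are illustrative names):

#include <stdio.h>

typedef float v4f __attribute__((vector_size(16)));
typedef unsigned int v4u __attribute__((vector_size(16)));

/* OR-with-complement on float lanes: reinterpret to an integer vector,
   do the bitwise work, reinterpret back. */
static v4f orc_f32(v4f a, v4f b) {
  return (v4f)((v4u)a | ~(v4u)b);
}

int main(void) {
  v4f a = {1.0f, 0, 0, 0}, b = {-2.0f, 0, 0, 0};
  v4u r = (v4u)orc_f32(a, b);
  /* 0x3F800000 | ~0xC0000000 == 0x3FFFFFFF */
  printf("0x%08X\n", r[0]);
  return 0;
}
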
@@ -8276,14 +8309,20 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
+  return (vector unsigned long long)__builtin_altivec_vrld(
+      (vector long long)__a, __b);
 }
 #endif
 
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
-  return (__b << __a)|(__b >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __a));
+  return (vector signed __int128)(((vector unsigned __int128)__b
+                                   << (vector unsigned __int128)__a) |
+                                  ((vector unsigned __int128)__b >>
+                                   ((__CHAR_BIT__ *
+                                     sizeof(vector unsigned __int128)) -
+                                    (vector unsigned __int128)__a)));
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
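
The __int128 vec_rl above implements rotation with the classic two-shift identity rot(x, n) = (x << n) | (x >> (width - n)), applied lane-wise to 128-bit quadwords. A scalar illustration on 64 bits; the & 63 mask keeps both shifts defined at n == 0, a caveat worth noting with this idiom:

#include <stdint.h>
#include <stdio.h>

/* Rotate-left by shifts: rot(x, n) = (x << n) | (x >> (64 - n)),
   with counts masked so n == 0 never produces a shift by 64. */
static uint64_t rotl64(uint64_t x, unsigned n) {
  return (x << (n & 63)) | (x >> ((64 - n) & 63));
}

int main(void) {
  printf("0x%016llX\n",
         (unsigned long long)rotl64(0x8000000000000001ULL, 4));
  /* prints: 0x0000000000000018 */
  return 0;
}
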
@@ -8317,7 +8356,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
          vector signed __int128 __c) {
-  return __builtin_altivec_vrlqmi(__a, __c, __b);
+  return (vector signed __int128)__builtin_altivec_vrlqmi(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__c,
+      (vector unsigned __int128)__b);
 }
 #endif
 
@@ -8370,7 +8411,8 @@
       __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
                               -1, -1, -1, -1, -1, -1, -1);
 #endif
-  return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
+  return (vector signed __int128)__builtin_altivec_vrlqnm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)MaskAndShift);
 }
 #endif
 
@@ -12070,13 +12112,15 @@
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vsubcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 #endif
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vsubcuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b);
 }
 #endif // __POWER8_VECTOR__
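
vec_subc and the vec_sube/vec_subec variants expose the quadword borrow chain: vsubcuq yields only the carry-out of a 128-bit subtraction, and the extended forms take a carry-in so multi-quadword subtracts can be chained. A scalar sketch of the carry rule, under my reading of the vsubcuq semantics:

#include <stdint.h>
#include <stdio.h>

/* Computing a - b as a + ~b + 1, the carry out is 1 exactly when no
   borrow occurs, i.e. when a >= b. vec_subc produces that carry per
   128-bit lane. */
static unsigned sub_carry_out(uint64_t a, uint64_t b) {
  return a >= b;
}

int main(void) {
  printf("%u %u\n", sub_carry_out(5, 3), sub_carry_out(3, 5)); /* 1 0 */
  return 0;
}
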
 
@@ -12298,7 +12342,7 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsubuqm(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vsubuqm(__a, __b);
 }
 
 /* vec_vsubeuqm */
@@ -12307,7 +12351,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12319,7 +12365,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_sube(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12332,7 +12380,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vsubeuqm_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 /* vec_vsubcuq */
@@ -12340,7 +12390,8 @@
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vsubcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12353,7 +12404,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12381,7 +12434,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_subec(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12394,7 +12449,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
                vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vsubecuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 #endif // __POWER8_VECTOR__
 
@@ -14900,17 +14957,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 #endif
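
These _p builtins test the CR6 condition field set by the underlying vector compare; __CR6_LT selects the "all lanes true" bit, so vec_all_eq reports 1 only when every element pair matches. A usage sketch, assuming a POWER10 target built with -mcpu=power10 (it will not compile elsewhere):

#include <altivec.h>

/* Returns 1 only when the 128-bit lanes of a and b are all equal. */
int all_equal_q(vector signed __int128 a, vector signed __int128 b) {
  return vec_all_eq(a, b);
}
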
 
@@ -15850,17 +15910,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+                                      __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 #endif
 
@@ -16144,17 +16207,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV,
+                                      (vector unsigned __int128)__a, __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(
+      __CR6_EQ_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
 }
 #endif
 
@@ -17124,17 +17190,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV,
+                                      (vector unsigned __int128)__a, __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(
+      __CR6_LT_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
 }
 #endif
 
@@ -17297,13 +17366,17 @@
 static __inline__ vector bool char __ATTRS_o_ai
 vec_permxor(vector bool char __a, vector bool char __b,
             vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+  return (vector bool char)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_permxor(vector signed char __a, vector signed char __b,
             vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+  return (vector signed char)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -17365,7 +17438,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector signed char)__builtin_altivec_vgbbd((vector unsigned char)__a);
 }
 
 #define vec_pmsum_be __builtin_crypto_vpmsumb
@@ -17378,23 +17451,25 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_gbb(vector signed long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector signed long long)__builtin_altivec_vgbbd(
+      (vector unsigned char)__a);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_gbb(vector unsigned long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector unsigned long long)__builtin_altivec_vgbbd(
+      (vector unsigned char)__a);
 }
 
 static __inline__ vector long long __ATTRS_o_ai
 vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
+  return (vector long long)__builtin_altivec_vbpermq((vector unsigned char)__a,
+                                                     (vector unsigned char)__b);
 }
 
 static __inline__ vector long long __ATTRS_o_ai
 vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
+  return (vector long long)__builtin_altivec_vbpermq(__a, __b);
 }
 
 #if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
@@ -17406,7 +17481,7 @@
 #endif
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_bperm(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vbpermq(__a, __b);
 }
 #endif // __POWER8_VECTOR__
 #ifdef __POWER9_VECTOR__
@@ -17777,26 +17852,26 @@
 #if defined(__POWER10_VECTOR__) && defined(__VSX__) &&                         \
     defined(__SIZEOF_INT128__)
 
-/* vect_xl_sext */
+/* vec_xl_sext */
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
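
The hunk above corrects vec_xl_sext to return vector signed __int128, matching what it does: load one scalar at __pointer + __offset and sign-extend it into the single 128-bit lane. A usage sketch under the same POWER10 and __SIZEOF_INT128__ assumptions:

#include <altivec.h>

/* Loading -1 through a signed overload should yield a 128-bit -1,
   which only holds with the corrected signed return type. */
vector signed __int128 widen(const signed int *p) {
  return vec_xl_sext(0, p);
}
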
 
 /* vec_xl_zext */
@@ -18260,13 +18335,17 @@
 
 #define vec_cntm(__a, __mp)                                                    \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_altivec_vcntmbb((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbb((vector unsigned char)(__a),            \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned short                                             \
-           : __builtin_altivec_vcntmbh((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbh((vector unsigned short)(__a),           \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned int                                               \
-           : __builtin_altivec_vcntmbw((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbw((vector unsigned int)(__a),             \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned long long                                         \
-           : __builtin_altivec_vcntmbd((__a), (unsigned char)(__mp)))
+           : __builtin_altivec_vcntmbd((vector unsigned long long)(__a),       \
+                                       (unsigned char)(__mp)))
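
vec_cntm fans out to one builtin per element width via C11 _Generic, which selects an expression by the controlling operand's type at compile time. The dispatch pattern in plain, portable C (width_name is a hypothetical macro):

#include <stdio.h>

/* _Generic picks one branch by the argument's type, so a single macro
   can cover every element width, as vec_cntm does above. */
#define width_name(x) _Generic((x),              \
    unsigned char: "byte",                       \
    unsigned short: "halfword",                  \
    unsigned int: "word",                        \
    unsigned long long: "doubleword")

int main(void) {
  printf("%s %s\n", width_name((unsigned char)0), width_name(0ULL));
  /* prints: byte doubleword */
  return 0;
}
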
 
 /* vec_gen[b|h|w|d|q]m */
 
@@ -18327,43 +18406,52 @@
 #ifdef __SIZEOF_INT128__
 #define vec_ternarylogic(__a, __b, __c, __imm)                                 \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned char)__builtin_vsx_xxeval(                       \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned short)__builtin_vsx_xxeval(                      \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned int)__builtin_vsx_xxeval(                        \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned long long)__builtin_vsx_xxeval(                  \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned __int128                                          \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
+           : (vector unsigned __int128)__builtin_vsx_xxeval(                   \
+               (vector unsigned long long)(__a),                               \
+               (vector unsigned long long)(__b),                               \
+               (vector unsigned long long)(__c), (__imm)))
 #else
 #define vec_ternarylogic(__a, __b, __c, __imm)                                 \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned char)__builtin_vsx_xxeval(                       \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned short)__builtin_vsx_xxeval(                      \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned int)__builtin_vsx_xxeval(                        \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
+           : (vector unsigned long long)__builtin_vsx_xxeval(                  \
+               (vector unsigned long long)(__a),                               \
+               (vector unsigned long long)(__b),                               \
+               (vector unsigned long long)(__c), (__imm)))
 #endif /* __SIZEOF_INT128__ */
 #endif /* __VSX__ */
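
The xxeval immediate is a 3-input truth table: bit (a<<2 | b<<1 | c) of __imm supplies the output for that combination of input bits, applied independently at every bit position (the same convention as x86's vpternlog, to the best of my reading). A scalar emulation for intuition, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* OR together the minterms whose truth-table bit is set in imm. */
static uint64_t ternarylogic(uint64_t a, uint64_t b, uint64_t c,
                             unsigned imm) {
  uint64_t r = 0;
  for (int i = 0; i < 8; i++) {
    if ((imm >> i) & 1u) {
      uint64_t t = ~0ULL;
      t &= (i & 4) ? a : ~a;
      t &= (i & 2) ? b : ~b;
      t &= (i & 1) ? c : ~c;
      r |= t;
    }
  }
  return r;
}

int main(void) {
  /* imm 0xE8 selects minterms 011, 101, 110, 111: a majority vote. */
  printf("%llu\n", (unsigned long long)ternarylogic(1, 1, 0, 0xE8)); /* 1 */
  return 0;
}
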
 
@@ -18371,14 +18459,16 @@
 
 #ifdef __VSX__
 #define vec_genpcvm(__a, __imm)                                                \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : __builtin_vsx_xxgenpcvbm((vector unsigned char)(__a), (int)(__imm)),   \
+        vector unsigned short                                                  \
+      : __builtin_vsx_xxgenpcvhm((vector unsigned short)(__a), (int)(__imm)),  \
+        vector unsigned int                                                    \
+      : __builtin_vsx_xxgenpcvwm((vector unsigned int)(__a), (int)(__imm)),    \
+        vector unsigned long long                                              \
+      : __builtin_vsx_xxgenpcvdm((vector unsigned long long)(__a),             \
+                                 (int)(__imm)))
 #endif /* __VSX__ */
 
 /* vec_clr_first */
@@ -18386,18 +18476,22 @@
 static __inline__ vector signed char __ATTRS_o_ai
 vec_clr_first(vector signed char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb(__a, __n);
+  return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+                                                      __n);
 #else
-  return __builtin_altivec_vclrlb( __a, __n);
+  return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+                                                      __n);
 #endif
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_clr_first(vector unsigned char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrrb(
+      (vector unsigned char)__a, __n);
 #else
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrlb(
+      (vector unsigned char)__a, __n);
 #endif
 }
 
@@ -18406,18 +18500,22 @@
 static __inline__ vector signed char __ATTRS_o_ai
 vec_clr_last(vector signed char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb(__a, __n);
+  return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+                                                      __n);
 #else
-  return __builtin_altivec_vclrrb( __a, __n);
+  return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+                                                      __n);
 #endif
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_clr_last(vector unsigned char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrlb(
+      (vector unsigned char)__a, __n);
 #else
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrrb(
+      (vector unsigned char)__a, __n);
 #endif
 }
 
@@ -18469,13 +18567,75 @@
 }
 #endif
 
-/* vec_sldbi */
+/* vec_sldb */
+#define vec_sldb(__a, __b, __c)                                                \
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_altivec_vsldbi(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_altivec_vsldbi(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_altivec_vsldbi(                       \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_altivec_vsldbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_altivec_vsldbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_altivec_vsldbi((vector unsigned char)__a, \
+                                                    (vector unsigned char)__b, \
+                                                    (__c & 0x7)),              \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_altivec_vsldbi(                   \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_altivec_vsldbi(                     \
+          (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
 
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
-
-/* vec_srdbi */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+/* vec_srdb */
+#define vec_srdb(__a, __b, __c)                                                \
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_altivec_vsrdbi(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_altivec_vsrdbi(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_altivec_vsrdbi(                       \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_altivec_vsrdbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_altivec_vsrdbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_altivec_vsrdbi((vector unsigned char)__a, \
+                                                    (vector unsigned char)__b, \
+                                                    (__c & 0x7)),              \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_altivec_vsrdbi(                   \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_altivec_vsrdbi(                     \
+          (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
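
vec_sldb conceptually concatenates __a:__b and shifts left by a bit count, and vsldbi takes only a 3-bit immediate, which is why every branch masks the count with (__c & 0x7). A scalar sketch of the "shift left double by bits" idea on 64-bit halves (sld_bits is a name made up here):

#include <stdint.h>
#include <stdio.h>

/* High half of (a:b) << n for n in 0..7; the n == 0 special case
   avoids an undefined shift by 64. */
static uint64_t sld_bits(uint64_t a, uint64_t b, unsigned n) {
  n &= 0x7;
  return n ? (a << n) | (b >> (64 - n)) : a;
}

int main(void) {
  printf("0x%016llX\n",
         (unsigned long long)sld_bits(0x0123456789ABCDEFULL,
                                      0xF000000000000000ULL, 4));
  /* prints: 0x123456789ABCDEFF */
  return 0;
}
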
 
 /* vec_insertl */
 
@@ -18704,16 +18864,46 @@
 #ifdef __VSX__
 
 /* vec_permx */
-
 #define vec_permx(__a, __b, __c, __d)                                          \
-  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_vsx_xxpermx(                           \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_vsx_xxpermx(                             \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_vsx_xxpermx(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_vsx_xxpermx(                            \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_vsx_xxpermx(                            \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_vsx_xxpermx(                              \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_vsx_xxpermx(                      \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_vsx_xxpermx(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector float                                                           \
+      : (vector float)__builtin_vsx_xxpermx(                                   \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector double                                                          \
+      : (vector double)__builtin_vsx_xxpermx(                                  \
+          (vector unsigned char)__a, (vector unsigned char)__b, __c, __d))
 
 /* vec_blendv */
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_blendv(vector signed char __a, vector signed char __b,
            vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
+  return (vector signed char)__builtin_vsx_xxblendvb(
+      (vector unsigned char)__a, (vector unsigned char)__b, __c);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -18725,7 +18915,8 @@
 static __inline__ vector signed short __ATTRS_o_ai
 vec_blendv(vector signed short __a, vector signed short __b,
            vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
+  return (vector signed short)__builtin_vsx_xxblendvh(
+      (vector unsigned short)__a, (vector unsigned short)__b, __c);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -18737,7 +18928,8 @@
 static __inline__ vector signed int __ATTRS_o_ai
 vec_blendv(vector signed int __a, vector signed int __b,
            vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
+  return (vector signed int)__builtin_vsx_xxblendvw(
+      (vector unsigned int)__a, (vector unsigned int)__b, __c);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -18749,33 +18941,68 @@
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_blendv(vector signed long long __a, vector signed long long __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector signed long long)__builtin_vsx_xxblendvd(
+      (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector unsigned long long)__builtin_vsx_xxblendvd(__a, __b, __c);
 }
 
 static __inline__ vector float __ATTRS_o_ai
 vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
+  return (vector float)__builtin_vsx_xxblendvw((vector unsigned int)__a,
+                                               (vector unsigned int)__b, __c);
 }
 
 static __inline__ vector double __ATTRS_o_ai
 vec_blendv(vector double __a, vector double __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector double)__builtin_vsx_xxblendvd(
+      (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
 }
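
The xxblendv* forms select per lane on the sign bit of each mask element, not on a full all-ones lane mask. An equivalent standalone sketch with the generic vector extension (blend and v4u are illustrative names):

#include <stdio.h>

typedef unsigned int v4u __attribute__((vector_size(16)));

/* Expand each mask element's MSB to all-ones, then mux a and b. */
static v4u blend(v4u a, v4u b, v4u m) {
  v4u full = -(m >> 31);
  return (a & ~full) | (b & full);
}

int main(void) {
  v4u a = {1, 2, 3, 4}, b = {10, 20, 30, 40};
  v4u m = {0x80000000u, 0, 0x80000000u, 0};
  v4u r = blend(a, b, m);
  printf("%u %u %u %u\n", r[0], r[1], r[2], r[3]); /* 10 2 30 4 */
  return 0;
}
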
 
-/* vec_replace_elt */
+#define vec_replace_unaligned(__a, __b, __c)                                   \
+  _Generic((__a), vector signed int                                            \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector unsigned int                                               \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector unsigned long long                                         \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c),            \
+             vector signed long long                                           \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c),            \
+             vector float                                                      \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector double                                                     \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c))
 
-#define vec_replace_elt __builtin_altivec_vec_replace_elt
-
-/* vec_replace_unaligned */
-
-#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+#define vec_replace_elt(__a, __b, __c)                                         \
+  _Generic((__a), vector signed int                                            \
+           : (vector signed int)__builtin_altivec_vinsw_elt(                   \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector unsigned int                                               \
+           : (vector unsigned int)__builtin_altivec_vinsw_elt(                 \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector unsigned long long                                         \
+           : (vector unsigned long long)__builtin_altivec_vinsd_elt(           \
+                 (vector unsigned char)__a, (unsigned long long)__b, __c),     \
+             vector signed long long                                           \
+           : (vector signed long long)__builtin_altivec_vinsd_elt(             \
+                 (vector unsigned char)__a, (unsigned long long)__b, __c),     \
+             vector float                                                      \
+           : (vector float)__builtin_altivec_vinsw_elt(                        \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector double                                                     \
+           : (vector double)__builtin_altivec_vinsd_elt(                       \
+               (vector unsigned char)__a, (unsigned long long)__b, __c))
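
Both vec_replace_unaligned and vec_replace_elt overwrite sizeof(__b) bytes of the source vector with the scalar __b; the _Generic split only routes word-sized elements to vinsw and doubleword-sized ones to vinsd (the _elt form taking an element position, the unaligned form a byte offset). A portable byte-level sketch, with replace_bytes a hypothetical helper working in byte offsets:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Overwrite sizeof b bytes of a 16-byte vector image at byte offset c;
   c must satisfy c <= 16 - sizeof b. */
static void replace_bytes(unsigned char v[16], uint32_t b, unsigned c) {
  memcpy(v + c, &b, sizeof b);
}

int main(void) {
  unsigned char v[16] = {0};
  replace_bytes(v, 0xDEADBEEFu, 4);
  printf("%02X %02X %02X %02X\n", (unsigned)v[4], (unsigned)v[5],
         (unsigned)v[6], (unsigned)v[7]); /* EF BE AD DE on little-endian */
  return 0;
}
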
 
 /* vec_splati */
 
@@ -18852,27 +19079,33 @@
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_stril(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_stril(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr(__a);
+  return (vector signed char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl(__a);
+  return (vector signed char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_stril(vector unsigned short __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihr(
+      (vector signed short)__a);
 #else
-  return __builtin_altivec_vstrihl((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihl(
+      (vector signed short)__a);
 #endif
 }
 
@@ -18889,17 +19122,17 @@
 
 static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
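
vec_stril ("string isolate left-justified") keeps bytes up to the first NUL and zeroes everything after it; the LE/BE #ifdef swap above exists because lane order, and hence "left", flips between endiannesses. A portable byte-array sketch of the semantics, illustrative only:

#include <stdio.h>

/* Keep bytes up to and including the first NUL, clear everything after
   it; with no NUL present the array is left untouched. */
static void stril_bytes(unsigned char *v, int n) {
  int i = 0;
  while (i < n && v[i] != 0)
    i++;
  for (i++; i < n; i++)
    v[i] = 0;
}

int main(void) {
  unsigned char v[8] = {'a', 'b', 0, 'x', 'y', 1, 2, 3};
  stril_bytes(v, 8);
  for (int i = 0; i < 8; i++)
    printf("%d ", v[i]); /* 97 98 0 0 0 0 0 0 */
  printf("\n");
  return 0;
}
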
 
@@ -18924,27 +19157,33 @@
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_strir(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_strir(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl(__a);
+  return (vector signed char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr(__a);
+  return (vector signed char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_strir(vector unsigned short __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihl(
+      (vector signed short)__a);
 #else
-  return __builtin_altivec_vstrihr((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihr(
+      (vector signed short)__a);
 #endif
 }
 
@@ -18961,17 +19200,17 @@
 
 static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ammintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ammintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ammintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/amxintrin.h b/darwin-x86/lib64/clang/16.0.2/include/amxintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/amxintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/amxintrin.h
index 4940666..ec67a87 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/amxintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/amxintrin.h
@@ -439,8 +439,6 @@
 ///
 /// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
 ///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
 /// \param base
 ///    A pointer to base address.
 /// \param stride
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm64intr.h b/darwin-x86/lib64/clang/16.0.2/include/arm64intr.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm64intr.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_acle.h b/darwin-x86/lib64/clang/16.0.2/include/arm_acle.h
similarity index 90%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_acle.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_acle.h
index 45fac24..ed3fc1d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_acle.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/arm_acle.h
@@ -64,7 +64,7 @@
 }
 #endif
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __dbg(t) __builtin_arm_dbg(t)
 #endif
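
Every bare `#if __ARM_FEATURE_X` guard in this header becomes `#if defined(__ARM_FEATURE_X) && __ARM_FEATURE_X`. Evaluating an undefined identifier in `#if` quietly yields 0 but trips -Wundef (and therefore -Werror builds); testing defined() first keeps the guard warning-clean while still requiring a nonzero value. A minimal sketch with a hypothetical FEATURE macro:

/* Compile with -Wundef -Werror: this form stays clean whether or not
   FEATURE is defined, because the && short-circuits before FEATURE
   would be evaluated. */
#if defined(FEATURE) && FEATURE
int feature_path(void) { return 1; }
#else
int feature_path(void) { return 0; }
#endif
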
 
@@ -82,7 +82,7 @@
 /* 8.6.1 Data prefetch */
 #define __pld(addr) __pldx(0, 0, 0, addr)
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __pldx(access_kind, cache_level, retention_policy, addr) \
   __builtin_arm_prefetch(addr, access_kind, 1)
 #else
@@ -93,7 +93,7 @@
 /* 8.6.2 Instruction prefetch */
 #define __pli(addr) __plix(0, 0, addr)
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __plix(cache_level, retention_policy, addr) \
   __builtin_arm_prefetch(addr, 0, 0)
 #else
@@ -140,17 +140,17 @@
 /* CLZ */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __clz(uint32_t __t) {
-  return __builtin_clz(__t);
+  return (uint32_t)__builtin_clz(__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
 __clzl(unsigned long __t) {
-  return __builtin_clzl(__t);
+  return (unsigned long)__builtin_clzl(__t);
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __clzll(uint64_t __t) {
-  return __builtin_clzll(__t);
+  return (uint64_t)__builtin_clzll(__t);
 }
 
 /* CLS */
@@ -201,7 +201,7 @@
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __rev16ll(uint64_t __t) {
-  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+  return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
@@ -216,7 +216,7 @@
 /* REVSH */
 static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
 __revsh(int16_t __t) {
-  return __builtin_bswap16(__t);
+  return (int16_t)__builtin_bswap16((uint16_t)__t);
 }
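
The new casts around __builtin_clz*, __rev16, and __builtin_bswap16 do not change behavior; they make each sign-changing or narrowing conversion explicit, since the builtins return plain int/unsigned types that differ from the ACLE return types, keeping the header quiet under the -Wconversion family. A small illustration of the same fix:

#include <stdint.h>

/* Without the cast, -Wsign-conversion flags the int -> uint32_t return. */
static inline uint32_t clz32(uint32_t x) {
  return (uint32_t)__builtin_clz(x);   /* builtin returns plain int */
}
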
 
 /* RBIT */
@@ -227,7 +227,7 @@
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
   return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
          __builtin_arm_rbit(__t >> 32);
 #else
@@ -247,7 +247,7 @@
 /*
  * 9.3 16-bit multiplications
  */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
 __smulbb(int32_t __a, int32_t __b) {
   return __builtin_arm_smulbb(__a, __b);
@@ -277,17 +277,17 @@
 /*
  * 9.4 Saturating intrinsics
  *
- * FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag
+ * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
  * intrinsics are implemented and the flag is enabled.
  */
 /* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
+#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
 #define __ssat(x, y) __builtin_arm_ssat(x, y)
 #define __usat(x, y) __builtin_arm_usat(x, y)
 #endif
 
 /* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __qadd(int32_t __t, int32_t __v) {
   return __builtin_arm_qadd(__t, __v);
@@ -305,7 +305,7 @@
 #endif
 
 /* 9.4.3 Accumultating multiplications */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlabb(int32_t __a, int32_t __b, int32_t __c) {
   return __builtin_arm_smlabb(__a, __b, __c);
@@ -334,13 +334,13 @@
 
 
 /* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 #define __ssat16(x, y) __builtin_arm_ssat16(x, y)
 #define __usat16(x, y) __builtin_arm_usat16(x, y)
 #endif
 
 /* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 typedef int32_t int8x4_t;
 typedef int32_t int16x2_t;
 typedef uint32_t uint8x4_t;
@@ -365,7 +365,7 @@
 #endif
 
 /* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
 __sel(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_sel(__a, __b);
@@ -373,7 +373,7 @@
 #endif
 
 /* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
 __qadd8(int8x4_t __a, int8x4_t __b) {
   return __builtin_arm_qadd8(__a, __b);
@@ -425,7 +425,7 @@
 #endif
 
 /* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __usad8(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_usad8(__a, __b);
@@ -437,7 +437,7 @@
 #endif
 
 /* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
 __qadd16(int16x2_t __a, int16x2_t __b) {
   return __builtin_arm_qadd16(__a, __b);
@@ -537,7 +537,7 @@
 #endif
 
 /* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
   return __builtin_arm_smlad(__a, __b, __c);
@@ -589,7 +589,7 @@
 #endif
 
 /* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
+#if defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __crc32b(uint32_t __a, uint8_t __b) {
   return __builtin_arm_crc32b(__a, __b);
@@ -632,7 +632,7 @@
 #endif
 
 /* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __jcvt(double __a) {
   return __builtin_arm_jcvt(__a);
@@ -640,50 +640,50 @@
 #endif
 
 /* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32zf(float __a) {
-  return __builtin_arm_frint32zf(__a);
+__rint32zf(float __a) {
+  return __builtin_arm_rint32zf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32z(double __a) {
-  return __builtin_arm_frint32z(__a);
+__rint32z(double __a) {
+  return __builtin_arm_rint32z(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64zf(float __a) {
-  return __builtin_arm_frint64zf(__a);
+__rint64zf(float __a) {
+  return __builtin_arm_rint64zf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64z(double __a) {
-  return __builtin_arm_frint64z(__a);
+__rint64z(double __a) {
+  return __builtin_arm_rint64z(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32xf(float __a) {
-  return __builtin_arm_frint32xf(__a);
+__rint32xf(float __a) {
+  return __builtin_arm_rint32xf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32x(double __a) {
-  return __builtin_arm_frint32x(__a);
+__rint32x(double __a) {
+  return __builtin_arm_rint32x(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64xf(float __a) {
-  return __builtin_arm_frint64xf(__a);
+__rint64xf(float __a) {
+  return __builtin_arm_rint64xf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64x(double __a) {
-  return __builtin_arm_frint64x(__a);
+__rint64x(double __a) {
+  return __builtin_arm_rint64x(__a);
 }
 #endif
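
The Armv8.5-A rounding helpers above are renamed from the __frint32zf-style spellings to __rint32zf (with matching __builtin_arm_rint* builtins); the underlying FRINT32Z/FRINT64X instructions are unchanged, and the new names appear to track the ACLE spelling. A hedged usage sketch:

#include <arm_acle.h>

double round32z(double x) {
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
  return __rint32z(x);   /* FRINT32Z: round toward zero, 32-bit-representable */
#else
  return x;              /* fallback when the feature is unavailable */
#endif
}
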
 
 /* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
 typedef struct {
     uint64_t val[8];
 } data512_t;
@@ -721,7 +721,7 @@
 #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
 
 /* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
+#if defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
 #define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
 #define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
 #define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
@@ -730,8 +730,14 @@
 #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
 #endif
 
+/* Memory Operations Intrinsics */
+#if defined(__ARM_FEATURE_MOPS) && __ARM_FEATURE_MOPS && defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
+#define __arm_mops_memset_tag(__tagged_address, __value, __size)    \
+  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
+#endif
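
New in this drop: when both __ARM_FEATURE_MOPS (the Armv8.8 memcpy/memset instructions) and __ARM_FEATURE_MEMORY_TAGGING are available, __arm_mops_memset_tag exposes the combined tag-setting memset. A hedged sketch, assuming the ACLE shape void *__arm_mops_memset_tag(void *, int, size_t):

#include <arm_acle.h>
#include <stddef.h>

void *clear_tagged(void *tagged_ptr, size_t n) {
#if defined(__ARM_FEATURE_MOPS) && defined(__ARM_FEATURE_MEMORY_TAGGING)
  /* Sets memory contents and MTE allocation tags in one operation. */
  return __arm_mops_memset_tag(tagged_ptr, 0, n);
#else
  return tagged_ptr;   /* feature-gated; no-op fallback for the sketch */
#endif
}
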
+
 /* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
+#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME
 
 #define _TMFAILURE_REASON  0x00007fffu
 #define _TMFAILURE_RTRY    0x00008000u
@@ -753,7 +759,7 @@
 #endif /* __ARM_FEATURE_TME */
 
 /* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
 static __inline__ int __attribute__((__always_inline__, __nodebug__))
 __rndr(uint64_t *__p) {
   return __builtin_arm_rndr(__p);
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_bf16.h b/darwin-x86/lib64/clang/16.0.2/include/arm_bf16.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_bf16.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_bf16.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_cde.h b/darwin-x86/lib64/clang/16.0.2/include/arm_cde.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_cde.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_cde.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_cmse.h b/darwin-x86/lib64/clang/16.0.2/include/arm_cmse.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_cmse.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_cmse.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h b/darwin-x86/lib64/clang/16.0.2/include/arm_fp16.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_fp16.h
index ce993ce..6e8470f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/arm_fp16.h
@@ -31,561 +31,561 @@
 
 #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)
 #define vabdh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
   __ret; \
 })
 #define vabsh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
   __ret; \
 })
 #define vaddh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcageh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcagth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcaleh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcalth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
   __ret; \
 })
 #define vceqh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
   __ret; \
 })
 #define vceqzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
   __ret; \
 })
 #define vcgeh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcgezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
   __ret; \
 })
 #define vcgth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcgtzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
   __ret; \
 })
 #define vcleh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
   __ret; \
 })
 #define vclezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
   __ret; \
 })
 #define vclth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcltzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
   __ret; \
 })
 #define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
   __ret; \
 })
 #define vcvth_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
   __ret; \
 })
 #define vcvth_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
   __ret; \
 })
 #define vcvth_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
   __ret; \
 })
 #define vcvth_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
   __ret; \
 })
 #define vcvth_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
   __ret; \
 })
 #define vcvth_f16_u16(__p0) __extension__ ({ \
-  uint16_t __s0 = __p0; \
   float16_t __ret; \
+  uint16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
   __ret; \
 })
 #define vcvth_f16_s16(__p0) __extension__ ({ \
-  int16_t __s0 = __p0; \
   float16_t __ret; \
+  int16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
   __ret; \
 })
 #define vcvth_f16_u32(__p0) __extension__ ({ \
-  uint32_t __s0 = __p0; \
   float16_t __ret; \
+  uint32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
   __ret; \
 })
 #define vcvth_f16_s32(__p0) __extension__ ({ \
-  int32_t __s0 = __p0; \
   float16_t __ret; \
+  int32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
   __ret; \
 })
 #define vcvth_f16_u64(__p0) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   float16_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
   __ret; \
 })
 #define vcvth_f16_s64(__p0) __extension__ ({ \
-  int64_t __s0 = __p0; \
   float16_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
   __ret; \
 })
 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
   float16_t __ret; \
+  uint32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
   float16_t __ret; \
+  int32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   float16_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   float16_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
   float16_t __ret; \
+  uint16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
   float16_t __ret; \
+  int16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
   __ret; \
 })
 #define vcvtmh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
   __ret; \
 })
 #define vdivh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
   __ret; \
 })
 #define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
   float16_t __s2 = __p2; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
   __ret; \
 })
 #define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
   float16_t __s2 = __p2; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
   __ret; \
 })
 #define vmaxh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
   __ret; \
 })
 #define vminh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
   __ret; \
 })
 #define vminnmh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmulh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmulxh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
   __ret; \
 })
 #define vnegh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
   __ret; \
 })
 #define vrecpeh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
   __ret; \
 })
 #define vrecpsh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
   __ret; \
 })
 #define vrecpxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
   __ret; \
 })
 #define vrndh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
   __ret; \
 })
 #define vrndah_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
   __ret; \
 })
 #define vrndih_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
   __ret; \
 })
 #define vrndmh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
   __ret; \
 })
 #define vrndnh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
   __ret; \
 })
 #define vrndph_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
   __ret; \
 })
 #define vrndxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
   __ret; \
 })
 #define vrsqrteh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
   __ret; \
 })
 #define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
   __ret; \
 })
 #define vsqrth_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
   __ret; \
 })
 #define vsubh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
   __ret; \
 })
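
Throughout arm_fp16.h (and arm_neon.h below), the generated macros now declare __ret first, ahead of the __s0/__s1 argument snapshots, instead of last. The macro shape itself is unchanged: a GNU statement expression whose final expression is the result. A minimal sketch of the idiom, with a hypothetical halve example rather than a real NEON intrinsic:

/* GNU statement expression: the last expression is the macro's value. */
#define halve_f16(__p0) __extension__ ({ \
  float __ret;        /* result declared first, matching the new layout */ \
  float __s0 = __p0;  /* snapshot the argument exactly once */ \
  __ret = __s0 * 0.5f; \
  __ret;              /* value of the whole ({ ... }) */ \
})
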
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_mve.h b/darwin-x86/lib64/clang/16.0.2/include/arm_mve.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_mve.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h b/darwin-x86/lib64/clang/16.0.2/include/arm_neon.h
similarity index 89%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_neon.h
index 2448870..2991b12 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/arm_neon.h
@@ -462,53 +462,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #else
 #define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #endif
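
The splat_lane_* macros keep the long-standing big-endian strategy seen in the #else branches: reverse the input lanes with __builtin_shufflevector, call the little-endian builtin, then reverse the result; the hunks here only move where __ret is declared. A compact sketch of the lane-reversal idiom on a generic 4-lane vector:

typedef int v4i __attribute__((vector_size(16)));

static v4i reverse_lanes(v4i v) {
  /* Same shuffle the big-endian branches apply before and after
     invoking the little-endian builtin. */
  return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}
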
 
 #define splat_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
 #else
 #define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
@@ -516,23 +516,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #else
 #define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
@@ -540,22 +540,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
 #else
 #define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
@@ -563,23 +563,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
 #else
 #define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
@@ -587,23 +587,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -611,23 +611,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -635,22 +635,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #else
 #define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
@@ -658,23 +658,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -682,23 +682,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -706,22 +706,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
 #else
 #define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
@@ -729,23 +729,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
 #else
 #define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
@@ -753,23 +753,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
 #else
 #define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
@@ -777,23 +777,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -801,22 +801,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #else
 #define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
@@ -824,23 +824,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -848,23 +848,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -872,53 +872,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #endif
 
 #define splat_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -926,53 +926,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #endif
 
 #define splat_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
 #else
 #define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
@@ -980,23 +980,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
 #else
 #define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
@@ -1004,53 +1004,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #endif
 
 #define splat_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -1058,23 +1058,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
 #else
 #define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
@@ -1082,22 +1082,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
 #else
 #define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
   __ret; \
 })
 #define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
@@ -1105,23 +1105,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
 #else
 #define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
@@ -1129,23 +1129,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
 #else
 #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
@@ -1153,23 +1153,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
 #else
 #define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
@@ -1177,23 +1177,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
 #else
 #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
@@ -1201,23 +1201,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
@@ -1225,23 +1225,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -1249,23 +1249,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -1273,23 +1273,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -1297,23 +1297,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
@@ -1321,23 +1321,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
 #else
 #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
@@ -1345,23 +1345,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
 #else
 #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
@@ -1369,23 +1369,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
 #else
 #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
@@ -1393,23 +1393,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -1417,23 +1417,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -1441,23 +1441,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -1465,23 +1465,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
@@ -1489,23 +1489,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -1513,22 +1513,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
   __ret; \
 })
 #define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -1536,23 +1536,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -1560,23 +1560,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
@@ -1584,22 +1584,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
 #else
 #define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
   __ret; \
 })
 #define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
@@ -1607,23 +1607,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
 #else
 #define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
@@ -1631,23 +1631,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
 #else
 #define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
@@ -1655,23 +1655,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -1679,22 +1679,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
   __ret; \
 })
 #define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -1702,23 +1702,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -1732,9 +1732,9 @@
 }
 #else
 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1754,9 +1754,9 @@
 }
 #else
 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1776,9 +1776,9 @@
 }
 #else
 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1798,9 +1798,9 @@
 }
 #else
 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1820,9 +1820,9 @@
 }
 #else
 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1837,9 +1837,9 @@
 }
 #else
 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1859,9 +1859,9 @@
 }
 #else
 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1881,9 +1881,9 @@
 }
 #else
 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1903,9 +1903,9 @@
 }
 #else
 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -1925,9 +1925,9 @@
 }
 #else
 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1947,9 +1947,9 @@
 }
 #else
 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1969,9 +1969,9 @@
 }
 #else
 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -1986,9 +1986,9 @@
 }
 #else
 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2008,9 +2008,9 @@
 }
 #else
 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2030,8 +2030,8 @@
 }
 #else
 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2046,8 +2046,8 @@
 }
 #else
 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2062,8 +2062,8 @@
 }
 #else
 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2078,8 +2078,8 @@
 }
 #else
 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2094,8 +2094,8 @@
 }
 #else
 __ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2110,8 +2110,8 @@
 }
 #else
 __ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2126,8 +2126,8 @@
 }
 #else
 __ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2142,8 +2142,8 @@
 }
 #else
 __ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2158,9 +2158,9 @@
 }
 #else
 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2175,9 +2175,9 @@
 }
 #else
 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2192,9 +2192,9 @@
 }
 #else
 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2209,9 +2209,9 @@
 }
 #else
 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2226,9 +2226,9 @@
 }
 #else
 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2243,9 +2243,9 @@
 }
 #else
 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2260,9 +2260,9 @@
 }
 #else
 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2277,9 +2277,9 @@
 }
 #else
 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2294,9 +2294,9 @@
 }
 #else
 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2311,9 +2311,9 @@
 }
 #else
 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2328,9 +2328,9 @@
 }
 #else
 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2350,9 +2350,9 @@
 }
 #else
 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2367,9 +2367,9 @@
 }
 #else
 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2384,9 +2384,9 @@
 }
 #else
 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2401,9 +2401,9 @@
 }
 #else
 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2423,9 +2423,9 @@
 }
 #else
 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2440,9 +2440,9 @@
 }
 #else
 __ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2462,9 +2462,9 @@
 }
 #else
 __ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2479,9 +2479,9 @@
 }
 #else
 __ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2496,9 +2496,9 @@
 }
 #else
 __ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2513,9 +2513,9 @@
 }
 #else
 __ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2530,9 +2530,9 @@
 }
 #else
 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2552,9 +2552,9 @@
 }
 #else
 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2574,9 +2574,9 @@
 }
 #else
 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2596,9 +2596,9 @@
 }
 #else
 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2618,9 +2618,9 @@
 }
 #else
 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2640,9 +2640,9 @@
 }
 #else
 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2662,9 +2662,9 @@
 }
 #else
 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2679,9 +2679,9 @@
 }
 #else
 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2696,9 +2696,9 @@
 }
 #else
 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2713,9 +2713,9 @@
 }
 #else
 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2730,9 +2730,9 @@
 }
 #else
 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2747,9 +2747,9 @@
 }
 #else
 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2764,9 +2764,9 @@
 }
 #else
 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2781,9 +2781,9 @@
 }
 #else
 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2798,9 +2798,9 @@
 }
 #else
 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2815,9 +2815,9 @@
 }
 #else
 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2837,9 +2837,9 @@
 }
 #else
 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2854,9 +2854,9 @@
 }
 #else
 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2871,9 +2871,9 @@
 }
 #else
 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2893,9 +2893,9 @@
 }
 #else
 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2910,9 +2910,9 @@
 }
 #else
 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2927,9 +2927,9 @@
 }
 #else
 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2944,9 +2944,9 @@
 }
 #else
 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2961,9 +2961,9 @@
 }
 #else
 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2978,9 +2978,9 @@
 }
 #else
 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2995,9 +2995,9 @@
 }
 #else
 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3012,9 +3012,9 @@
 }
 #else
 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3029,9 +3029,9 @@
 }
 #else
 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3046,9 +3046,9 @@
 }
 #else
 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3063,9 +3063,9 @@
 }
 #else
 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3085,9 +3085,9 @@
 }
 #else
 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3102,9 +3102,9 @@
 }
 #else
 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3119,9 +3119,9 @@
 }
 #else
 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3141,9 +3141,9 @@
 }
 #else
 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3158,10 +3158,10 @@
 }
 #else
 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+  poly8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3176,10 +3176,10 @@
 }
 #else
 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+  poly16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3194,10 +3194,10 @@
 }
 #else
 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+  poly8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3212,10 +3212,10 @@
 }
 #else
 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+  poly16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3230,10 +3230,10 @@
 }
 #else
 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3248,10 +3248,10 @@
 }
 #else
 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3266,10 +3266,10 @@
 }
 #else
 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3284,10 +3284,10 @@
 }
 #else
 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3302,10 +3302,10 @@
 }
 #else
 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3320,10 +3320,10 @@
 }
 #else
 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3338,10 +3338,10 @@
 }
 #else
 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3356,10 +3356,10 @@
 }
 #else
 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3374,10 +3374,10 @@
 }
 #else
 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3392,10 +3392,10 @@
 }
 #else
 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3410,10 +3410,10 @@
 }
 #else
 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3433,10 +3433,10 @@
 }
 #else
 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3451,10 +3451,10 @@
 }
 #else
 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3469,10 +3469,10 @@
 }
 #else
 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3487,10 +3487,10 @@
 }
 #else
 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3510,10 +3510,10 @@
 }
 #else
 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3528,9 +3528,9 @@
 }
 #else
 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3545,9 +3545,9 @@
 }
 #else
 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3562,9 +3562,9 @@
 }
 #else
 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3579,9 +3579,9 @@
 }
 #else
 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3596,9 +3596,9 @@
 }
 #else
 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3613,9 +3613,9 @@
 }
 #else
 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3630,9 +3630,9 @@
 }
 #else
 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3647,9 +3647,9 @@
 }
 #else
 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3664,9 +3664,9 @@
 }
 #else
 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  uint8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3681,9 +3681,9 @@
 }
 #else
 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  uint8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3698,9 +3698,9 @@
 }
 #else
 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3715,9 +3715,9 @@
 }
 #else
 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3732,9 +3732,9 @@
 }
 #else
 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3749,9 +3749,9 @@
 }
 #else
 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3766,9 +3766,9 @@
 }
 #else
 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3783,9 +3783,9 @@
 }
 #else
 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3800,9 +3800,9 @@
 }
 #else
 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3817,9 +3817,9 @@
 }
 #else
 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3834,9 +3834,9 @@
 }
 #else
 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3851,9 +3851,9 @@
 }
 #else
 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3868,9 +3868,9 @@
 }
 #else
 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3885,9 +3885,9 @@
 }
 #else
 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3902,9 +3902,9 @@
 }
 #else
 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3919,9 +3919,9 @@
 }
 #else
 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3936,9 +3936,9 @@
 }
 #else
 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3953,9 +3953,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3970,9 +3970,9 @@
 }
 #else
 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3987,9 +3987,9 @@
 }
 #else
 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4004,9 +4004,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4021,9 +4021,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4038,9 +4038,9 @@
 }
 #else
 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4055,9 +4055,9 @@
 }
 #else
 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4072,9 +4072,9 @@
 }
 #else
 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4089,9 +4089,9 @@
 }
 #else
 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4106,9 +4106,9 @@
 }
 #else
 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4123,9 +4123,9 @@
 }
 #else
 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4140,9 +4140,9 @@
 }
 #else
 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4157,9 +4157,9 @@
 }
 #else
 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4174,9 +4174,9 @@
 }
 #else
 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4191,9 +4191,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4208,9 +4208,9 @@
 }
 #else
 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4225,9 +4225,9 @@
 }
 #else
 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4242,9 +4242,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4259,9 +4259,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4276,9 +4276,9 @@
 }
 #else
 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4293,9 +4293,9 @@
 }
 #else
 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4310,9 +4310,9 @@
 }
 #else
 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4327,9 +4327,9 @@
 }
 #else
 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4344,9 +4344,9 @@
 }
 #else
 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4361,9 +4361,9 @@
 }
 #else
 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4378,9 +4378,9 @@
 }
 #else
 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4395,9 +4395,9 @@
 }
 #else
 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4412,9 +4412,9 @@
 }
 #else
 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4429,9 +4429,9 @@
 }
 #else
 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4446,9 +4446,9 @@
 }
 #else
 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4463,9 +4463,9 @@
 }
 #else
 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4480,9 +4480,9 @@
 }
 #else
 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4497,9 +4497,9 @@
 }
 #else
 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4514,9 +4514,9 @@
 }
 #else
 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4531,9 +4531,9 @@
 }
 #else
 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4548,9 +4548,9 @@
 }
 #else
 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4565,9 +4565,9 @@
 }
 #else
 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4582,9 +4582,9 @@
 }
 #else
 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4599,9 +4599,9 @@
 }
 #else
 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4616,9 +4616,9 @@
 }
 #else
 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4633,9 +4633,9 @@
 }
 #else
 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4650,8 +4650,8 @@
 }
 #else
 __ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4666,8 +4666,8 @@
 }
 #else
 __ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4682,8 +4682,8 @@
 }
 #else
 __ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4698,8 +4698,8 @@
 }
 #else
 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4714,8 +4714,8 @@
 }
 #else
 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4730,8 +4730,8 @@
 }
 #else
 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4746,8 +4746,8 @@
 }
 #else
 __ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4762,8 +4762,8 @@
 }
 #else
 __ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4778,8 +4778,8 @@
 }
 #else
 __ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4794,8 +4794,8 @@
 }
 #else
 __ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4810,8 +4810,8 @@
 }
 #else
 __ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4826,8 +4826,8 @@
 }
 #else
 __ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4842,9 +4842,9 @@
 }
 #else
 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4859,9 +4859,9 @@
 }
 #else
 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4876,9 +4876,9 @@
 }
 #else
 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4893,9 +4893,9 @@
 }
 #else
 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4910,9 +4910,9 @@
 }
 #else
 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4927,9 +4927,9 @@
 }
 #else
 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4944,9 +4944,9 @@
 }
 #else
 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4961,9 +4961,9 @@
 }
 #else
 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4978,9 +4978,9 @@
 }
 #else
 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4995,9 +4995,9 @@
 }
 #else
 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5012,9 +5012,9 @@
 }
 #else
 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5029,9 +5029,9 @@
 }
 #else
 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5046,9 +5046,9 @@
 }
 #else
 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5063,9 +5063,9 @@
 }
 #else
 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5080,8 +5080,8 @@
 }
 #else
 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5096,8 +5096,8 @@
 }
 #else
 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5112,8 +5112,8 @@
 }
 #else
 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5128,8 +5128,8 @@
 }
 #else
 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5144,8 +5144,8 @@
 }
 #else
 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5160,8 +5160,8 @@
 }
 #else
 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5176,8 +5176,8 @@
 }
 #else
 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5192,8 +5192,8 @@
 }
 #else
 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5208,8 +5208,8 @@
 }
 #else
 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5224,8 +5224,8 @@
 }
 #else
 __ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5240,8 +5240,8 @@
 }
 #else
 __ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5256,8 +5256,8 @@
 }
 #else
 __ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5272,8 +5272,8 @@
 }
 #else
 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5288,8 +5288,8 @@
 }
 #else
 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5304,8 +5304,8 @@
 }
 #else
 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5320,8 +5320,8 @@
 }
 #else
 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5336,8 +5336,8 @@
 }
 #else
 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5352,8 +5352,8 @@
 }
 #else
 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5368,9 +5368,9 @@
 }
 #else
 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x16_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5385,9 +5385,9 @@
 }
 #else
 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x8_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5402,9 +5402,9 @@
 }
 #else
 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5424,9 +5424,9 @@
 }
 #else
 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5461,9 +5461,9 @@
 }
 #else
 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5483,9 +5483,9 @@
 }
 #else
 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5505,9 +5505,9 @@
 }
 #else
 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x4_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5527,9 +5527,9 @@
 }
 #else
 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x8_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5549,9 +5549,9 @@
 }
 #else
 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5586,9 +5586,9 @@
 }
 #else
 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5680,8 +5680,8 @@
 }
 #else
 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5696,8 +5696,8 @@
 }
 #else
 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5712,8 +5712,8 @@
 }
 #else
 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5728,8 +5728,8 @@
 }
 #else
 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5738,16 +5738,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5756,16 +5756,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5774,16 +5774,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5792,16 +5792,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5810,16 +5810,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5828,16 +5828,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5846,16 +5846,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5864,16 +5864,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5888,8 +5888,8 @@
 }
 #else
 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5904,8 +5904,8 @@
 }
 #else
 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5920,8 +5920,8 @@
 }
 #else
 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5936,8 +5936,8 @@
 }
 #else
 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5946,16 +5946,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
-  poly8x8_t __s0_0 = __p0_0; \
   poly8x8_t __ret_0; \
+  poly8x8_t __s0_0 = __p0_0; \
   __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
   __ret_0; \
 })
 #else
 #define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
+  poly8x8_t __ret_1; \
   poly8x8_t __s0_1 = __p0_1; \
   poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_1; \
   __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
   __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_1; \
@@ -5964,16 +5964,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
-  poly16x4_t __s0_2 = __p0_2; \
   poly16x4_t __ret_2; \
+  poly16x4_t __s0_2 = __p0_2; \
   __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
   __ret_2; \
 })
 #else
 #define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
+  poly16x4_t __ret_3; \
   poly16x4_t __s0_3 = __p0_3; \
   poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
-  poly16x4_t __ret_3; \
   __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
   __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
   __ret_3; \
@@ -5982,16 +5982,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
-  poly8x8_t __s0_4 = __p0_4; \
   poly8x16_t __ret_4; \
+  poly8x8_t __s0_4 = __p0_4; \
   __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
   __ret_4; \
 })
 #else
 #define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
+  poly8x16_t __ret_5; \
   poly8x8_t __s0_5 = __p0_5; \
   poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_5; \
   __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
   __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_5; \
@@ -6000,16 +6000,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
-  poly16x4_t __s0_6 = __p0_6; \
   poly16x8_t __ret_6; \
+  poly16x4_t __s0_6 = __p0_6; \
   __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
   __ret_6; \
 })
 #else
 #define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
+  poly16x8_t __ret_7; \
   poly16x4_t __s0_7 = __p0_7; \
   poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
-  poly16x8_t __ret_7; \
   __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
   __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_7; \
@@ -6018,16 +6018,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
-  uint8x8_t __s0_8 = __p0_8; \
   uint8x16_t __ret_8; \
+  uint8x8_t __s0_8 = __p0_8; \
   __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
   __ret_8; \
 })
 #else
 #define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
+  uint8x16_t __ret_9; \
   uint8x8_t __s0_9 = __p0_9; \
   uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_9; \
   __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
   __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_9; \
@@ -6036,16 +6036,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
-  uint32x2_t __s0_10 = __p0_10; \
   uint32x4_t __ret_10; \
+  uint32x2_t __s0_10 = __p0_10; \
   __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
   __ret_10; \
 })
 #else
 #define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
+  uint32x4_t __ret_11; \
   uint32x2_t __s0_11 = __p0_11; \
   uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
-  uint32x4_t __ret_11; \
   __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
   __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
   __ret_11; \
@@ -6054,15 +6054,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
-  uint64x1_t __s0_12 = __p0_12; \
   uint64x2_t __ret_12; \
+  uint64x1_t __s0_12 = __p0_12; \
   __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
   __ret_12; \
 })
 #else
 #define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
-  uint64x1_t __s0_13 = __p0_13; \
   uint64x2_t __ret_13; \
+  uint64x1_t __s0_13 = __p0_13; \
   __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
   __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
   __ret_13; \
@@ -6071,16 +6071,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
-  uint16x4_t __s0_14 = __p0_14; \
   uint16x8_t __ret_14; \
+  uint16x4_t __s0_14 = __p0_14; \
   __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
   __ret_14; \
 })
 #else
 #define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
+  uint16x8_t __ret_15; \
   uint16x4_t __s0_15 = __p0_15; \
   uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
-  uint16x8_t __ret_15; \
   __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
   __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_15; \
@@ -6089,16 +6089,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
-  int8x8_t __s0_16 = __p0_16; \
   int8x16_t __ret_16; \
+  int8x8_t __s0_16 = __p0_16; \
   __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
   __ret_16; \
 })
 #else
 #define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
+  int8x16_t __ret_17; \
   int8x8_t __s0_17 = __p0_17; \
   int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_17; \
   __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
   __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_17; \
@@ -6107,16 +6107,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
-  float32x2_t __s0_18 = __p0_18; \
   float32x4_t __ret_18; \
+  float32x2_t __s0_18 = __p0_18; \
   __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
   __ret_18; \
 })
 #else
 #define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
+  float32x4_t __ret_19; \
   float32x2_t __s0_19 = __p0_19; \
   float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
-  float32x4_t __ret_19; \
   __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
   __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
   __ret_19; \
@@ -6124,194 +6124,230 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
-  int32x2_t __s0_20 = __p0_20; \
-  int32x4_t __ret_20; \
-  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
+#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \
+  float16x8_t __ret_20; \
+  float16x4_t __s0_20 = __p0_20; \
+  __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \
   __ret_20; \
 })
 #else
-#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
-  int32x2_t __s0_21 = __p0_21; \
-  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  int32x4_t __ret_21; \
-  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
+#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \
+  float16x8_t __ret_21; \
+  float16x4_t __s0_21 = __p0_21; \
+  float16x4_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \
+  __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \
+  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_21; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
-  int64x1_t __s0_22 = __p0_22; \
-  int64x2_t __ret_22; \
-  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
+#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \
+  int32x4_t __ret_22; \
+  int32x2_t __s0_22 = __p0_22; \
+  __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \
   __ret_22; \
 })
 #else
-#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
-  int64x1_t __s0_23 = __p0_23; \
-  int64x2_t __ret_23; \
-  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
+#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \
+  int32x4_t __ret_23; \
+  int32x2_t __s0_23 = __p0_23; \
+  int32x2_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \
+  __ret_23 = __noswap_splatq_lane_s32(__rev0_23, __p1_23); \
+  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \
   __ret_23; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
-  int16x4_t __s0_24 = __p0_24; \
-  int16x8_t __ret_24; \
-  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
+#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \
+  int64x2_t __ret_24; \
+  int64x1_t __s0_24 = __p0_24; \
+  __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \
   __ret_24; \
 })
 #else
-#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
-  int16x4_t __s0_25 = __p0_25; \
-  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
-  int16x8_t __ret_25; \
-  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \
+  int64x2_t __ret_25; \
+  int64x1_t __s0_25 = __p0_25; \
+  __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \
+  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \
   __ret_25; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
-  uint8x8_t __s0_26 = __p0_26; \
-  uint8x8_t __ret_26; \
-  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
+#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \
+  int16x8_t __ret_26; \
+  int16x4_t __s0_26 = __p0_26; \
+  __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \
   __ret_26; \
 })
 #else
-#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
-  uint8x8_t __s0_27 = __p0_27; \
-  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_27; \
-  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
+#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \
+  int16x8_t __ret_27; \
+  int16x4_t __s0_27 = __p0_27; \
+  int16x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
+  __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \
   __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_27; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
-  uint32x2_t __s0_28 = __p0_28; \
-  uint32x2_t __ret_28; \
-  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
+#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \
+  uint8x8_t __ret_28; \
+  uint8x8_t __s0_28 = __p0_28; \
+  __ret_28 = splat_lane_u8(__s0_28, __p1_28); \
   __ret_28; \
 })
 #else
-#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
-  uint32x2_t __s0_29 = __p0_29; \
-  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
-  uint32x2_t __ret_29; \
-  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \
+  uint8x8_t __ret_29; \
+  uint8x8_t __s0_29 = __p0_29; \
+  uint8x8_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \
+  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_29; \
 })
 #endif
 
-#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
-  uint64x1_t __s0_30 = __p0_30; \
-  uint64x1_t __ret_30; \
-  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \
+  uint32x2_t __ret_30; \
+  uint32x2_t __s0_30 = __p0_30; \
+  __ret_30 = splat_lane_u32(__s0_30, __p1_30); \
   __ret_30; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
-  uint16x4_t __s0_31 = __p0_31; \
-  uint16x4_t __ret_31; \
-  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
-  __ret_31; \
-})
 #else
-#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
-  uint16x4_t __s0_32 = __p0_32; \
-  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
-  uint16x4_t __ret_32; \
-  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
-  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
-  __ret_32; \
+#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \
+  uint32x2_t __ret_31; \
+  uint32x2_t __s0_31 = __p0_31; \
+  uint32x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
+  __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \
+  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
+  __ret_31; \
 })
 #endif
 
+#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \
+  uint64x1_t __ret_32; \
+  uint64x1_t __s0_32 = __p0_32; \
+  __ret_32 = splat_lane_u64(__s0_32, __p1_32); \
+  __ret_32; \
+})
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
-  int8x8_t __s0_33 = __p0_33; \
-  int8x8_t __ret_33; \
-  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
+#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \
+  uint16x4_t __ret_33; \
+  uint16x4_t __s0_33 = __p0_33; \
+  __ret_33 = splat_lane_u16(__s0_33, __p1_33); \
   __ret_33; \
 })
 #else
-#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
-  int8x8_t __s0_34 = __p0_34; \
-  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_34; \
-  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
-  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \
+  uint16x4_t __ret_34; \
+  uint16x4_t __s0_34 = __p0_34; \
+  uint16x4_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \
+  __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \
+  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \
   __ret_34; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
-  float32x2_t __s0_35 = __p0_35; \
-  float32x2_t __ret_35; \
-  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
+#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \
+  int8x8_t __ret_35; \
+  int8x8_t __s0_35 = __p0_35; \
+  __ret_35 = splat_lane_s8(__s0_35, __p1_35); \
   __ret_35; \
 })
 #else
-#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
-  float32x2_t __s0_36 = __p0_36; \
-  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
-  float32x2_t __ret_36; \
-  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
-  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
+#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \
+  int8x8_t __ret_36; \
+  int8x8_t __s0_36 = __p0_36; \
+  int8x8_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \
+  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_36; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
-  int32x2_t __s0_37 = __p0_37; \
-  int32x2_t __ret_37; \
-  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
+#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \
+  float32x2_t __ret_37; \
+  float32x2_t __s0_37 = __p0_37; \
+  __ret_37 = splat_lane_f32(__s0_37, __p1_37); \
   __ret_37; \
 })
 #else
-#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
-  int32x2_t __s0_38 = __p0_38; \
-  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
-  int32x2_t __ret_38; \
-  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
+#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \
+  float32x2_t __ret_38; \
+  float32x2_t __s0_38 = __p0_38; \
+  float32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
+  __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \
   __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
   __ret_38; \
 })
 #endif
 
-#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
-  int64x1_t __s0_39 = __p0_39; \
-  int64x1_t __ret_39; \
-  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \
+  float16x4_t __ret_39; \
+  float16x4_t __s0_39 = __p0_39; \
+  __ret_39 = splat_lane_f16(__s0_39, __p1_39); \
   __ret_39; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
-  int16x4_t __s0_40 = __p0_40; \
-  int16x4_t __ret_40; \
-  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
+#else
+#define vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \
+  float16x4_t __ret_40; \
+  float16x4_t __s0_40 = __p0_40; \
+  float16x4_t __rev0_40;  __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \
+  __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \
+  __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \
   __ret_40; \
 })
-#else
-#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
-  int16x4_t __s0_41 = __p0_41; \
-  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
-  int16x4_t __ret_41; \
-  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \
+  int32x2_t __ret_41; \
+  int32x2_t __s0_41 = __p0_41; \
+  __ret_41 = splat_lane_s32(__s0_41, __p1_41); \
   __ret_41; \
 })
+#else
+#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \
+  int32x2_t __ret_42; \
+  int32x2_t __s0_42 = __p0_42; \
+  int32x2_t __rev0_42;  __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \
+  __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \
+  __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 1, 0); \
+  __ret_42; \
+})
+#endif
+
+#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \
+  int64x1_t __ret_43; \
+  int64x1_t __s0_43 = __p0_43; \
+  __ret_43 = splat_lane_s64(__s0_43, __p1_43); \
+  __ret_43; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \
+  int16x4_t __ret_44; \
+  int16x4_t __s0_44 = __p0_44; \
+  __ret_44 = splat_lane_s16(__s0_44, __p1_44); \
+  __ret_44; \
+})
+#else
+#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \
+  int16x4_t __ret_45; \
+  int16x4_t __s0_45 = __p0_45; \
+  int16x4_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \
+  __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \
+  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \
+  __ret_45; \
+})
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -6466,15 +6502,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6608,15 +6644,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -6666,9 +6702,9 @@
 }
 #else
 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6683,9 +6719,9 @@
 }
 #else
 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6700,9 +6736,9 @@
 }
 #else
 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6717,9 +6753,9 @@
 }
 #else
 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6734,9 +6770,9 @@
 }
 #else
 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6751,9 +6787,9 @@
 }
 #else
 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6768,9 +6804,9 @@
 }
 #else
 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6785,9 +6821,9 @@
 }
 #else
 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6802,9 +6838,9 @@
 }
 #else
 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6819,9 +6855,9 @@
 }
 #else
 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6841,9 +6877,9 @@
 }
 #else
 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6858,9 +6894,9 @@
 }
 #else
 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6875,9 +6911,9 @@
 }
 #else
 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6897,9 +6933,9 @@
 }
 #else
 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6908,19 +6944,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6929,19 +6965,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -6950,19 +6986,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6971,19 +7007,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6992,19 +7028,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7013,19 +7049,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7034,19 +7070,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7055,19 +7091,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7076,19 +7112,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7097,19 +7133,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
   __ret; \
 })
 #else
 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7118,19 +7154,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7139,19 +7175,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7160,19 +7196,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7181,19 +7217,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7202,19 +7238,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7222,27 +7258,27 @@
 #endif
 
 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7251,19 +7287,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7272,19 +7308,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
   __ret; \
 })
 #else
 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7293,19 +7329,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7313,27 +7349,27 @@
 #endif
 
 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7348,8 +7384,8 @@
 }
 #else
 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7369,8 +7405,8 @@
 }
 #else
 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x4_t __ret;
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7385,8 +7421,8 @@
 }
 #else
 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7406,8 +7442,8 @@
 }
 #else
 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7427,8 +7463,8 @@
 }
 #else
 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
   return __ret;
 }
@@ -7442,8 +7478,8 @@
 }
 #else
 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7463,8 +7499,8 @@
 }
 #else
 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7484,8 +7520,8 @@
 }
 #else
 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7505,8 +7541,8 @@
 }
 #else
 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7526,8 +7562,8 @@
 }
 #else
 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7547,8 +7583,8 @@
 }
 #else
 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
   return __ret;
 }
@@ -7562,8 +7598,8 @@
 }
 #else
 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7577,22 +7613,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7600,22 +7636,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7623,22 +7659,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7646,22 +7682,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7669,22 +7705,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7692,22 +7728,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7715,22 +7751,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7738,22 +7774,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7761,22 +7797,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7784,22 +7820,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7807,22 +7843,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7830,22 +7866,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7853,22 +7889,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7876,22 +7912,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7899,51 +7935,51 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
 
 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7951,22 +7987,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7974,22 +8010,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7997,51 +8033,51 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
 
 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
@@ -8055,8 +8091,8 @@
 }
 #else
 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8071,8 +8107,8 @@
 }
 #else
 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+  poly16x4_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8087,8 +8123,8 @@
 }
 #else
 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+  uint8x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8103,8 +8139,8 @@
 }
 #else
 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8119,8 +8155,8 @@
 }
 #else
 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
   return __ret;
 }
@@ -8134,8 +8170,8 @@
 }
 #else
 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8150,8 +8186,8 @@
 }
 #else
 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8166,8 +8202,8 @@
 }
 #else
 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8182,8 +8218,8 @@
 }
 #else
 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8198,8 +8234,8 @@
 }
 #else
 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8214,8 +8250,8 @@
 }
 #else
 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
   return __ret;
 }
@@ -8229,8 +8265,8 @@
 }
 #else
 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8245,9 +8281,9 @@
 }
 #else
 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8262,9 +8298,9 @@
 }
 #else
 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8279,9 +8315,9 @@
 }
 #else
 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8296,9 +8332,9 @@
 }
 #else
 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8313,9 +8349,9 @@
 }
 #else
 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8330,9 +8366,9 @@
 }
 #else
 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8347,9 +8383,9 @@
 }
 #else
 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8364,9 +8400,9 @@
 }
 #else
 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8381,9 +8417,9 @@
 }
 #else
 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8398,9 +8434,9 @@
 }
 #else
 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8415,9 +8451,9 @@
 }
 #else
 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8432,9 +8468,9 @@
 }
 #else
 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8449,9 +8485,9 @@
 }
 #else
 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8466,9 +8502,9 @@
 }
 #else
 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8483,9 +8519,9 @@
 }
 #else
 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8500,9 +8536,9 @@
 }
 #else
 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8517,9 +8553,9 @@
 }
 #else
 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8534,9 +8570,9 @@
 }
 #else
 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8551,9 +8587,9 @@
 }
 #else
 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8568,9 +8604,9 @@
 }
 #else
 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8585,9 +8621,9 @@
 }
 #else
 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8602,9 +8638,9 @@
 }
 #else
 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8619,9 +8655,9 @@
 }
 #else
 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8636,9 +8672,9 @@
 }
 #else
 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -9267,16 +9303,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
+  poly8x8_t __s1 = __p1; \
   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9285,16 +9321,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
+  poly16x4_t __s1 = __p1; \
   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9303,16 +9339,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
+  poly8x16_t __s1 = __p1; \
   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9321,16 +9357,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
+  poly16x8_t __s1 = __p1; \
   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9339,16 +9375,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
   uint8x16_t __ret; \
+  uint8x16_t __s1 = __p1; \
   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9357,16 +9393,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
+  uint32x4_t __s1 = __p1; \
   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9375,16 +9411,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
+  uint64x2_t __s1 = __p1; \
   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9393,16 +9429,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
+  uint16x8_t __s1 = __p1; \
   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9411,16 +9447,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
   int8x16_t __ret; \
+  int8x16_t __s1 = __p1; \
   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9429,16 +9465,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
+  float32x4_t __s1 = __p1; \
   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
   __ret; \
 })
 #else
 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9447,16 +9483,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
+  int32x4_t __s1 = __p1; \
   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9465,16 +9501,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
+  int64x2_t __s1 = __p1; \
   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9483,16 +9519,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
+  int16x8_t __s1 = __p1; \
   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9501,16 +9537,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
   uint8x8_t __ret; \
+  uint8x8_t __s1 = __p1; \
   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9519,16 +9555,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
+  uint32x2_t __s1 = __p1; \
   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9536,23 +9572,23 @@
 #endif
 
 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
   uint64x1_t __ret; \
+  uint64x1_t __s1 = __p1; \
   __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
+  uint16x4_t __s1 = __p1; \
   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9561,16 +9597,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
   int8x8_t __ret; \
+  int8x8_t __s1 = __p1; \
   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9579,16 +9615,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
+  float32x2_t __s1 = __p1; \
   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
   __ret; \
 })
 #else
 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9597,16 +9633,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
+  int32x2_t __s1 = __p1; \
   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9614,23 +9650,23 @@
 #endif
 
 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
   int64x1_t __ret; \
+  int64x1_t __s1 = __p1; \
   __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
+  int16x4_t __s1 = __p1; \
   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -11415,18 +11451,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
   poly8x8x2_t __ret; \
+  poly8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
   __ret; \
 })
 #else
 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __ret; \
   poly8x8x2_t __s1 = __p1; \
   poly8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11437,18 +11473,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
   poly16x4x2_t __ret; \
+  poly16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
   __ret; \
 })
 #else
 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __ret; \
   poly16x4x2_t __s1 = __p1; \
   poly16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11459,18 +11495,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
   poly16x8x2_t __ret; \
+  poly16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
   __ret; \
 })
 #else
 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __ret; \
   poly16x8x2_t __s1 = __p1; \
   poly16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11481,18 +11517,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
   uint32x4x2_t __ret; \
+  uint32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
   __ret; \
 })
 #else
 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __ret; \
   uint32x4x2_t __s1 = __p1; \
   uint32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11503,18 +11539,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
   uint16x8x2_t __ret; \
+  uint16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
   __ret; \
 })
 #else
 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __ret; \
   uint16x8x2_t __s1 = __p1; \
   uint16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11525,18 +11561,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
   float32x4x2_t __ret; \
+  float32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
   __ret; \
 })
 #else
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __ret; \
   float32x4x2_t __s1 = __p1; \
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11547,18 +11583,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
   int32x4x2_t __ret; \
+  int32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
   __ret; \
 })
 #else
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __ret; \
   int32x4x2_t __s1 = __p1; \
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11569,18 +11605,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
   int16x8x2_t __ret; \
+  int16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
   __ret; \
 })
 #else
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __ret; \
   int16x8x2_t __s1 = __p1; \
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11591,18 +11627,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
   uint8x8x2_t __ret; \
+  uint8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
   __ret; \
 })
 #else
 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __ret; \
   uint8x8x2_t __s1 = __p1; \
   uint8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11613,18 +11649,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
   uint32x2x2_t __ret; \
+  uint32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
   __ret; \
 })
 #else
 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __ret; \
   uint32x2x2_t __s1 = __p1; \
   uint32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11635,18 +11671,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
   uint16x4x2_t __ret; \
+  uint16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
   __ret; \
 })
 #else
 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __ret; \
   uint16x4x2_t __s1 = __p1; \
   uint16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11657,18 +11693,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
   int8x8x2_t __ret; \
+  int8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
   __ret; \
 })
 #else
 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __ret; \
   int8x8x2_t __s1 = __p1; \
   int8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11679,18 +11715,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
   float32x2x2_t __ret; \
+  float32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
   __ret; \
 })
 #else
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __ret; \
   float32x2x2_t __s1 = __p1; \
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11701,18 +11737,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
   int32x2x2_t __ret; \
+  int32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
   __ret; \
 })
 #else
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __ret; \
   int32x2x2_t __s1 = __p1; \
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11723,18 +11759,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
   int16x4x2_t __ret; \
+  int16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
   __ret; \
 })
 #else
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __ret; \
   int16x4x2_t __s1 = __p1; \
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12449,19 +12485,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
   poly8x8x3_t __ret; \
+  poly8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
   __ret; \
 })
 #else
 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x3_t __ret; \
   poly8x8x3_t __s1 = __p1; \
   poly8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12473,19 +12509,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
   poly16x4x3_t __ret; \
+  poly16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
   __ret; \
 })
 #else
 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x3_t __ret; \
   poly16x4x3_t __s1 = __p1; \
   poly16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12497,19 +12533,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
   poly16x8x3_t __ret; \
+  poly16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
   __ret; \
 })
 #else
 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x3_t __ret; \
   poly16x8x3_t __s1 = __p1; \
   poly16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12521,19 +12557,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
   uint32x4x3_t __ret; \
+  uint32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
   __ret; \
 })
 #else
 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x3_t __ret; \
   uint32x4x3_t __s1 = __p1; \
   uint32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12545,19 +12581,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
   uint16x8x3_t __ret; \
+  uint16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
   __ret; \
 })
 #else
 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x3_t __ret; \
   uint16x8x3_t __s1 = __p1; \
   uint16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12569,19 +12605,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
   float32x4x3_t __ret; \
+  float32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
   __ret; \
 })
 #else
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x3_t __ret; \
   float32x4x3_t __s1 = __p1; \
   float32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12593,19 +12629,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
   int32x4x3_t __ret; \
+  int32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
   __ret; \
 })
 #else
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x3_t __ret; \
   int32x4x3_t __s1 = __p1; \
   int32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12617,19 +12653,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
   int16x8x3_t __ret; \
+  int16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
   __ret; \
 })
 #else
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x3_t __ret; \
   int16x8x3_t __s1 = __p1; \
   int16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12641,19 +12677,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
   uint8x8x3_t __ret; \
+  uint8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
   __ret; \
 })
 #else
 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x3_t __ret; \
   uint8x8x3_t __s1 = __p1; \
   uint8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12665,19 +12701,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
   uint32x2x3_t __ret; \
+  uint32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
   __ret; \
 })
 #else
 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x3_t __ret; \
   uint32x2x3_t __s1 = __p1; \
   uint32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12689,19 +12725,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
   uint16x4x3_t __ret; \
+  uint16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
   __ret; \
 })
 #else
 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x3_t __ret; \
   uint16x4x3_t __s1 = __p1; \
   uint16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12713,19 +12749,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
   int8x8x3_t __ret; \
+  int8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
   __ret; \
 })
 #else
 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x3_t __ret; \
   int8x8x3_t __s1 = __p1; \
   int8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12737,19 +12773,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
   float32x2x3_t __ret; \
+  float32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
   __ret; \
 })
 #else
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x3_t __ret; \
   float32x2x3_t __s1 = __p1; \
   float32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12761,19 +12797,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
   int32x2x3_t __ret; \
+  int32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
   __ret; \
 })
 #else
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x3_t __ret; \
   int32x2x3_t __s1 = __p1; \
   int32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12785,19 +12821,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
   int16x4x3_t __ret; \
+  int16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
   __ret; \
 })
 #else
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x3_t __ret; \
   int16x4x3_t __s1 = __p1; \
   int16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13551,20 +13587,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
   poly8x8x4_t __ret; \
+  poly8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
   __ret; \
 })
 #else
 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x4_t __ret; \
   poly8x8x4_t __s1 = __p1; \
   poly8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13577,20 +13613,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
   poly16x4x4_t __ret; \
+  poly16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
   __ret; \
 })
 #else
 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x4_t __ret; \
   poly16x4x4_t __s1 = __p1; \
   poly16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13603,20 +13639,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
   poly16x8x4_t __ret; \
+  poly16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
   __ret; \
 })
 #else
 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x4_t __ret; \
   poly16x8x4_t __s1 = __p1; \
   poly16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13629,20 +13665,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
   uint32x4x4_t __ret; \
+  uint32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
   __ret; \
 })
 #else
 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x4_t __ret; \
   uint32x4x4_t __s1 = __p1; \
   uint32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13655,20 +13691,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
   uint16x8x4_t __ret; \
+  uint16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
   __ret; \
 })
 #else
 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x4_t __ret; \
   uint16x8x4_t __s1 = __p1; \
   uint16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13681,20 +13717,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
   float32x4x4_t __ret; \
+  float32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
   __ret; \
 })
 #else
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x4_t __ret; \
   float32x4x4_t __s1 = __p1; \
   float32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13707,20 +13743,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
   int32x4x4_t __ret; \
+  int32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
   __ret; \
 })
 #else
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x4_t __ret; \
   int32x4x4_t __s1 = __p1; \
   int32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13733,20 +13769,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
   int16x8x4_t __ret; \
+  int16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
   __ret; \
 })
 #else
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x4_t __ret; \
   int16x8x4_t __s1 = __p1; \
   int16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13759,20 +13795,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
   uint8x8x4_t __ret; \
+  uint8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
   __ret; \
 })
 #else
 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x4_t __ret; \
   uint8x8x4_t __s1 = __p1; \
   uint8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13785,20 +13821,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
   uint32x2x4_t __ret; \
+  uint32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
   __ret; \
 })
 #else
 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x4_t __ret; \
   uint32x2x4_t __s1 = __p1; \
   uint32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13811,20 +13847,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
   uint16x4x4_t __ret; \
+  uint16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
   __ret; \
 })
 #else
 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x4_t __ret; \
   uint16x4x4_t __s1 = __p1; \
   uint16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13837,20 +13873,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
   int8x8x4_t __ret; \
+  int8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
   __ret; \
 })
 #else
 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x4_t __ret; \
   int8x8x4_t __s1 = __p1; \
   int8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13863,20 +13899,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
   float32x2x4_t __ret; \
+  float32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
   __ret; \
 })
 #else
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x4_t __ret; \
   float32x2x4_t __s1 = __p1; \
   float32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13889,20 +13925,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
   int32x2x4_t __ret; \
+  int32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
   __ret; \
 })
 #else
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x4_t __ret; \
   int32x2x4_t __s1 = __p1; \
   int32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13915,20 +13951,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
   int16x4x4_t __ret; \
+  int16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
   __ret; \
 })
 #else
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x4_t __ret; \
   int16x4x4_t __s1 = __p1; \
   int16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13947,9 +13983,9 @@
 }
 #else
 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -13964,9 +14000,9 @@
 }
 #else
 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -13981,9 +14017,9 @@
 }
 #else
 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -13998,9 +14034,9 @@
 }
 #else
 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14015,9 +14051,9 @@
 }
 #else
 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14032,9 +14068,9 @@
 }
 #else
 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14049,9 +14085,9 @@
 }
 #else
 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14066,9 +14102,9 @@
 }
 #else
 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14083,9 +14119,9 @@
 }
 #else
 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14100,9 +14136,9 @@
 }
 #else
 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14117,9 +14153,9 @@
 }
 #else
 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14134,9 +14170,9 @@
 }
 #else
 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14151,9 +14187,9 @@
 }
 #else
 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14168,9 +14204,9 @@
 }
 #else
 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14185,9 +14221,9 @@
 }
 #else
 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14202,9 +14238,9 @@
 }
 #else
 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14219,9 +14255,9 @@
 }
 #else
 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14236,9 +14272,9 @@
 }
 #else
 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14253,9 +14289,9 @@
 }
 #else
 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14270,9 +14306,9 @@
 }
 #else
 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14287,9 +14323,9 @@
 }
 #else
 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14304,9 +14340,9 @@
 }
 #else
 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14321,9 +14357,9 @@
 }
 #else
 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14338,9 +14374,9 @@
 }
 #else
 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14355,9 +14391,9 @@
 }
 #else
 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14372,9 +14408,9 @@
 }
 #else
 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14389,9 +14425,9 @@
 }
 #else
 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14406,9 +14442,9 @@
 }
 #else
 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14423,10 +14459,10 @@
 }
 #else
 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14441,10 +14477,10 @@
 }
 #else
 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14459,10 +14495,10 @@
 }
 #else
 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14477,10 +14513,10 @@
 }
 #else
 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14495,10 +14531,10 @@
 }
 #else
 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14513,10 +14549,10 @@
 }
 #else
 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14531,10 +14567,10 @@
 }
 #else
 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14549,10 +14585,10 @@
 }
 #else
 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14567,10 +14603,10 @@
 }
 #else
 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14585,10 +14621,10 @@
 }
 #else
 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14603,10 +14639,10 @@
 }
 #else
 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14621,10 +14657,10 @@
 }
 #else
 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14639,10 +14675,10 @@
 }
 #else
 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14657,10 +14693,10 @@
 }
 #else
 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14668,246 +14704,246 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint32x4_t __s0_42 = __p0_42; \
-  uint32x4_t __s1_42 = __p1_42; \
-  uint32x2_t __s2_42 = __p2_42; \
-  uint32x4_t __ret_42; \
-  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
-  __ret_42; \
-})
-#else
-#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint32x4_t __s0_43 = __p0_43; \
-  uint32x4_t __s1_43 = __p1_43; \
-  uint32x2_t __s2_43 = __p2_43; \
-  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
-  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
-  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
-  uint32x4_t __ret_43; \
-  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
-  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
-  __ret_43; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x8_t __s0_44 = __p0_44; \
-  uint16x8_t __s1_44 = __p1_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x8_t __ret_44; \
-  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
-  __ret_44; \
-})
-#else
-#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  uint16x8_t __s0_45 = __p0_45; \
-  uint16x8_t __s1_45 = __p1_45; \
-  uint16x4_t __s2_45 = __p2_45; \
-  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
-  uint16x8_t __ret_45; \
-  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
-  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_45; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  float32x4_t __s0_46 = __p0_46; \
-  float32x4_t __s1_46 = __p1_46; \
-  float32x2_t __s2_46 = __p2_46; \
-  float32x4_t __ret_46; \
-  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
+#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
+  uint32x4_t __ret_46; \
+  uint32x4_t __s0_46 = __p0_46; \
+  uint32x4_t __s1_46 = __p1_46; \
+  uint32x2_t __s2_46 = __p2_46; \
+  __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \
   __ret_46; \
 })
 #else
-#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x4_t __s0_47 = __p0_47; \
-  float32x4_t __s1_47 = __p1_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
-  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
-  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
-  float32x4_t __ret_47; \
-  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
+#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
+  uint32x4_t __ret_47; \
+  uint32x4_t __s0_47 = __p0_47; \
+  uint32x4_t __s1_47 = __p1_47; \
+  uint32x2_t __s2_47 = __p2_47; \
+  uint32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
+  uint32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
+  uint32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
+  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \
   __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
   __ret_47; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  int32x4_t __s0_48 = __p0_48; \
-  int32x4_t __s1_48 = __p1_48; \
-  int32x2_t __s2_48 = __p2_48; \
-  int32x4_t __ret_48; \
-  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
+#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
+  uint16x8_t __ret_48; \
+  uint16x8_t __s0_48 = __p0_48; \
+  uint16x8_t __s1_48 = __p1_48; \
+  uint16x4_t __s2_48 = __p2_48; \
+  __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \
   __ret_48; \
 })
 #else
-#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x4_t __s0_49 = __p0_49; \
-  int32x4_t __s1_49 = __p1_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
-  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
-  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
-  int32x4_t __ret_49; \
-  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
-  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
+#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
+  uint16x8_t __ret_49; \
+  uint16x8_t __s0_49 = __p0_49; \
+  uint16x8_t __s1_49 = __p1_49; \
+  uint16x4_t __s2_49 = __p2_49; \
+  uint16x8_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \
+  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \
+  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_49; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int16x8_t __s0_50 = __p0_50; \
-  int16x8_t __s1_50 = __p1_50; \
-  int16x4_t __s2_50 = __p2_50; \
-  int16x8_t __ret_50; \
-  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
+#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
+  float32x4_t __ret_50; \
+  float32x4_t __s0_50 = __p0_50; \
+  float32x4_t __s1_50 = __p1_50; \
+  float32x2_t __s2_50 = __p2_50; \
+  __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \
   __ret_50; \
 })
 #else
-#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int16x8_t __s0_51 = __p0_51; \
-  int16x8_t __s1_51 = __p1_51; \
-  int16x4_t __s2_51 = __p2_51; \
-  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
-  int16x8_t __ret_51; \
-  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
-  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmlaq_lane_f32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
+  float32x4_t __ret_51; \
+  float32x4_t __s0_51 = __p0_51; \
+  float32x4_t __s1_51 = __p1_51; \
+  float32x2_t __s2_51 = __p2_51; \
+  float32x4_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \
+  float32x4_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \
+  float32x2_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \
+  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \
+  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \
   __ret_51; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  uint32x2_t __s0_52 = __p0_52; \
-  uint32x2_t __s1_52 = __p1_52; \
-  uint32x2_t __s2_52 = __p2_52; \
-  uint32x2_t __ret_52; \
-  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
+#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
+  int32x4_t __ret_52; \
+  int32x4_t __s0_52 = __p0_52; \
+  int32x4_t __s1_52 = __p1_52; \
+  int32x2_t __s2_52 = __p2_52; \
+  __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \
   __ret_52; \
 })
 #else
-#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  uint32x2_t __s0_53 = __p0_53; \
-  uint32x2_t __s1_53 = __p1_53; \
-  uint32x2_t __s2_53 = __p2_53; \
-  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
-  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
-  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
-  uint32x2_t __ret_53; \
-  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
+#define vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
+  int32x4_t __ret_53; \
+  int32x4_t __s0_53 = __p0_53; \
+  int32x4_t __s1_53 = __p1_53; \
+  int32x2_t __s2_53 = __p2_53; \
+  int32x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
+  int32x4_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \
+  int32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
+  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \
+  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
   __ret_53; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  uint16x4_t __s0_54 = __p0_54; \
-  uint16x4_t __s1_54 = __p1_54; \
-  uint16x4_t __s2_54 = __p2_54; \
-  uint16x4_t __ret_54; \
-  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
+#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
+  int16x8_t __ret_54; \
+  int16x8_t __s0_54 = __p0_54; \
+  int16x8_t __s1_54 = __p1_54; \
+  int16x4_t __s2_54 = __p2_54; \
+  __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \
   __ret_54; \
 })
 #else
-#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  uint16x4_t __s0_55 = __p0_55; \
-  uint16x4_t __s1_55 = __p1_55; \
-  uint16x4_t __s2_55 = __p2_55; \
-  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
-  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
-  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
-  uint16x4_t __ret_55; \
-  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
+#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
+  int16x8_t __ret_55; \
+  int16x8_t __s0_55 = __p0_55; \
+  int16x8_t __s1_55 = __p1_55; \
+  int16x4_t __s2_55 = __p2_55; \
+  int16x8_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
+  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \
+  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_55; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  float32x2_t __s0_56 = __p0_56; \
-  float32x2_t __s1_56 = __p1_56; \
-  float32x2_t __s2_56 = __p2_56; \
-  float32x2_t __ret_56; \
-  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
+#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
+  uint32x2_t __ret_56; \
+  uint32x2_t __s0_56 = __p0_56; \
+  uint32x2_t __s1_56 = __p1_56; \
+  uint32x2_t __s2_56 = __p2_56; \
+  __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \
   __ret_56; \
 })
 #else
-#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  float32x2_t __s0_57 = __p0_57; \
-  float32x2_t __s1_57 = __p1_57; \
-  float32x2_t __s2_57 = __p2_57; \
-  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
-  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
-  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
-  float32x2_t __ret_57; \
-  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
+#define vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
+  uint32x2_t __ret_57; \
+  uint32x2_t __s0_57 = __p0_57; \
+  uint32x2_t __s1_57 = __p1_57; \
+  uint32x2_t __s2_57 = __p2_57; \
+  uint32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
+  uint32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
+  uint32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
+  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \
   __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
   __ret_57; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  int32x2_t __s0_58 = __p0_58; \
-  int32x2_t __s1_58 = __p1_58; \
-  int32x2_t __s2_58 = __p2_58; \
-  int32x2_t __ret_58; \
-  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
+#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
+  uint16x4_t __ret_58; \
+  uint16x4_t __s0_58 = __p0_58; \
+  uint16x4_t __s1_58 = __p1_58; \
+  uint16x4_t __s2_58 = __p2_58; \
+  __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \
   __ret_58; \
 })
 #else
-#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  int32x2_t __s0_59 = __p0_59; \
-  int32x2_t __s1_59 = __p1_59; \
-  int32x2_t __s2_59 = __p2_59; \
-  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
-  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
-  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
-  int32x2_t __ret_59; \
-  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
+#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
+  uint16x4_t __ret_59; \
+  uint16x4_t __s0_59 = __p0_59; \
+  uint16x4_t __s1_59 = __p1_59; \
+  uint16x4_t __s2_59 = __p2_59; \
+  uint16x4_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \
+  uint16x4_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \
+  uint16x4_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \
+  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \
+  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \
   __ret_59; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  int16x4_t __s0_60 = __p0_60; \
-  int16x4_t __s1_60 = __p1_60; \
-  int16x4_t __s2_60 = __p2_60; \
-  int16x4_t __ret_60; \
-  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
+#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
+  float32x2_t __ret_60; \
+  float32x2_t __s0_60 = __p0_60; \
+  float32x2_t __s1_60 = __p1_60; \
+  float32x2_t __s2_60 = __p2_60; \
+  __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \
   __ret_60; \
 })
 #else
-#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  int16x4_t __s0_61 = __p0_61; \
-  int16x4_t __s1_61 = __p1_61; \
-  int16x4_t __s2_61 = __p2_61; \
-  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
-  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  int16x4_t __ret_61; \
-  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
+#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
+  float32x2_t __ret_61; \
+  float32x2_t __s0_61 = __p0_61; \
+  float32x2_t __s1_61 = __p1_61; \
+  float32x2_t __s2_61 = __p2_61; \
+  float32x2_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \
+  float32x2_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \
+  float32x2_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \
+  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \
+  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \
   __ret_61; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
+  int32x2_t __ret_62; \
+  int32x2_t __s0_62 = __p0_62; \
+  int32x2_t __s1_62 = __p1_62; \
+  int32x2_t __s2_62 = __p2_62; \
+  __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \
+  __ret_62; \
+})
+#else
+#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
+  int32x2_t __ret_63; \
+  int32x2_t __s0_63 = __p0_63; \
+  int32x2_t __s1_63 = __p1_63; \
+  int32x2_t __s2_63 = __p2_63; \
+  int32x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
+  int32x2_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \
+  int32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
+  __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \
+  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
+  __ret_63; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
+  int16x4_t __ret_64; \
+  int16x4_t __s0_64 = __p0_64; \
+  int16x4_t __s1_64 = __p1_64; \
+  int16x4_t __s2_64 = __p2_64; \
+  __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \
+  __ret_64; \
+})
+#else
+#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
+  int16x4_t __ret_65; \
+  int16x4_t __s0_65 = __p0_65; \
+  int16x4_t __s1_65 = __p1_65; \
+  int16x4_t __s2_65 = __p2_65; \
+  int16x4_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \
+  int16x4_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \
+  int16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
+  __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \
+  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \
+  __ret_65; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
   uint32x4_t __ret;
   __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
@@ -14915,9 +14951,9 @@
 }
 #else
 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14932,9 +14968,9 @@
 }
 #else
 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14949,9 +14985,9 @@
 }
 #else
 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14966,9 +15002,9 @@
 }
 #else
 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14983,9 +15019,9 @@
 }
 #else
 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15000,9 +15036,9 @@
 }
 #else
 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15017,9 +15053,9 @@
 }
 #else
 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15034,9 +15070,9 @@
 }
 #else
 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15051,9 +15087,9 @@
 }
 #else
 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15068,9 +15104,9 @@
 }
 #else
 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15085,10 +15121,10 @@
 }
 #else
 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15103,10 +15139,10 @@
 }
 #else
 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15121,10 +15157,10 @@
 }
 #else
 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15139,10 +15175,10 @@
 }
 #else
 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15157,10 +15193,10 @@
 }
 #else
 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15175,10 +15211,10 @@
 }
 #else
 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15193,10 +15229,10 @@
 }
 #else
 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15211,10 +15247,10 @@
 }
 #else
 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15229,10 +15265,10 @@
 }
 #else
 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15247,10 +15283,10 @@
 }
 #else
 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15265,10 +15301,10 @@
 }
 #else
 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15283,10 +15319,10 @@
 }
 #else
 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15301,10 +15337,10 @@
 }
 #else
 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15319,10 +15355,10 @@
 }
 #else
 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15330,246 +15366,246 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint32x4_t __s0_62 = __p0_62; \
-  uint32x4_t __s1_62 = __p1_62; \
-  uint32x2_t __s2_62 = __p2_62; \
-  uint32x4_t __ret_62; \
-  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
-  __ret_62; \
-})
-#else
-#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint32x4_t __s0_63 = __p0_63; \
-  uint32x4_t __s1_63 = __p1_63; \
-  uint32x2_t __s2_63 = __p2_63; \
-  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
-  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
-  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint32x4_t __ret_63; \
-  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s1_64 = __p1_64; \
-  uint16x4_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
-  __ret_64; \
-})
-#else
-#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s1_65 = __p1_65; \
-  uint16x4_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  float32x4_t __s0_66 = __p0_66; \
-  float32x4_t __s1_66 = __p1_66; \
-  float32x2_t __s2_66 = __p2_66; \
-  float32x4_t __ret_66; \
-  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
+#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
+  uint32x4_t __ret_66; \
+  uint32x4_t __s0_66 = __p0_66; \
+  uint32x4_t __s1_66 = __p1_66; \
+  uint32x2_t __s2_66 = __p2_66; \
+  __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \
   __ret_66; \
 })
 #else
-#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  float32x4_t __s0_67 = __p0_67; \
-  float32x4_t __s1_67 = __p1_67; \
-  float32x2_t __s2_67 = __p2_67; \
-  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
-  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
-  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
-  float32x4_t __ret_67; \
-  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
+#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
+  uint32x4_t __ret_67; \
+  uint32x4_t __s0_67 = __p0_67; \
+  uint32x4_t __s1_67 = __p1_67; \
+  uint32x2_t __s2_67 = __p2_67; \
+  uint32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
+  uint32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
+  uint32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
+  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \
   __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
   __ret_67; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  int32x4_t __s0_68 = __p0_68; \
-  int32x4_t __s1_68 = __p1_68; \
-  int32x2_t __s2_68 = __p2_68; \
-  int32x4_t __ret_68; \
-  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
+#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
+  uint16x8_t __ret_68; \
+  uint16x8_t __s0_68 = __p0_68; \
+  uint16x8_t __s1_68 = __p1_68; \
+  uint16x4_t __s2_68 = __p2_68; \
+  __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \
   __ret_68; \
 })
 #else
-#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  int32x4_t __s0_69 = __p0_69; \
-  int32x4_t __s1_69 = __p1_69; \
-  int32x2_t __s2_69 = __p2_69; \
-  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
-  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
-  int32x4_t __ret_69; \
-  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
+#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
+  uint16x8_t __ret_69; \
+  uint16x8_t __s0_69 = __p0_69; \
+  uint16x8_t __s1_69 = __p1_69; \
+  uint16x4_t __s2_69 = __p2_69; \
+  uint16x8_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
+  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \
+  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_69; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int16x8_t __s0_70 = __p0_70; \
-  int16x8_t __s1_70 = __p1_70; \
-  int16x4_t __s2_70 = __p2_70; \
-  int16x8_t __ret_70; \
-  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
+#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
+  float32x4_t __ret_70; \
+  float32x4_t __s0_70 = __p0_70; \
+  float32x4_t __s1_70 = __p1_70; \
+  float32x2_t __s2_70 = __p2_70; \
+  __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \
   __ret_70; \
 })
 #else
-#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int16x8_t __s0_71 = __p0_71; \
-  int16x8_t __s1_71 = __p1_71; \
-  int16x4_t __s2_71 = __p2_71; \
-  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int16x8_t __ret_71; \
-  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
+  float32x4_t __ret_71; \
+  float32x4_t __s0_71 = __p0_71; \
+  float32x4_t __s1_71 = __p1_71; \
+  float32x2_t __s2_71 = __p2_71; \
+  float32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
+  float32x4_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \
+  float32x2_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \
+  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \
+  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
   __ret_71; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  uint32x2_t __s0_72 = __p0_72; \
-  uint32x2_t __s1_72 = __p1_72; \
-  uint32x2_t __s2_72 = __p2_72; \
-  uint32x2_t __ret_72; \
-  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
+#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
+  int32x4_t __ret_72; \
+  int32x4_t __s0_72 = __p0_72; \
+  int32x4_t __s1_72 = __p1_72; \
+  int32x2_t __s2_72 = __p2_72; \
+  __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, __p3_72); \
   __ret_72; \
 })
 #else
-#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  uint32x2_t __s0_73 = __p0_73; \
-  uint32x2_t __s1_73 = __p1_73; \
-  uint32x2_t __s2_73 = __p2_73; \
-  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
-  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  uint32x2_t __ret_73; \
-  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
+#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
+  int32x4_t __ret_73; \
+  int32x4_t __s0_73 = __p0_73; \
+  int32x4_t __s1_73 = __p1_73; \
+  int32x2_t __s2_73 = __p2_73; \
+  int32x4_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \
+  int32x4_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \
+  int32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
+  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \
+  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \
   __ret_73; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  uint16x4_t __s0_74 = __p0_74; \
-  uint16x4_t __s1_74 = __p1_74; \
-  uint16x4_t __s2_74 = __p2_74; \
-  uint16x4_t __ret_74; \
-  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
+#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
+  int16x8_t __ret_74; \
+  int16x8_t __s0_74 = __p0_74; \
+  int16x8_t __s1_74 = __p1_74; \
+  int16x4_t __s2_74 = __p2_74; \
+  __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \
   __ret_74; \
 })
 #else
-#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  uint16x4_t __s0_75 = __p0_75; \
-  uint16x4_t __s1_75 = __p1_75; \
-  uint16x4_t __s2_75 = __p2_75; \
-  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
-  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
-  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
-  uint16x4_t __ret_75; \
-  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
+#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
+  int16x8_t __ret_75; \
+  int16x8_t __s0_75 = __p0_75; \
+  int16x8_t __s1_75 = __p1_75; \
+  int16x4_t __s2_75 = __p2_75; \
+  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
+  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \
+  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_75; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  float32x2_t __s0_76 = __p0_76; \
-  float32x2_t __s1_76 = __p1_76; \
-  float32x2_t __s2_76 = __p2_76; \
-  float32x2_t __ret_76; \
-  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
+#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
+  uint32x2_t __ret_76; \
+  uint32x2_t __s0_76 = __p0_76; \
+  uint32x2_t __s1_76 = __p1_76; \
+  uint32x2_t __s2_76 = __p2_76; \
+  __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \
   __ret_76; \
 })
 #else
-#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  float32x2_t __s0_77 = __p0_77; \
-  float32x2_t __s1_77 = __p1_77; \
-  float32x2_t __s2_77 = __p2_77; \
-  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
-  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
-  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
-  float32x2_t __ret_77; \
-  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
+#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
+  uint32x2_t __ret_77; \
+  uint32x2_t __s0_77 = __p0_77; \
+  uint32x2_t __s1_77 = __p1_77; \
+  uint32x2_t __s2_77 = __p2_77; \
+  uint32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
+  uint32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
+  uint32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
+  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \
   __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
   __ret_77; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  int32x2_t __s0_78 = __p0_78; \
-  int32x2_t __s1_78 = __p1_78; \
-  int32x2_t __s2_78 = __p2_78; \
-  int32x2_t __ret_78; \
-  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
+#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
+  uint16x4_t __ret_78; \
+  uint16x4_t __s0_78 = __p0_78; \
+  uint16x4_t __s1_78 = __p1_78; \
+  uint16x4_t __s2_78 = __p2_78; \
+  __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \
   __ret_78; \
 })
 #else
-#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  int32x2_t __s0_79 = __p0_79; \
-  int32x2_t __s1_79 = __p1_79; \
-  int32x2_t __s2_79 = __p2_79; \
-  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
-  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
-  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
-  int32x2_t __ret_79; \
-  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
+#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
+  uint16x4_t __ret_79; \
+  uint16x4_t __s0_79 = __p0_79; \
+  uint16x4_t __s1_79 = __p1_79; \
+  uint16x4_t __s2_79 = __p2_79; \
+  uint16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
+  uint16x4_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \
+  uint16x4_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \
+  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \
+  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
   __ret_79; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  int16x4_t __s0_80 = __p0_80; \
-  int16x4_t __s1_80 = __p1_80; \
-  int16x4_t __s2_80 = __p2_80; \
-  int16x4_t __ret_80; \
-  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
+#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
+  float32x2_t __ret_80; \
+  float32x2_t __s0_80 = __p0_80; \
+  float32x2_t __s1_80 = __p1_80; \
+  float32x2_t __s2_80 = __p2_80; \
+  __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \
   __ret_80; \
 })
 #else
-#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  int16x4_t __s0_81 = __p0_81; \
-  int16x4_t __s1_81 = __p1_81; \
-  int16x4_t __s2_81 = __p2_81; \
-  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
-  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
-  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
-  int16x4_t __ret_81; \
-  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
+#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+  float32x2_t __ret_81; \
+  float32x2_t __s0_81 = __p0_81; \
+  float32x2_t __s1_81 = __p1_81; \
+  float32x2_t __s2_81 = __p2_81; \
+  float32x2_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \
+  float32x2_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \
+  float32x2_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \
+  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \
+  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \
   __ret_81; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
+  int32x2_t __ret_82; \
+  int32x2_t __s0_82 = __p0_82; \
+  int32x2_t __s1_82 = __p1_82; \
+  int32x2_t __s2_82 = __p2_82; \
+  __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \
+  __ret_82; \
+})
+#else
+#define vmls_lane_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
+  int32x2_t __ret_83; \
+  int32x2_t __s0_83 = __p0_83; \
+  int32x2_t __s1_83 = __p1_83; \
+  int32x2_t __s2_83 = __p2_83; \
+  int32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
+  int32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
+  int32x2_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \
+  __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \
+  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
+  __ret_83; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
+  int16x4_t __ret_84; \
+  int16x4_t __s0_84 = __p0_84; \
+  int16x4_t __s1_84 = __p1_84; \
+  int16x4_t __s2_84 = __p2_84; \
+  __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \
+  __ret_84; \
+})
+#else
+#define vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
+  int16x4_t __ret_85; \
+  int16x4_t __s0_85 = __p0_85; \
+  int16x4_t __s1_85 = __p1_85; \
+  int16x4_t __s2_85 = __p2_85; \
+  int16x4_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \
+  int16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
+  int16x4_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \
+  __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_s16(__rev2_85, __p3_85); \
+  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \
+  __ret_85; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
   uint32x4_t __ret;
   __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
@@ -15577,9 +15613,9 @@
 }
 #else
 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15594,9 +15630,9 @@
 }
 #else
 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15611,9 +15647,9 @@
 }
 #else
 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15628,9 +15664,9 @@
 }
 #else
 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15645,9 +15681,9 @@
 }
 #else
 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15662,9 +15698,9 @@
 }
 #else
 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15679,9 +15715,9 @@
 }
 #else
 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15696,9 +15732,9 @@
 }
 #else
 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15713,9 +15749,9 @@
 }
 #else
 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15730,9 +15766,9 @@
 }
 #else
 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15891,15 +15927,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -16033,15 +16069,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -16091,8 +16127,8 @@
 }
 #else
 __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16112,8 +16148,8 @@
 }
 #else
 __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16133,8 +16169,8 @@
 }
 #else
 __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16154,8 +16190,8 @@
 }
 #else
 __ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16175,8 +16211,8 @@
 }
 #else
 __ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16196,8 +16232,8 @@
 }
 #else
 __ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16217,8 +16253,8 @@
 }
 #else
 __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16238,8 +16274,8 @@
 }
 #else
 __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16259,8 +16295,8 @@
 }
 #else
 __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16280,8 +16316,8 @@
 }
 #else
 __ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16301,8 +16337,8 @@
 }
 #else
 __ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16322,8 +16358,8 @@
 }
 #else
 __ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16343,9 +16379,9 @@
 }
 #else
 __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16360,9 +16396,9 @@
 }
 #else
 __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16377,9 +16413,9 @@
 }
 #else
 __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16394,9 +16430,9 @@
 }
 #else
 __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16411,9 +16447,9 @@
 }
 #else
 __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16428,9 +16464,9 @@
 }
 #else
 __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16445,9 +16481,9 @@
 }
 #else
 __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16462,9 +16498,9 @@
 }
 #else
 __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16479,9 +16515,9 @@
 }
 #else
 __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16496,9 +16532,9 @@
 }
 #else
 __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16513,9 +16549,9 @@
 }
 #else
 __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16530,9 +16566,9 @@
 }
 #else
 __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16547,9 +16583,9 @@
 }
 #else
 __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16564,9 +16600,9 @@
 }
 #else
 __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16581,9 +16617,9 @@
 }
 #else
 __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16598,9 +16634,9 @@
 }
 #else
 __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16608,216 +16644,216 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
-  uint32x4_t __s0_82 = __p0_82; \
-  uint32x2_t __s1_82 = __p1_82; \
-  uint32x4_t __ret_82; \
-  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
-  __ret_82; \
-})
-#else
-#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
-  uint32x4_t __s0_83 = __p0_83; \
-  uint32x2_t __s1_83 = __p1_83; \
-  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
-  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
-  uint32x4_t __ret_83; \
-  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
-  uint16x8_t __s0_84 = __p0_84; \
-  uint16x4_t __s1_84 = __p1_84; \
-  uint16x8_t __ret_84; \
-  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
-  __ret_84; \
-})
-#else
-#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
-  uint16x8_t __s0_85 = __p0_85; \
-  uint16x4_t __s1_85 = __p1_85; \
-  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
-  uint16x8_t __ret_85; \
-  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
-  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
-  float32x4_t __s0_86 = __p0_86; \
-  float32x2_t __s1_86 = __p1_86; \
-  float32x4_t __ret_86; \
-  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
+#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
+  uint32x4_t __ret_86; \
+  uint32x4_t __s0_86 = __p0_86; \
+  uint32x2_t __s1_86 = __p1_86; \
+  __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \
   __ret_86; \
 })
 #else
-#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
-  float32x4_t __s0_87 = __p0_87; \
-  float32x2_t __s1_87 = __p1_87; \
-  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
-  float32x4_t __ret_87; \
-  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
+#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
+  uint32x4_t __ret_87; \
+  uint32x4_t __s0_87 = __p0_87; \
+  uint32x2_t __s1_87 = __p1_87; \
+  uint32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
+  uint32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
+  __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \
   __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
   __ret_87; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
-  int32x4_t __s0_88 = __p0_88; \
-  int32x2_t __s1_88 = __p1_88; \
-  int32x4_t __ret_88; \
-  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
+#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \
+  uint16x8_t __ret_88; \
+  uint16x8_t __s0_88 = __p0_88; \
+  uint16x4_t __s1_88 = __p1_88; \
+  __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \
   __ret_88; \
 })
 #else
-#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
-  int32x4_t __s0_89 = __p0_89; \
-  int32x2_t __s1_89 = __p1_89; \
-  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
-  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
-  int32x4_t __ret_89; \
-  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
+#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \
+  uint16x8_t __ret_89; \
+  uint16x8_t __s0_89 = __p0_89; \
+  uint16x4_t __s1_89 = __p1_89; \
+  uint16x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \
+  __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \
+  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_89; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
-  int16x8_t __s0_90 = __p0_90; \
-  int16x4_t __s1_90 = __p1_90; \
-  int16x8_t __ret_90; \
-  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
+#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \
+  float32x4_t __ret_90; \
+  float32x4_t __s0_90 = __p0_90; \
+  float32x2_t __s1_90 = __p1_90; \
+  __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \
   __ret_90; \
 })
 #else
-#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
-  int16x8_t __s0_91 = __p0_91; \
-  int16x4_t __s1_91 = __p1_91; \
-  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
-  int16x8_t __ret_91; \
-  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \
+  float32x4_t __ret_91; \
+  float32x4_t __s0_91 = __p0_91; \
+  float32x2_t __s1_91 = __p1_91; \
+  float32x4_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \
+  float32x2_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \
+  __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \
+  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \
   __ret_91; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
-  uint32x2_t __s0_92 = __p0_92; \
-  uint32x2_t __s1_92 = __p1_92; \
-  uint32x2_t __ret_92; \
-  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
+#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
+  int32x4_t __ret_92; \
+  int32x4_t __s0_92 = __p0_92; \
+  int32x2_t __s1_92 = __p1_92; \
+  __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \
   __ret_92; \
 })
 #else
-#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
-  uint32x2_t __s0_93 = __p0_93; \
-  uint32x2_t __s1_93 = __p1_93; \
-  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
-  uint32x2_t __ret_93; \
-  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
+#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
+  int32x4_t __ret_93; \
+  int32x4_t __s0_93 = __p0_93; \
+  int32x2_t __s1_93 = __p1_93; \
+  int32x4_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \
+  int32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
+  __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \
+  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \
   __ret_93; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
-  uint16x4_t __s0_94 = __p0_94; \
-  uint16x4_t __s1_94 = __p1_94; \
-  uint16x4_t __ret_94; \
-  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
+#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
+  int16x8_t __ret_94; \
+  int16x8_t __s0_94 = __p0_94; \
+  int16x4_t __s1_94 = __p1_94; \
+  __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \
   __ret_94; \
 })
 #else
-#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
-  uint16x4_t __s0_95 = __p0_95; \
-  uint16x4_t __s1_95 = __p1_95; \
-  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
-  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
-  uint16x4_t __ret_95; \
-  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
-  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
+#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
+  int16x8_t __ret_95; \
+  int16x8_t __s0_95 = __p0_95; \
+  int16x4_t __s1_95 = __p1_95; \
+  int16x8_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
+  __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \
+  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_95; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
-  float32x2_t __s0_96 = __p0_96; \
-  float32x2_t __s1_96 = __p1_96; \
-  float32x2_t __ret_96; \
-  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
+#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
+  uint32x2_t __ret_96; \
+  uint32x2_t __s0_96 = __p0_96; \
+  uint32x2_t __s1_96 = __p1_96; \
+  __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \
   __ret_96; \
 })
 #else
-#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
-  float32x2_t __s0_97 = __p0_97; \
-  float32x2_t __s1_97 = __p1_97; \
-  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
-  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
-  float32x2_t __ret_97; \
-  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
+#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
+  uint32x2_t __ret_97; \
+  uint32x2_t __s0_97 = __p0_97; \
+  uint32x2_t __s1_97 = __p1_97; \
+  uint32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
+  uint32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
+  __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \
   __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
   __ret_97; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
-  int32x2_t __s0_98 = __p0_98; \
-  int32x2_t __s1_98 = __p1_98; \
-  int32x2_t __ret_98; \
-  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
+#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \
+  uint16x4_t __ret_98; \
+  uint16x4_t __s0_98 = __p0_98; \
+  uint16x4_t __s1_98 = __p1_98; \
+  __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \
   __ret_98; \
 })
 #else
-#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
-  int32x2_t __s0_99 = __p0_99; \
-  int32x2_t __s1_99 = __p1_99; \
-  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
-  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
-  int32x2_t __ret_99; \
-  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
-  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
+#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \
+  uint16x4_t __ret_99; \
+  uint16x4_t __s0_99 = __p0_99; \
+  uint16x4_t __s1_99 = __p1_99; \
+  uint16x4_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \
+  uint16x4_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \
+  __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \
+  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \
   __ret_99; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
-  int16x4_t __s0_100 = __p0_100; \
-  int16x4_t __s1_100 = __p1_100; \
-  int16x4_t __ret_100; \
-  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
+#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \
+  float32x2_t __ret_100; \
+  float32x2_t __s0_100 = __p0_100; \
+  float32x2_t __s1_100 = __p1_100; \
+  __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \
   __ret_100; \
 })
 #else
-#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
-  int16x4_t __s0_101 = __p0_101; \
-  int16x4_t __s1_101 = __p1_101; \
-  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
-  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
-  int16x4_t __ret_101; \
-  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
-  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
+#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \
+  float32x2_t __ret_101; \
+  float32x2_t __s0_101 = __p0_101; \
+  float32x2_t __s1_101 = __p1_101; \
+  float32x2_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \
+  float32x2_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \
+  __ret_101 = __rev0_101 * __noswap_splat_lane_f32(__rev1_101, __p2_101); \
+  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \
   __ret_101; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
+  int32x2_t __ret_102; \
+  int32x2_t __s0_102 = __p0_102; \
+  int32x2_t __s1_102 = __p1_102; \
+  __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \
+  __ret_102; \
+})
+#else
+#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
+  int32x2_t __ret_103; \
+  int32x2_t __s0_103 = __p0_103; \
+  int32x2_t __s1_103 = __p1_103; \
+  int32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
+  int32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
+  __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \
+  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
+  __ret_103; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
+  int16x4_t __ret_104; \
+  int16x4_t __s0_104 = __p0_104; \
+  int16x4_t __s1_104 = __p1_104; \
+  __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \
+  __ret_104; \
+})
+#else
+#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
+  int16x4_t __ret_105; \
+  int16x4_t __s0_105 = __p0_105; \
+  int16x4_t __s1_105 = __p1_105; \
+  int16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
+  int16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
+  __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \
+  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
+  __ret_105; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
   uint32x4_t __ret;
   __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
@@ -16825,8 +16861,8 @@
 }
 #else
 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16841,8 +16877,8 @@
 }
 #else
 __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16857,8 +16893,8 @@
 }
 #else
 __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16873,8 +16909,8 @@
 }
 #else
 __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16889,8 +16925,8 @@
 }
 #else
 __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16905,8 +16941,8 @@
 }
 #else
 __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (uint32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16921,8 +16957,8 @@
 }
 #else
 __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16937,8 +16973,8 @@
 }
 #else
 __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (float32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16953,8 +16989,8 @@
 }
 #else
 __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (int32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16969,8 +17005,8 @@
 }
 #else
 __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16985,9 +17021,9 @@
 }
 #else
 __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly16x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17007,9 +17043,9 @@
 }
 #else
 __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17029,9 +17065,9 @@
 }
 #else
 __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17051,9 +17087,9 @@
 }
 #else
 __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17073,9 +17109,9 @@
 }
 #else
 __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17095,9 +17131,9 @@
 }
 #else
 __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17117,9 +17153,9 @@
 }
 #else
 __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17132,90 +17168,90 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
-  uint32x2_t __s0_102 = __p0_102; \
-  uint32x2_t __s1_102 = __p1_102; \
-  uint64x2_t __ret_102; \
-  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
-  __ret_102; \
-})
-#else
-#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
-  uint32x2_t __s0_103 = __p0_103; \
-  uint32x2_t __s1_103 = __p1_103; \
-  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
-  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
-  uint64x2_t __ret_103; \
-  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
-  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
-  __ret_103; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
-  uint16x4_t __s0_104 = __p0_104; \
-  uint16x4_t __s1_104 = __p1_104; \
-  uint32x4_t __ret_104; \
-  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
-  __ret_104; \
-})
-#else
-#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
-  uint16x4_t __s0_105 = __p0_105; \
-  uint16x4_t __s1_105 = __p1_105; \
-  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
-  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
-  uint32x4_t __ret_105; \
-  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
-  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
-  __ret_105; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
-  int32x2_t __s0_106 = __p0_106; \
-  int32x2_t __s1_106 = __p1_106; \
-  int64x2_t __ret_106; \
-  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
+#define vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
+  uint64x2_t __ret_106; \
+  uint32x2_t __s0_106 = __p0_106; \
+  uint32x2_t __s1_106 = __p1_106; \
+  __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \
   __ret_106; \
 })
 #else
-#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
-  int32x2_t __s0_107 = __p0_107; \
-  int32x2_t __s1_107 = __p1_107; \
-  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  int64x2_t __ret_107; \
-  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
+#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
+  uint64x2_t __ret_107; \
+  uint32x2_t __s0_107 = __p0_107; \
+  uint32x2_t __s1_107 = __p1_107; \
+  uint32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
+  uint32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
+  __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \
   __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
   __ret_107; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
-  int16x4_t __s0_108 = __p0_108; \
-  int16x4_t __s1_108 = __p1_108; \
-  int32x4_t __ret_108; \
-  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
+#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
+  uint32x4_t __ret_108; \
+  uint16x4_t __s0_108 = __p0_108; \
+  uint16x4_t __s1_108 = __p1_108; \
+  __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \
   __ret_108; \
 })
 #else
-#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
-  int16x4_t __s0_109 = __p0_109; \
-  int16x4_t __s1_109 = __p1_109; \
-  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
-  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
-  int32x4_t __ret_109; \
-  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
+#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
+  uint32x4_t __ret_109; \
+  uint16x4_t __s0_109 = __p0_109; \
+  uint16x4_t __s1_109 = __p1_109; \
+  uint16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
+  uint16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
+  __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \
   __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
   __ret_109; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \
+  int64x2_t __ret_110; \
+  int32x2_t __s0_110 = __p0_110; \
+  int32x2_t __s1_110 = __p1_110; \
+  __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \
+  __ret_110; \
+})
+#else
+#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \
+  int64x2_t __ret_111; \
+  int32x2_t __s0_111 = __p0_111; \
+  int32x2_t __s1_111 = __p1_111; \
+  int32x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
+  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
+  __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \
+  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
+  __ret_111; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \
+  int32x4_t __ret_112; \
+  int16x4_t __s0_112 = __p0_112; \
+  int16x4_t __s1_112 = __p1_112; \
+  __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \
+  __ret_112; \
+})
+#else
+#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \
+  int32x4_t __ret_113; \
+  int16x4_t __s0_113 = __p0_113; \
+  int16x4_t __s1_113 = __p1_113; \
+  int16x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
+  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
+  __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \
+  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
+  __ret_113; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
   __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
@@ -17223,8 +17259,8 @@
 }
 #else
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17244,8 +17280,8 @@
 }
 #else
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17265,8 +17301,8 @@
 }
 #else
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17286,8 +17322,8 @@
 }
 #else
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17307,8 +17343,8 @@
 }
 #else
 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17323,8 +17359,8 @@
 }
 #else
 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17339,8 +17375,8 @@
 }
 #else
 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17355,8 +17391,8 @@
 }
 #else
 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17371,8 +17407,8 @@
 }
 #else
 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17387,8 +17423,8 @@
 }
 #else
 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17403,8 +17439,8 @@
 }
 #else
 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17419,8 +17455,8 @@
 }
 #else
 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17435,8 +17471,8 @@
 }
 #else
 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17451,8 +17487,8 @@
 }
 #else
 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17467,8 +17503,8 @@
 }
 #else
 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17483,8 +17519,8 @@
 }
 #else
 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17499,8 +17535,8 @@
 }
 #else
 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17515,8 +17551,8 @@
 }
 #else
 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17531,8 +17567,8 @@
 }
 #else
 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17547,8 +17583,8 @@
 }
 #else
 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17563,8 +17599,8 @@
 }
 #else
 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17579,8 +17615,8 @@
 }
 #else
 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17595,8 +17631,8 @@
 }
 #else
 __ai int8x8_t vneg_s8(int8x8_t __p0) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17611,8 +17647,8 @@
 }
 #else
 __ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17627,8 +17663,8 @@
 }
 #else
 __ai int32x2_t vneg_s32(int32x2_t __p0) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17643,8 +17679,8 @@
 }
 #else
 __ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17659,9 +17695,9 @@
 }
 #else
 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17676,9 +17712,9 @@
 }
 #else
 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17693,9 +17729,9 @@
 }
 #else
 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17710,9 +17746,9 @@
 }
 #else
 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17727,9 +17763,9 @@
 }
 #else
 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17744,9 +17780,9 @@
 }
 #else
 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17761,9 +17797,9 @@
 }
 #else
 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17778,9 +17814,9 @@
 }
 #else
 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17795,9 +17831,9 @@
 }
 #else
 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17812,9 +17848,9 @@
 }
 #else
 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17834,9 +17870,9 @@
 }
 #else
 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17851,9 +17887,9 @@
 }
 #else
 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17868,9 +17904,9 @@
 }
 #else
 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17890,9 +17926,9 @@
 }
 #else
 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17907,9 +17943,9 @@
 }
 #else
 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17924,9 +17960,9 @@
 }
 #else
 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17941,9 +17977,9 @@
 }
 #else
 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17958,9 +17994,9 @@
 }
 #else
 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17975,9 +18011,9 @@
 }
 #else
 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17992,9 +18028,9 @@
 }
 #else
 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18009,9 +18045,9 @@
 }
 #else
 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18026,9 +18062,9 @@
 }
 #else
 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18043,9 +18079,9 @@
 }
 #else
 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18060,9 +18096,9 @@
 }
 #else
 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18082,9 +18118,9 @@
 }
 #else
 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18099,9 +18135,9 @@
 }
 #else
 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18116,9 +18152,9 @@
 }
 #else
 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18138,9 +18174,9 @@
 }
 #else
 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18155,9 +18191,9 @@
 }
 #else
 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18172,9 +18208,9 @@
 }
 #else
 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18189,9 +18225,9 @@
 }
 #else
 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18206,9 +18242,9 @@
 }
 #else
 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18223,9 +18259,9 @@
 }
 #else
 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18240,9 +18276,9 @@
 }
 #else
 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18257,9 +18293,9 @@
 }
 #else
 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18274,8 +18310,8 @@
 }
 #else
 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x1_t __ret;
+  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
   return __ret;
 }
@@ -18289,9 +18325,9 @@
 }
 #else
 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18306,9 +18342,9 @@
 }
 #else
 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18323,8 +18359,8 @@
 }
 #else
 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x1_t __ret;
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
   return __ret;
 }
@@ -18338,9 +18374,9 @@
 }
 #else
 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18355,9 +18391,9 @@
 }
 #else
 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18372,9 +18408,9 @@
 }
 #else
 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18389,9 +18425,9 @@
 }
 #else
 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18406,9 +18442,9 @@
 }
 #else
 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18423,9 +18459,9 @@
 }
 #else
 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18440,9 +18476,9 @@
 }
 #else
 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18457,9 +18493,9 @@
 }
 #else
 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18474,8 +18510,8 @@
 }
 #else
 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18490,8 +18526,8 @@
 }
 #else
 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18506,8 +18542,8 @@
 }
 #else
 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18522,8 +18558,8 @@
 }
 #else
 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18538,8 +18574,8 @@
 }
 #else
 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18554,8 +18590,8 @@
 }
 #else
 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18570,8 +18606,8 @@
 }
 #else
 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18586,8 +18622,8 @@
 }
 #else
 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
   return __ret;
 }
@@ -18601,8 +18637,8 @@
 }
 #else
 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18617,8 +18653,8 @@
 }
 #else
 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18633,8 +18669,8 @@
 }
 #else
 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
   return __ret;
 }
@@ -18648,8 +18684,8 @@
 }
 #else
 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18664,9 +18700,9 @@
 }
 #else
 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18681,9 +18717,9 @@
 }
 #else
 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18698,9 +18734,9 @@
 }
 #else
 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18715,9 +18751,9 @@
 }
 #else
 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18732,9 +18768,9 @@
 }
 #else
 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18749,9 +18785,9 @@
 }
 #else
 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18766,9 +18802,9 @@
 }
 #else
 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18783,9 +18819,9 @@
 }
 #else
 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18800,9 +18836,9 @@
 }
 #else
 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18817,9 +18853,9 @@
 }
 #else
 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18834,9 +18870,9 @@
 }
 #else
 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18851,9 +18887,9 @@
 }
 #else
 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18868,9 +18904,9 @@
 }
 #else
 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18885,9 +18921,9 @@
 }
 #else
 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18902,8 +18938,8 @@
 }
 #else
 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18918,8 +18954,8 @@
 }
 #else
 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18934,8 +18970,8 @@
 }
 #else
 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18950,8 +18986,8 @@
 }
 #else
 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18966,8 +19002,8 @@
 }
 #else
 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18982,8 +19018,8 @@
 }
 #else
 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18998,9 +19034,9 @@
 }
 #else
 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19015,9 +19051,9 @@
 }
 #else
 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19032,9 +19068,9 @@
 }
 #else
 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19049,9 +19085,9 @@
 }
 #else
 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19066,9 +19102,9 @@
 }
 #else
 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19083,18 +19119,13 @@
 }
 #else
 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19105,9 +19136,9 @@
 }
 #else
 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19122,18 +19153,13 @@
 }
 #else
 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19144,9 +19170,9 @@
 }
 #else
 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19161,9 +19187,9 @@
 }
 #else
 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19183,9 +19209,9 @@
 }
 #else
 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19200,9 +19226,9 @@
 }
 #else
 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19217,18 +19243,13 @@
 }
 #else
 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -19244,18 +19265,13 @@
 }
 #else
 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19266,10 +19282,10 @@
 }
 #else
 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19289,10 +19305,10 @@
 }
 #else
 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19305,50 +19321,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  int64x2_t __s0_110 = __p0_110; \
-  int32x2_t __s1_110 = __p1_110; \
-  int32x2_t __s2_110 = __p2_110; \
-  int64x2_t __ret_110; \
-  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
-  __ret_110; \
+#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
+  int64x2_t __ret_114; \
+  int64x2_t __s0_114 = __p0_114; \
+  int32x2_t __s1_114 = __p1_114; \
+  int32x2_t __s2_114 = __p2_114; \
+  __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
+  __ret_114; \
 })
 #else
-#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  int64x2_t __s0_111 = __p0_111; \
-  int32x2_t __s1_111 = __p1_111; \
-  int32x2_t __s2_111 = __p2_111; \
-  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
-  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
-  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
-  int64x2_t __ret_111; \
-  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
-  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
-  __ret_111; \
+#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
+  int64x2_t __ret_115; \
+  int64x2_t __s0_115 = __p0_115; \
+  int32x2_t __s1_115 = __p1_115; \
+  int32x2_t __s2_115 = __p2_115; \
+  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
+  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
+  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
+  __ret_115 = __noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
+  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
+  __ret_115; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  int32x4_t __s0_112 = __p0_112; \
-  int16x4_t __s1_112 = __p1_112; \
-  int16x4_t __s2_112 = __p2_112; \
-  int32x4_t __ret_112; \
-  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
-  __ret_112; \
+#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
+  int32x4_t __ret_116; \
+  int32x4_t __s0_116 = __p0_116; \
+  int16x4_t __s1_116 = __p1_116; \
+  int16x4_t __s2_116 = __p2_116; \
+  __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
+  __ret_116; \
 })
 #else
-#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  int32x4_t __s0_113 = __p0_113; \
-  int16x4_t __s1_113 = __p1_113; \
-  int16x4_t __s2_113 = __p2_113; \
-  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
-  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
-  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
-  int32x4_t __ret_113; \
-  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
-  __ret_113; \
+#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
+  int32x4_t __ret_117; \
+  int32x4_t __s0_117 = __p0_117; \
+  int16x4_t __s1_117 = __p1_117; \
+  int16x4_t __s2_117 = __p2_117; \
+  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
+  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
+  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
+  __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
+  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
+  __ret_117; \
 })
 #endif
 
@@ -19360,9 +19376,9 @@
 }
 #else
 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19382,9 +19398,9 @@
 }
 #else
 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19404,10 +19420,10 @@
 }
 #else
 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19427,10 +19443,10 @@
 }
 #else
 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19443,50 +19459,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  int64x2_t __s0_114 = __p0_114; \
-  int32x2_t __s1_114 = __p1_114; \
-  int32x2_t __s2_114 = __p2_114; \
-  int64x2_t __ret_114; \
-  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
-  __ret_114; \
+#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
+  int64x2_t __ret_118; \
+  int64x2_t __s0_118 = __p0_118; \
+  int32x2_t __s1_118 = __p1_118; \
+  int32x2_t __s2_118 = __p2_118; \
+  __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \
+  __ret_118; \
 })
 #else
-#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  int64x2_t __s0_115 = __p0_115; \
-  int32x2_t __s1_115 = __p1_115; \
-  int32x2_t __s2_115 = __p2_115; \
-  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
-  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
-  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
-  int64x2_t __ret_115; \
-  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
-  __ret_115; \
+#define vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
+  int64x2_t __ret_119; \
+  int64x2_t __s0_119 = __p0_119; \
+  int32x2_t __s1_119 = __p1_119; \
+  int32x2_t __s2_119 = __p2_119; \
+  int64x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
+  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
+  int32x2_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \
+  __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, __noswap_splat_lane_s32(__rev2_119, __p3_119)); \
+  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
+  __ret_119; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  int32x4_t __s0_116 = __p0_116; \
-  int16x4_t __s1_116 = __p1_116; \
-  int16x4_t __s2_116 = __p2_116; \
-  int32x4_t __ret_116; \
-  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
-  __ret_116; \
+#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \
+  int32x4_t __ret_120; \
+  int32x4_t __s0_120 = __p0_120; \
+  int16x4_t __s1_120 = __p1_120; \
+  int16x4_t __s2_120 = __p2_120; \
+  __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \
+  __ret_120; \
 })
 #else
-#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  int32x4_t __s0_117 = __p0_117; \
-  int16x4_t __s1_117 = __p1_117; \
-  int16x4_t __s2_117 = __p2_117; \
-  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
-  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
-  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
-  int32x4_t __ret_117; \
-  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
-  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
-  __ret_117; \
+#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \
+  int32x4_t __ret_121; \
+  int32x4_t __s0_121 = __p0_121; \
+  int16x4_t __s1_121 = __p1_121; \
+  int16x4_t __s2_121 = __p2_121; \
+  int32x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
+  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
+  int16x4_t __rev2_121;  __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \
+  __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \
+  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
+  __ret_121; \
 })
 #endif
 
@@ -19498,9 +19514,9 @@
 }
 #else
 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19520,9 +19536,9 @@
 }
 #else
 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19542,9 +19558,9 @@
 }
 #else
 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19564,9 +19580,9 @@
 }
 #else
 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19586,9 +19602,9 @@
 }
 #else
 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19608,9 +19624,9 @@
 }
 #else
 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19630,8 +19646,8 @@
 }
 #else
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19646,8 +19662,8 @@
 }
 #else
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19662,8 +19678,8 @@
 }
 #else
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19678,8 +19694,8 @@
 }
 #else
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19694,9 +19710,9 @@
 }
 #else
 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19716,9 +19732,9 @@
 }
 #else
 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19731,44 +19747,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
-  int32x2_t __s0_118 = __p0_118; \
-  int32x2_t __s1_118 = __p1_118; \
-  int64x2_t __ret_118; \
-  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
-  __ret_118; \
+#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \
+  int64x2_t __ret_122; \
+  int32x2_t __s0_122 = __p0_122; \
+  int32x2_t __s1_122 = __p1_122; \
+  __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \
+  __ret_122; \
 })
 #else
-#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
-  int32x2_t __s0_119 = __p0_119; \
-  int32x2_t __s1_119 = __p1_119; \
-  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  int64x2_t __ret_119; \
-  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
+#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \
+  int64x2_t __ret_123; \
+  int32x2_t __s0_123 = __p0_123; \
+  int32x2_t __s1_123 = __p1_123; \
+  int32x2_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \
+  int32x2_t __rev1_123;  __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \
+  __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \
+  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \
+  __ret_123; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
-  int16x4_t __s0_120 = __p0_120; \
-  int16x4_t __s1_120 = __p1_120; \
-  int32x4_t __ret_120; \
-  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
-  __ret_120; \
+#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \
+  int32x4_t __ret_124; \
+  int16x4_t __s0_124 = __p0_124; \
+  int16x4_t __s1_124 = __p1_124; \
+  __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \
+  __ret_124; \
 })
 #else
-#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
-  int16x4_t __s0_121 = __p0_121; \
-  int16x4_t __s1_121 = __p1_121; \
-  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
-  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
-  int32x4_t __ret_121; \
-  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
-  __ret_121; \
+#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \
+  int32x4_t __ret_125; \
+  int16x4_t __s0_125 = __p0_125; \
+  int16x4_t __s1_125 = __p1_125; \
+  int16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
+  int16x4_t __rev1_125;  __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \
+  __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \
+  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
+  __ret_125; \
 })
 #endif
 
@@ -19780,8 +19796,8 @@
 }
 #else
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19801,8 +19817,8 @@
 }
 #else
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19822,8 +19838,8 @@
 }
 #else
 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19843,8 +19859,8 @@
 }
 #else
 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19864,8 +19880,8 @@
 }
 #else
 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19885,8 +19901,8 @@
 }
 #else
 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19906,8 +19922,8 @@
 }
 #else
 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19927,8 +19943,8 @@
 }
 #else
 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19948,8 +19964,8 @@
 }
 #else
 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19969,8 +19985,8 @@
 }
 #else
 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19990,8 +20006,8 @@
 }
 #else
 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20011,8 +20027,8 @@
 }
 #else
 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20027,8 +20043,8 @@
 }
 #else
 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20043,8 +20059,8 @@
 }
 #else
 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20059,8 +20075,8 @@
 }
 #else
 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20075,8 +20091,8 @@
 }
 #else
 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20091,8 +20107,8 @@
 }
 #else
 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20107,9 +20123,9 @@
 }
 #else
 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20129,9 +20145,9 @@
 }
 #else
 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20151,9 +20167,9 @@
 }
 #else
 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20173,9 +20189,9 @@
 }
 #else
 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20195,8 +20211,8 @@
 }
 #else
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20211,8 +20227,8 @@
 }
 #else
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20227,8 +20243,8 @@
 }
 #else
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20243,8 +20259,8 @@
 }
 #else
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20259,9 +20275,9 @@
 }
 #else
 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20276,9 +20292,9 @@
 }
 #else
 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20293,9 +20309,9 @@
 }
 #else
 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20310,9 +20326,9 @@
 }
 #else
 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20327,9 +20343,9 @@
 }
 #else
 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20344,9 +20360,9 @@
 }
 #else
 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20361,9 +20377,9 @@
 }
 #else
 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20378,9 +20394,9 @@
 }
 #else
 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20395,9 +20411,9 @@
 }
 #else
 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20412,9 +20428,9 @@
 }
 #else
 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20434,9 +20450,9 @@
 }
 #else
 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20451,9 +20467,9 @@
 }
 #else
 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20468,9 +20484,9 @@
 }
 #else
 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20490,9 +20506,9 @@
 }
 #else
 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20501,23 +20517,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -20525,23 +20541,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -20549,23 +20565,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -20573,23 +20589,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -20597,23 +20613,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -20621,23 +20637,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -20645,23 +20661,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -20669,23 +20685,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -20693,23 +20709,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -20723,9 +20739,9 @@
 }
 #else
 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20740,9 +20756,9 @@
 }
 #else
 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20757,9 +20773,9 @@
 }
 #else
 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20774,9 +20790,9 @@
 }
 #else
 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20791,9 +20807,9 @@
 }
 #else
 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20808,9 +20824,9 @@
 }
 #else
 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20825,9 +20841,9 @@
 }
 #else
 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20842,9 +20858,9 @@
 }
 #else
 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20859,9 +20875,9 @@
 }
 #else
 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20876,9 +20892,9 @@
 }
 #else
 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20898,9 +20914,9 @@
 }
 #else
 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20915,9 +20931,9 @@
 }
 #else
 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20932,9 +20948,9 @@
 }
 #else
 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20954,9 +20970,9 @@
 }
 #else
 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20965,16 +20981,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -20983,16 +20999,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21001,16 +21017,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21019,16 +21035,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21037,16 +21053,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21055,16 +21071,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21073,16 +21089,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21091,16 +21107,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21109,16 +21125,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21127,16 +21143,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21144,23 +21160,23 @@
 #endif
 
 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21169,16 +21185,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21187,16 +21203,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21204,23 +21220,23 @@
 #endif
 
 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21229,16 +21245,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21247,16 +21263,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21265,16 +21281,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21283,16 +21299,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21301,16 +21317,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21319,16 +21335,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21336,23 +21352,23 @@
 #endif
 
 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21361,23 +21377,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -21385,23 +21401,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -21409,23 +21425,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -21433,23 +21449,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -21457,23 +21473,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -21481,23 +21497,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -21505,23 +21521,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -21529,23 +21545,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -21553,23 +21569,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -21583,9 +21599,9 @@
 }
 #else
 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21600,9 +21616,9 @@
 }
 #else
 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21617,9 +21633,9 @@
 }
 #else
 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21634,9 +21650,9 @@
 }
 #else
 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21651,9 +21667,9 @@
 }
 #else
 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21668,18 +21684,13 @@
 }
 #else
 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21690,9 +21701,9 @@
 }
 #else
 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21707,18 +21718,13 @@
 }
 #else
 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21729,9 +21735,9 @@
 }
 #else
 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21746,9 +21752,9 @@
 }
 #else
 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21768,9 +21774,9 @@
 }
 #else
 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21785,9 +21791,9 @@
 }
 #else
 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21802,18 +21808,13 @@
 }
 #else
 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -21829,18 +21830,13 @@
 }
 #else
 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21851,9 +21847,9 @@
 }
 #else
 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21873,9 +21869,9 @@
 }
 #else
 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21895,9 +21891,9 @@
 }
 #else
 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21917,9 +21913,9 @@
 }
 #else
 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21939,9 +21935,9 @@
 }
 #else
 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21961,9 +21957,9 @@
 }
 #else
 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21983,8 +21979,8 @@
 }
 #else
 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21999,8 +21995,8 @@
 }
 #else
 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22015,8 +22011,8 @@
 }
 #else
 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22031,8 +22027,8 @@
 }
 #else
 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22047,9 +22043,9 @@
 }
 #else
 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22064,9 +22060,9 @@
 }
 #else
 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22081,8 +22077,8 @@
 }
 #else
 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22097,8 +22093,8 @@
 }
 #else
 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22113,8 +22109,8 @@
 }
 #else
 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22129,8 +22125,8 @@
 }
 #else
 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22145,8 +22141,8 @@
 }
 #else
 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22161,8 +22157,8 @@
 }
 #else
 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22177,8 +22173,8 @@
 }
 #else
 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22193,8 +22189,8 @@
 }
 #else
 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22209,8 +22205,8 @@
 }
 #else
 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22225,8 +22221,8 @@
 }
 #else
 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22241,8 +22237,8 @@
 }
 #else
 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22257,8 +22253,8 @@
 }
 #else
 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22273,8 +22269,8 @@
 }
 #else
 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22289,8 +22285,8 @@
 }
 #else
 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22305,8 +22301,8 @@
 }
 #else
 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22321,8 +22317,8 @@
 }
 #else
 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22337,8 +22333,8 @@
 }
 #else
 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22353,8 +22349,8 @@
 }
 #else
 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22369,8 +22365,8 @@
 }
 #else
 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22385,8 +22381,8 @@
 }
 #else
 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __ret;
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22401,8 +22397,8 @@
 }
 #else
 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22417,8 +22413,8 @@
 }
 #else
 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __ret;
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22433,8 +22429,8 @@
 }
 #else
 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22449,8 +22445,8 @@
 }
 #else
 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22465,8 +22461,8 @@
 }
 #else
 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22481,8 +22477,8 @@
 }
 #else
 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22497,8 +22493,8 @@
 }
 #else
 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22513,8 +22509,8 @@
 }
 #else
 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22529,8 +22525,8 @@
 }
 #else
 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22545,8 +22541,8 @@
 }
 #else
 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22561,8 +22557,8 @@
 }
 #else
 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22577,8 +22573,8 @@
 }
 #else
 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22593,8 +22589,8 @@
 }
 #else
 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22609,8 +22605,8 @@
 }
 #else
 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22625,8 +22621,8 @@
 }
 #else
 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22641,8 +22637,8 @@
 }
 #else
 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22657,9 +22653,9 @@
 }
 #else
 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22674,9 +22670,9 @@
 }
 #else
 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22691,9 +22687,9 @@
 }
 #else
 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22708,9 +22704,9 @@
 }
 #else
 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22725,9 +22721,9 @@
 }
 #else
 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22742,9 +22738,9 @@
 }
 #else
 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22759,9 +22755,9 @@
 }
 #else
 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22776,9 +22772,9 @@
 }
 #else
 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22793,9 +22789,9 @@
 }
 #else
 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22810,9 +22806,9 @@
 }
 #else
 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22827,9 +22823,9 @@
 }
 #else
 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22844,9 +22840,9 @@
 }
 #else
 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22861,9 +22857,9 @@
 }
 #else
 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22878,9 +22874,9 @@
 }
 #else
 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22895,9 +22891,9 @@
 }
 #else
 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22912,9 +22908,9 @@
 }
 #else
 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22929,9 +22925,9 @@
 }
 #else
 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22946,9 +22942,9 @@
 }
 #else
 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22963,9 +22959,9 @@
 }
 #else
 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22980,9 +22976,9 @@
 }
 #else
 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22997,9 +22993,9 @@
 }
 #else
 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23014,9 +23010,9 @@
 }
 #else
 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23036,9 +23032,9 @@
 }
 #else
 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23053,9 +23049,9 @@
 }
 #else
 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23070,9 +23066,9 @@
 }
 #else
 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23092,9 +23088,9 @@
 }
 #else
 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23103,16 +23099,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23121,16 +23117,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23139,16 +23135,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23157,16 +23153,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23175,16 +23171,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23193,16 +23189,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23211,16 +23207,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23229,16 +23225,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23247,16 +23243,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23265,16 +23261,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23282,23 +23278,23 @@
 #endif
 
 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23307,16 +23303,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23325,16 +23321,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23342,23 +23338,23 @@
 #endif
 
 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23367,23 +23363,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -23391,23 +23387,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -23415,23 +23411,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -23439,23 +23435,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -23463,23 +23459,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -23487,23 +23483,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -23517,8 +23513,8 @@
 }
 #else
 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23533,8 +23529,8 @@
 }
 #else
 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23549,8 +23545,8 @@
 }
 #else
 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23565,8 +23561,8 @@
 }
 #else
 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23581,9 +23577,9 @@
 }
 #else
 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23598,9 +23594,9 @@
 }
 #else
 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23609,19 +23605,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23630,19 +23626,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23651,19 +23647,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23672,19 +23668,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23693,19 +23689,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23714,19 +23710,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23735,19 +23731,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23756,19 +23752,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23777,19 +23773,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23798,19 +23794,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23818,27 +23814,27 @@
 #endif
 
 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23847,19 +23843,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23868,19 +23864,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23888,27 +23884,27 @@
 #endif
 
 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23923,9 +23919,9 @@
 }
 #else
 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23945,9 +23941,9 @@
 }
 #else
 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23967,9 +23963,9 @@
 }
 #else
 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23989,9 +23985,9 @@
 }
 #else
 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24011,9 +24007,9 @@
 }
 #else
 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24033,9 +24029,9 @@
 }
 #else
 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24049,26 +24045,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24076,26 +24072,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24103,26 +24099,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24130,26 +24126,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24157,26 +24153,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24184,26 +24180,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24211,26 +24207,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24238,26 +24234,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24265,26 +24261,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24292,26 +24288,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24319,26 +24315,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24346,26 +24342,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24373,26 +24369,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24400,26 +24396,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24427,60 +24423,60 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
 
 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24488,26 +24484,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24515,26 +24511,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24542,60 +24538,60 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
 
 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24609,9 +24605,9 @@
 }
 #else
 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24626,9 +24622,9 @@
 }
 #else
 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24643,9 +24639,9 @@
 }
 #else
 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24660,9 +24656,9 @@
 }
 #else
 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24677,9 +24673,9 @@
 }
 #else
 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24694,9 +24690,9 @@
 }
 #else
 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24711,9 +24707,9 @@
 }
 #else
 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24728,9 +24724,9 @@
 }
 #else
 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24745,9 +24741,9 @@
 }
 #else
 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24762,9 +24758,9 @@
 }
 #else
 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24784,9 +24780,9 @@
 }
 #else
 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24801,9 +24797,9 @@
 }
 #else
 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24818,9 +24814,9 @@
 }
 #else
 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24840,9 +24836,9 @@
 }
 #else
 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24851,16 +24847,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24869,16 +24865,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -24887,16 +24883,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -24905,16 +24901,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24923,16 +24919,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24941,16 +24937,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -24959,16 +24955,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -24977,16 +24973,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24995,16 +24991,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25013,16 +25009,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25030,23 +25026,23 @@
 #endif
 
 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25055,16 +25051,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25073,16 +25069,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25090,23 +25086,23 @@
 #endif
 
 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25115,23 +25111,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -25139,23 +25135,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -25163,23 +25159,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -25187,23 +25183,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -25211,23 +25207,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -25235,23 +25231,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -25259,16 +25255,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25277,16 +25273,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25295,16 +25291,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25313,16 +25309,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25331,16 +25327,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25349,16 +25345,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25367,16 +25363,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25385,16 +25381,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25403,16 +25399,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25421,16 +25417,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25438,23 +25434,23 @@
 #endif
 
 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25463,16 +25459,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25481,16 +25477,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25498,23 +25494,23 @@
 #endif
 
 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25523,23 +25519,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -25547,23 +25543,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -25571,23 +25567,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -25595,23 +25591,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -25619,23 +25615,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -25643,23 +25639,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -25667,19 +25663,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25688,19 +25684,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25709,19 +25705,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25730,19 +25726,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25751,19 +25747,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25772,19 +25768,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25793,19 +25789,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25814,19 +25810,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25835,19 +25831,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25856,19 +25852,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25877,19 +25873,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25898,19 +25894,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25919,19 +25915,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25940,19 +25936,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25960,27 +25956,27 @@
 #endif
 
 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25989,19 +25985,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26010,19 +26006,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26030,27 +26026,27 @@
 #endif
 
 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26059,19 +26055,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26080,19 +26076,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26101,19 +26097,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26122,19 +26118,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26143,19 +26139,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26164,19 +26160,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26185,19 +26181,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26206,19 +26202,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26227,19 +26223,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26248,19 +26244,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26268,27 +26264,27 @@
 #endif
 
 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26297,19 +26293,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26318,19 +26314,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26338,27 +26334,27 @@
 #endif
 
 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26367,19 +26363,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26388,19 +26384,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26409,19 +26405,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26430,19 +26426,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26451,19 +26447,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26472,19 +26468,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26493,19 +26489,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26514,19 +26510,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26535,19 +26531,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26556,19 +26552,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26577,19 +26573,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26598,19 +26594,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26619,19 +26615,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26640,19 +26636,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26660,27 +26656,27 @@
 #endif
 
 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26689,19 +26685,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26710,19 +26706,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26730,27 +26726,27 @@
 #endif
 
 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -29893,9 +29889,9 @@
 }
 #else
 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29910,9 +29906,9 @@
 }
 #else
 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -29927,9 +29923,9 @@
 }
 #else
 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -29944,9 +29940,9 @@
 }
 #else
 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29961,9 +29957,9 @@
 }
 #else
 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29978,9 +29974,9 @@
 }
 #else
 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -29995,9 +29991,9 @@
 }
 #else
 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30012,9 +30008,9 @@
 }
 #else
 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30029,9 +30025,9 @@
 }
 #else
 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30046,9 +30042,9 @@
 }
 #else
 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30063,9 +30059,9 @@
 }
 #else
 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30085,9 +30081,9 @@
 }
 #else
 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30102,9 +30098,9 @@
 }
 #else
 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30119,9 +30115,9 @@
 }
 #else
 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30136,9 +30132,9 @@
 }
 #else
 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30158,9 +30154,9 @@
 }
 #else
 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30175,9 +30171,9 @@
 }
 #else
 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30197,9 +30193,9 @@
 }
 #else
 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30219,9 +30215,9 @@
 }
 #else
 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30241,9 +30237,9 @@
 }
 #else
 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30263,9 +30259,9 @@
 }
 #else
 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30285,9 +30281,9 @@
 }
 #else
 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30307,9 +30303,9 @@
 }
 #else
 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30324,9 +30320,9 @@
 }
 #else
 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30341,9 +30337,9 @@
 }
 #else
 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30358,9 +30354,9 @@
 }
 #else
 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30375,9 +30371,9 @@
 }
 #else
 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30392,9 +30388,9 @@
 }
 #else
 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30409,9 +30405,9 @@
 }
 #else
 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30426,9 +30422,9 @@
 }
 #else
 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30443,9 +30439,9 @@
 }
 #else
 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30460,9 +30456,9 @@
 }
 #else
 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30477,9 +30473,9 @@
 }
 #else
 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30494,9 +30490,9 @@
 }
 #else
 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30511,9 +30507,9 @@
 }
 #else
 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30528,9 +30524,9 @@
 }
 #else
 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30545,9 +30541,9 @@
 }
 #else
 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30562,11 +30558,11 @@
 }
 #else
 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30581,11 +30577,11 @@
 }
 #else
 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30600,11 +30596,11 @@
 }
 #else
 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30619,12 +30615,12 @@
 }
 #else
 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30639,12 +30635,12 @@
 }
 #else
 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30659,12 +30655,12 @@
 }
 #else
 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30679,13 +30675,13 @@
 }
 #else
 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30700,13 +30696,13 @@
 }
 #else
 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30721,13 +30717,13 @@
 }
 #else
 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30742,10 +30738,10 @@
 }
 #else
 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30760,10 +30756,10 @@
 }
 #else
 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30778,10 +30774,10 @@
 }
 #else
 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30796,12 +30792,12 @@
 }
 #else
 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30816,12 +30812,12 @@
 }
 #else
 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30836,12 +30832,12 @@
 }
 #else
 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30856,13 +30852,13 @@
 }
 #else
 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30877,13 +30873,13 @@
 }
 #else
 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30898,13 +30894,13 @@
 }
 #else
 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30919,6 +30915,7 @@
 }
 #else
 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30926,7 +30923,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30941,6 +30937,7 @@
 }
 #else
 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30948,7 +30945,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30963,6 +30959,7 @@
 }
 #else
 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30970,7 +30967,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30985,9 +30981,9 @@
 }
 #else
 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31004,9 +31000,9 @@
 }
 #else
 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31023,9 +31019,9 @@
 }
 #else
 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31042,9 +31038,9 @@
 }
 #else
 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31061,9 +31057,9 @@
 }
 #else
 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31080,9 +31076,9 @@
 }
 #else
 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31099,9 +31095,9 @@
 }
 #else
 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31118,9 +31114,9 @@
 }
 #else
 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31137,9 +31133,9 @@
 }
 #else
 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31156,9 +31152,9 @@
 }
 #else
 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31175,9 +31171,9 @@
 }
 #else
 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31194,9 +31190,9 @@
 }
 #else
 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31213,9 +31209,9 @@
 }
 #else
 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31232,9 +31228,9 @@
 }
 #else
 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31251,9 +31247,9 @@
 }
 #else
 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31270,9 +31266,9 @@
 }
 #else
 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31289,9 +31285,9 @@
 }
 #else
 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31308,9 +31304,9 @@
 }
 #else
 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31327,9 +31323,9 @@
 }
 #else
 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  uint8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31344,9 +31340,9 @@
 }
 #else
 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  uint16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31361,9 +31357,9 @@
 }
 #else
 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  uint8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31378,9 +31374,9 @@
 }
 #else
 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  uint16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31395,9 +31391,9 @@
 }
 #else
 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31412,9 +31408,9 @@
 }
 #else
 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31429,9 +31425,9 @@
 }
 #else
 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31446,9 +31442,9 @@
 }
 #else
 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31463,9 +31459,9 @@
 }
 #else
 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31480,9 +31476,9 @@
 }
 #else
 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31497,9 +31493,9 @@
 }
 #else
 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31514,9 +31510,9 @@
 }
 #else
 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -31531,9 +31527,9 @@
 }
 #else
 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31548,9 +31544,9 @@
 }
 #else
 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31565,9 +31561,9 @@
 }
 #else
 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -31582,9 +31578,9 @@
 }
 #else
 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31599,9 +31595,9 @@
 }
 #else
 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31618,9 +31614,9 @@
 }
 #else
 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31637,9 +31633,9 @@
 }
 #else
 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31656,9 +31652,9 @@
 }
 #else
 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31675,9 +31671,9 @@
 }
 #else
 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31694,9 +31690,9 @@
 }
 #else
 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31713,9 +31709,9 @@
 }
 #else
 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31732,9 +31728,9 @@
 }
 #else
 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31751,9 +31747,9 @@
 }
 #else
 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31770,9 +31766,9 @@
 }
 #else
 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31789,9 +31785,9 @@
 }
 #else
 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31808,9 +31804,9 @@
 }
 #else
 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31827,9 +31823,9 @@
 }
 #else
 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31846,9 +31842,9 @@
 }
 #else
 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31865,9 +31861,9 @@
 }
 #else
 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31884,9 +31880,9 @@
 }
 #else
 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31903,9 +31899,9 @@
 }
 #else
 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31922,9 +31918,9 @@
 }
 #else
 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31941,9 +31937,9 @@
 }
 #else
 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31960,9 +31956,9 @@
 }
 #else
 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31979,9 +31975,9 @@
 }
 #else
 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31998,9 +31994,9 @@
 }
 #else
 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32017,9 +32013,9 @@
 }
 #else
 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32036,9 +32032,9 @@
 }
 #else
 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32055,9 +32051,9 @@
 }
 #else
 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32074,9 +32070,9 @@
 }
 #else
 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32093,9 +32089,9 @@
 }
 #else
 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32112,9 +32108,9 @@
 }
 #else
 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32131,9 +32127,9 @@
 }
 #else
 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32150,9 +32146,9 @@
 }
 #else
 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32169,9 +32165,9 @@
 }
 #else
 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32188,9 +32184,9 @@
 }
 #else
 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32207,9 +32203,9 @@
 }
 #else
 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32226,9 +32222,9 @@
 }
 #else
 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32245,9 +32241,9 @@
 }
 #else
 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32264,9 +32260,9 @@
 }
 #else
 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32277,124 +32273,20 @@
 
 #if !defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
-  float16x4_t __s0_122 = __p0_122; \
-  float16x8_t __ret_122; \
-  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
-  __ret_122; \
-})
-#else
-#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
-  float16x4_t __s0_123 = __p0_123; \
-  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
-  float16x8_t __ret_123; \
-  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
-  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_123; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
-  float16x4_t __s0_124 = __p0_124; \
-  float16x4_t __ret_124; \
-  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
-  __ret_124; \
-})
-#else
-#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
-  float16x4_t __s0_125 = __p0_125; \
-  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
-  float16x4_t __ret_125; \
-  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
-  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
-  __ret_125; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
 #define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
+  int32x4_t __ret_126; \
   int32x4_t __s0_126 = __p0_126; \
   int32x2_t __s1_126 = __p1_126; \
-  int32x4_t __ret_126; \
   __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
   __ret_126; \
 })
 #else
 #define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
+  int32x4_t __ret_127; \
   int32x4_t __s0_127 = __p0_127; \
   int32x2_t __s1_127 = __p1_127; \
   int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
   int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
-  int32x4_t __ret_127; \
   __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
   __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
   __ret_127; \
@@ -32403,19 +32295,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
+  int16x8_t __ret_128; \
   int16x8_t __s0_128 = __p0_128; \
   int16x4_t __s1_128 = __p1_128; \
-  int16x8_t __ret_128; \
   __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
   __ret_128; \
 })
 #else
 #define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
+  int16x8_t __ret_129; \
   int16x8_t __s0_129 = __p0_129; \
   int16x4_t __s1_129 = __p1_129; \
   int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
-  int16x8_t __ret_129; \
   __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
   __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_129; \
@@ -32424,19 +32316,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
+  int32x2_t __ret_130; \
   int32x2_t __s0_130 = __p0_130; \
   int32x2_t __s1_130 = __p1_130; \
-  int32x2_t __ret_130; \
   __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
   __ret_130; \
 })
 #else
 #define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
+  int32x2_t __ret_131; \
   int32x2_t __s0_131 = __p0_131; \
   int32x2_t __s1_131 = __p1_131; \
   int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
   int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
-  int32x2_t __ret_131; \
   __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
   __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
   __ret_131; \
@@ -32445,19 +32337,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
+  int16x4_t __ret_132; \
   int16x4_t __s0_132 = __p0_132; \
   int16x4_t __s1_132 = __p1_132; \
-  int16x4_t __ret_132; \
   __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
   __ret_132; \
 })
 #else
 #define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
+  int16x4_t __ret_133; \
   int16x4_t __s0_133 = __p0_133; \
   int16x4_t __s1_133 = __p1_133; \
   int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
   int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
-  int16x4_t __ret_133; \
   __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
   __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
   __ret_133; \
@@ -32466,19 +32358,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
+  int32x4_t __ret_134; \
   int32x4_t __s0_134 = __p0_134; \
   int32x2_t __s1_134 = __p1_134; \
-  int32x4_t __ret_134; \
   __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
   __ret_134; \
 })
 #else
 #define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
+  int32x4_t __ret_135; \
   int32x4_t __s0_135 = __p0_135; \
   int32x2_t __s1_135 = __p1_135; \
   int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
   int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
-  int32x4_t __ret_135; \
   __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
   __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
   __ret_135; \
@@ -32487,19 +32379,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
+  int16x8_t __ret_136; \
   int16x8_t __s0_136 = __p0_136; \
   int16x4_t __s1_136 = __p1_136; \
-  int16x8_t __ret_136; \
   __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
   __ret_136; \
 })
 #else
 #define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
+  int16x8_t __ret_137; \
   int16x8_t __s0_137 = __p0_137; \
   int16x4_t __s1_137 = __p1_137; \
   int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
-  int16x8_t __ret_137; \
   __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
   __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_137; \
@@ -32508,19 +32400,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
+  int32x2_t __ret_138; \
   int32x2_t __s0_138 = __p0_138; \
   int32x2_t __s1_138 = __p1_138; \
-  int32x2_t __ret_138; \
   __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
   __ret_138; \
 })
 #else
 #define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
+  int32x2_t __ret_139; \
   int32x2_t __s0_139 = __p0_139; \
   int32x2_t __s1_139 = __p1_139; \
   int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
   int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
-  int32x2_t __ret_139; \
   __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
   __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
   __ret_139; \
@@ -32529,19 +32421,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
+  int16x4_t __ret_140; \
   int16x4_t __s0_140 = __p0_140; \
   int16x4_t __s1_140 = __p1_140; \
-  int16x4_t __ret_140; \
   __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
   __ret_140; \
 })
 #else
 #define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
+  int16x4_t __ret_141; \
   int16x4_t __s0_141 = __p0_141; \
   int16x4_t __s1_141 = __p1_141; \
   int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
   int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
-  int16x4_t __ret_141; \
   __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
   __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
   __ret_141; \
@@ -33878,8 +33770,8 @@
 }
 #else
 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -33899,8 +33791,8 @@
 }
 #else
 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -33974,16 +33866,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
   float16x8_t __ret; \
+  float16x8_t __s1 = __p1; \
   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
   __ret; \
 })
 #else
 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
   float16x8_t __s1 = __p1; \
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -33992,16 +33884,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
   float16x4_t __ret; \
+  float16x4_t __s1 = __p1; \
   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
   __ret; \
 })
 #else
 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
   float16x4_t __s1 = __p1; \
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -34186,18 +34078,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
   float16x8x2_t __ret; \
+  float16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
   __ret; \
 })
 #else
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x2_t __ret; \
   float16x8x2_t __s1 = __p1; \
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34208,18 +34100,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
   float16x4x2_t __ret; \
+  float16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
   __ret; \
 })
 #else
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x2_t __ret; \
   float16x4x2_t __s1 = __p1; \
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34302,19 +34194,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
   float16x8x3_t __ret; \
+  float16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
   __ret; \
 })
 #else
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x3_t __ret; \
   float16x8x3_t __s1 = __p1; \
   float16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34326,19 +34218,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
   float16x4x3_t __ret; \
+  float16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
   __ret; \
 })
 #else
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x3_t __ret; \
   float16x4x3_t __s1 = __p1; \
   float16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34426,20 +34318,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
   float16x8x4_t __ret; \
+  float16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
   __ret; \
 })
 #else
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x4_t __ret; \
   float16x8x4_t __s1 = __p1; \
   float16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34452,20 +34344,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
   float16x4x4_t __ret; \
+  float16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
   __ret; \
 })
 #else
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x4_t __ret; \
   float16x4x4_t __s1 = __p1; \
   float16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34826,8 +34718,8 @@
 }
 #else
 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34842,8 +34734,8 @@
 }
 #else
 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34858,8 +34750,8 @@
 }
 #else
 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34874,8 +34766,8 @@
 }
 #else
 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34890,8 +34782,8 @@
 }
 #else
 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34906,8 +34798,8 @@
 }
 #else
 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34922,8 +34814,8 @@
 }
 #else
 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34938,8 +34830,8 @@
 }
 #else
 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34954,8 +34846,8 @@
 }
 #else
 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34970,8 +34862,8 @@
 }
 #else
 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34986,8 +34878,8 @@
 }
 #else
 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35002,8 +34894,8 @@
 }
 #else
 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35018,8 +34910,8 @@
 }
 #else
 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35034,8 +34926,8 @@
 }
 #else
 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35050,8 +34942,8 @@
 }
 #else
 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35066,8 +34958,8 @@
 }
 #else
 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35084,9 +34976,9 @@
 }
 #else
 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35101,9 +34993,9 @@
 }
 #else
 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35118,8 +35010,8 @@
 }
 #else
 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35134,8 +35026,8 @@
 }
 #else
 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35152,8 +35044,8 @@
 }
 #else
 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35168,8 +35060,8 @@
 }
 #else
 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35184,8 +35076,8 @@
 }
 #else
 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35200,8 +35092,8 @@
 }
 #else
 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35216,8 +35108,8 @@
 }
 #else
 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35232,8 +35124,8 @@
 }
 #else
 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35248,8 +35140,8 @@
 }
 #else
 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35264,8 +35156,8 @@
 }
 #else
 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35280,8 +35172,8 @@
 }
 #else
 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35296,8 +35188,8 @@
 }
 #else
 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35317,8 +35209,8 @@
 }
 #else
 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35333,8 +35225,8 @@
 }
 #else
 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35349,8 +35241,8 @@
 }
 #else
 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35365,8 +35257,8 @@
 }
 #else
 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35383,8 +35275,8 @@
 }
 #else
 __ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35399,8 +35291,8 @@
 }
 #else
 __ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35415,8 +35307,8 @@
 }
 #else
 __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35431,8 +35323,8 @@
 }
 #else
 __ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35447,8 +35339,8 @@
 }
 #else
 __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35463,8 +35355,8 @@
 }
 #else
 __ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35479,8 +35371,8 @@
 }
 #else
 __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35495,8 +35387,8 @@
 }
 #else
 __ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35511,8 +35403,8 @@
 }
 #else
 __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35527,8 +35419,8 @@
 }
 #else
 __ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35543,8 +35435,8 @@
 }
 #else
 __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35559,8 +35451,8 @@
 }
 #else
 __ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35577,9 +35469,9 @@
 }
 #else
 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35594,9 +35486,9 @@
 }
 #else
 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35611,9 +35503,9 @@
 }
 #else
 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35628,9 +35520,9 @@
 }
 #else
 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35647,9 +35539,9 @@
 }
 #else
 __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35664,9 +35556,9 @@
 }
 #else
 __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35681,9 +35573,9 @@
 }
 #else
 __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35698,9 +35590,9 @@
 }
 #else
 __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35717,9 +35609,9 @@
 }
 #else
 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35739,9 +35631,9 @@
 }
 #else
 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35756,9 +35648,9 @@
 }
 #else
 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35773,10 +35665,10 @@
 }
 #else
 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35791,9 +35683,9 @@
 }
 #else
 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35808,10 +35700,10 @@
 }
 #else
 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35826,10 +35718,10 @@
 }
 #else
 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35844,9 +35736,9 @@
 }
 #else
 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35861,10 +35753,10 @@
 }
 #else
 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35872,7 +35764,8900 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
+  poly128_t __ret;
+  __ret = (poly128_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
+  float32x4_t __ret_142; \
+  float32x4_t __s0_142 = __p0_142; \
+  bfloat16x8_t __s1_142 = __p1_142; \
+  bfloat16x4_t __s2_142 = __p2_142; \
+bfloat16x4_t __reint_142 = __s2_142; \
+float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
+  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
+  __ret_142; \
+})
+#else
+#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
+  float32x4_t __ret_143; \
+  float32x4_t __s0_143 = __p0_143; \
+  bfloat16x8_t __s1_143 = __p1_143; \
+  bfloat16x4_t __s2_143 = __p2_143; \
+  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
+bfloat16x4_t __reint_143 = __rev2_143; \
+float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
+  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
+  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
+  __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
+  float32x2_t __ret_144; \
+  float32x2_t __s0_144 = __p0_144; \
+  bfloat16x4_t __s1_144 = __p1_144; \
+  bfloat16x4_t __s2_144 = __p2_144; \
+bfloat16x4_t __reint_144 = __s2_144; \
+float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
+  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
+  __ret_144; \
+})
+#else
+#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
+  float32x2_t __ret_145; \
+  float32x2_t __s0_145 = __p0_145; \
+  bfloat16x4_t __s1_145 = __p1_145; \
+  bfloat16x4_t __s2_145 = __p2_145; \
+  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
+  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
+bfloat16x4_t __reint_145 = __rev2_145; \
+float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
+  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
+  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
+  __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
+  float32x4_t __ret_146; \
+  float32x4_t __s0_146 = __p0_146; \
+  bfloat16x8_t __s1_146 = __p1_146; \
+  bfloat16x8_t __s2_146 = __p2_146; \
+bfloat16x8_t __reint_146 = __s2_146; \
+float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
+  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
+  __ret_146; \
+})
+#else
+#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
+  float32x4_t __ret_147; \
+  float32x4_t __s0_147 = __p0_147; \
+  bfloat16x8_t __s1_147 = __p1_147; \
+  bfloat16x8_t __s2_147 = __p2_147; \
+  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+bfloat16x8_t __reint_147 = __rev2_147; \
+float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
+  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
+  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
+  __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
+  float32x2_t __ret_148; \
+  float32x2_t __s0_148 = __p0_148; \
+  bfloat16x4_t __s1_148 = __p1_148; \
+  bfloat16x8_t __s2_148 = __p2_148; \
+bfloat16x8_t __reint_148 = __s2_148; \
+float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
+  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
+  __ret_148; \
+})
+#else
+#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
+  float32x2_t __ret_149; \
+  float32x2_t __s0_149 = __p0_149; \
+  bfloat16x4_t __s1_149 = __p1_149; \
+  bfloat16x8_t __s2_149 = __p2_149; \
+  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
+  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
+bfloat16x8_t __reint_149 = __rev2_149; \
+float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
+  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
+  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
+  __ret_149; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#define vcreate_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (bfloat16x4_t)(__promote); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
+  float32x4_t __ret_150;
+bfloat16x4_t __reint_150 = __p0_150;
+int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
+  __ret_150 = *(float32x4_t *) &__reint1_150;
+  return __ret_150;
+}
+#else
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
+  float32x4_t __ret_151;
+  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
+bfloat16x4_t __reint_151 = __rev0_151;
+int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
+  __ret_151 = *(float32x4_t *) &__reint1_151;
+  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
+  return __ret_151;
+}
+__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
+  float32x4_t __ret_152;
+bfloat16x4_t __reint_152 = __p0_152;
+int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
+  __ret_152 = *(float32x4_t *) &__reint1_152;
+  return __ret_152;
+}
+#endif
+
+__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
+  float32_t __ret;
+bfloat16_t __reint = __p0;
+int32_t __reint1 = *(int32_t *) &__reint << 16;
+  __ret = *(float32_t *) &__reint1;
+  return __ret;
+}
+__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
+  bfloat16_t __ret;
+  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
+  bfloat16x8_t __ret_153; \
+  bfloat16x4_t __s0_153 = __p0_153; \
+  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
+  __ret_153; \
+})
+#else
+#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
+  bfloat16x8_t __ret_154; \
+  bfloat16x4_t __s0_154 = __p0_154; \
+  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
+  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
+  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_154; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
+  bfloat16x4_t __ret_155; \
+  bfloat16x4_t __s0_155 = __p0_155; \
+  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
+  __ret_155; \
+})
+#else
+#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
+  bfloat16x4_t __ret_156; \
+  bfloat16x4_t __s0_156 = __p0_156; \
+  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
+  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
+  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
+  __ret_156; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
+  bfloat16x8_t __ret_157; \
+  bfloat16x8_t __s0_157 = __p0_157; \
+  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
+  __ret_157; \
+})
+#else
+#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
+  bfloat16x8_t __ret_158; \
+  bfloat16x8_t __s0_158 = __p0_158; \
+  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
+  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
+  bfloat16x4_t __ret_159; \
+  bfloat16x8_t __s0_159 = __p0_159; \
+  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
+  __ret_159; \
+})
+#else
+#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
+  bfloat16x4_t __ret_160; \
+  bfloat16x8_t __s0_160 = __p0_160; \
+  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
+  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
+  __ret_160; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
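+
+// Illustrative use: the lane index must be an integer constant expression,
+// which is why the lane accessors are macros rather than functions.
+//
+//   bfloat16_t first_lane(bfloat16x8_t v) { return vgetq_lane_bf16(v, 0); }
+//   bfloat16_t last_lane(bfloat16x4_t v) { return vget_lane_bf16(v, 3); }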
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+  __ret; \
+})
+#else
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+  __ret; \
+})
+#else
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
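+
+// The vld1 family covers three addressing patterns: vld1[q]_bf16 loads 4 (or
+// 8) contiguous bfloat16 elements, vld1[q]_dup_bf16 loads one element and
+// replicates it to every lane, and vld1[q]_lane_bf16 loads one element into a
+// single lane of an existing vector, leaving the other lanes unchanged.
+//
+// Illustrative use (ptr must address at least 9 elements here):
+//   bfloat16x8_t v = vld1q_bf16(ptr);
+//   v = vld1q_lane_bf16(ptr + 8, v, 0);  // overwrite lane 0 only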
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
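+
+// The _x2/_x3/_x4 variants load 2, 3, or 4 back-to-back vectors from
+// contiguous memory into a bfloat16x{4,8}x{2,3,4}_t aggregate with no
+// de-interleaving: element i of val[n] comes from memory offset
+// n * lanes + i.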
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
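+
+// Unlike the _xN loads above, vld2/vld3/vld4 de-interleave: vld2q_bf16 reads
+// 16 consecutive bfloat16 elements and places the even-indexed ones in
+// val[0] and the odd-indexed ones in val[1], and the 3- and 4-way forms
+// generalize this. The _dup forms replicate one interleaved group to all
+// lanes; the _lane forms load one group into a chosen lane.
+//
+// Illustrative use, splitting interleaved {re, im} pairs:
+//   bfloat16x8x2_t c = vld2q_bf16(ptr);  // c.val[0] = re, c.val[1] = im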
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
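+
+// Illustrative use: vset_lane returns a copy of the vector with one lane
+// replaced; the lane index must again be a constant expression.
+//
+//   bfloat16x4_t with_lane0(bfloat16x4_t v, bfloat16_t s) {
+//     return vset_lane_bf16(s, v, 0);
+//   }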
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
+})
+#else
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
+})
+#else
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+})
+#else
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+})
+#else
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+})
+#endif
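+
+// The stores mirror the loads: vst1[q]_bf16 writes all lanes contiguously and
+// vst1[q]_lane_bf16 writes a single chosen lane. Note that the big-endian
+// variants only reverse the source operand; a store produces no vector
+// result that would need swapping back.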
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+})
+#else
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+})
+#else
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+})
+#else
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+})
+#else
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+})
+#else
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+})
+#else
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#endif
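+
+// __a32_vcvt_bf16_f32 is the AArch32 helper behind the public vcvt_bf16_f32
+// below: it narrows each of the four single-precision lanes to bfloat16,
+// yielding a 64-bit result vector.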
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __a32_vcvt_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
+  bfloat16x8_t __ret_161; \
+  bfloat16x8_t __s0_161 = __p0_161; \
+  bfloat16x4_t __s2_161 = __p2_161; \
+  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
+  __ret_161; \
+})
+#else
+#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
+  bfloat16x8_t __ret_162; \
+  bfloat16x8_t __s0_162 = __p0_162; \
+  bfloat16x4_t __s2_162 = __p2_162; \
+  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
+  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
+  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_162; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
+  bfloat16x4_t __ret_163; \
+  bfloat16x4_t __s0_163 = __p0_163; \
+  bfloat16x4_t __s2_163 = __p2_163; \
+  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
+  __ret_163; \
+})
+#else
+#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
+  bfloat16x4_t __ret_164; \
+  bfloat16x4_t __s0_164 = __p0_164; \
+  bfloat16x4_t __s2_164 = __p2_164; \
+  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
+  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
+  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
+  __ret_164; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
+  bfloat16x8_t __ret_165; \
+  bfloat16x8_t __s0_165 = __p0_165; \
+  bfloat16x8_t __s2_165 = __p2_165; \
+  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
+  __ret_165; \
+})
+#else
+#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
+  bfloat16x8_t __ret_166; \
+  bfloat16x8_t __s0_166 = __p0_166; \
+  bfloat16x8_t __s2_166 = __p2_166; \
+  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
+  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
+  bfloat16x4_t __ret_167; \
+  bfloat16x4_t __s0_167 = __p0_167; \
+  bfloat16x8_t __s2_167 = __p2_167; \
+  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
+  __ret_167; \
+})
+#else
+#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
+  bfloat16x4_t __ret_168; \
+  bfloat16x4_t __s0_168 = __p0_168; \
+  bfloat16x8_t __s2_168 = __p2_168; \
+  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
+  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
+  __ret_168; \
+})
+#endif
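+
+// The vcopy[q]_lane[q] macros above are get/set-lane pairs: each returns
+// __p0 with lane __p1 replaced by lane __p3 of __p2, and the q/non-q suffixes
+// select 128- or 64-bit width for destination and source independently.
+//
+// Illustrative use:
+//   r = vcopyq_laneq_bf16(r, 0, src, 7);  // r[0] = src[7]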
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = __a64_vcvtq_low_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_COMPLEX)
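+// The intrinsics in this section map to the FCADD/FCMLA complex-arithmetic
+// instructions. Vectors are treated as interleaved complex numbers: even
+// lanes hold real parts, odd lanes imaginary parts. Per pair,
+// vcadd_rot90(a, b) computes a + i*b and vcadd_rot270(a, b) computes
+// a - i*b. Each vcmla rotation is a partial complex multiply-accumulate; a
+// full c += a*b can be assembled from two of them (a sketch, assuming the
+// standard FCMLA pairing; vcmlaq_rot90_f32 is defined later in this
+// section):
+//
+//   float32x4_t cmla(float32x4_t c, float32x4_t a, float32x4_t b) {
+//     c = vcmlaq_f32(c, a, b);        // c.re += a.re*b.re; c.im += a.re*b.im
+//     c = vcmlaq_rot90_f32(c, a, b);  // c.re -= a.im*b.im; c.im += a.im*b.re
+//     return c;
+//   }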
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
+  float32x2_t __ret_169; \
+  float32x2_t __s0_169 = __p0_169; \
+  float32x2_t __s1_169 = __p1_169; \
+  float32x2_t __s2_169 = __p2_169; \
+  float32x2_t __reint_169 = __s2_169; \
+  uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
+  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
+  __ret_169; \
+})
+#else
+#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
+  float32x2_t __ret_170; \
+  float32x2_t __s0_170 = __p0_170; \
+  float32x2_t __s1_170 = __p1_170; \
+  float32x2_t __s2_170 = __p2_170; \
+  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
+  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
+  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
+  float32x2_t __reint_170 = __rev2_170; \
+  uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
+  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
+  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
+  __ret_170; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
+  float32x4_t __ret_171; \
+  float32x4_t __s0_171 = __p0_171; \
+  float32x4_t __s1_171 = __p1_171; \
+  float32x2_t __s2_171 = __p2_171; \
+  float32x2_t __reint_171 = __s2_171; \
+  uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
+  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
+  __ret_171; \
+})
+#else
+#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
+  float32x4_t __ret_172; \
+  float32x4_t __s0_172 = __p0_172; \
+  float32x4_t __s1_172 = __p1_172; \
+  float32x2_t __s2_172 = __p2_172; \
+  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
+  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
+  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
+  float32x2_t __reint_172 = __rev2_172; \
+  uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
+  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
+  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
+  __ret_172; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
+  float32x2_t __ret_173; \
+  float32x2_t __s0_173 = __p0_173; \
+  float32x2_t __s1_173 = __p1_173; \
+  float32x4_t __s2_173 = __p2_173; \
+  float32x4_t __reint_173 = __s2_173; \
+  uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
+  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
+  __ret_173; \
+})
+#else
+#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
+  float32x2_t __ret_174; \
+  float32x2_t __s0_174 = __p0_174; \
+  float32x2_t __s1_174 = __p1_174; \
+  float32x4_t __s2_174 = __p2_174; \
+  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
+  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
+  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
+  float32x4_t __reint_174 = __rev2_174; \
+  uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
+  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
+  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
+  __ret_174; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
+  float32x4_t __ret_175; \
+  float32x4_t __s0_175 = __p0_175; \
+  float32x4_t __s1_175 = __p1_175; \
+  float32x4_t __s2_175 = __p2_175; \
+  float32x4_t __reint_175 = __s2_175; \
+  uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
+  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
+  __ret_175; \
+})
+#else
+#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
+  float32x4_t __ret_176; \
+  float32x4_t __s0_176 = __p0_176; \
+  float32x4_t __s1_176 = __p1_176; \
+  float32x4_t __s2_176 = __p2_176; \
+  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
+  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
+  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
+  float32x4_t __reint_176 = __rev2_176; \
+  uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
+  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
+  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
+  __ret_176; \
+})
+#endif
+
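+/* vcmla*_rot180_*: complex multiply-accumulate, lowering to FCMLA with a
+   rotation of 180 degrees. */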
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
+  float32x2_t __ret_177; \
+  float32x2_t __s0_177 = __p0_177; \
+  float32x2_t __s1_177 = __p1_177; \
+  float32x2_t __s2_177 = __p2_177; \
+  float32x2_t __reint_177 = __s2_177; \
+  uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
+  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
+  __ret_177; \
+})
+#else
+#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
+  float32x2_t __ret_178; \
+  float32x2_t __s0_178 = __p0_178; \
+  float32x2_t __s1_178 = __p1_178; \
+  float32x2_t __s2_178 = __p2_178; \
+  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
+  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
+  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
+  float32x2_t __reint_178 = __rev2_178; \
+  uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
+  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
+  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
+  __ret_178; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
+  float32x4_t __ret_179; \
+  float32x4_t __s0_179 = __p0_179; \
+  float32x4_t __s1_179 = __p1_179; \
+  float32x2_t __s2_179 = __p2_179; \
+  float32x2_t __reint_179 = __s2_179; \
+  uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
+  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
+  __ret_179; \
+})
+#else
+#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
+  float32x4_t __ret_180; \
+  float32x4_t __s0_180 = __p0_180; \
+  float32x4_t __s1_180 = __p1_180; \
+  float32x2_t __s2_180 = __p2_180; \
+  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
+  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
+  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
+  float32x2_t __reint_180 = __rev2_180; \
+  uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
+  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
+  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
+  __ret_180; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
+  float32x2_t __ret_181; \
+  float32x2_t __s0_181 = __p0_181; \
+  float32x2_t __s1_181 = __p1_181; \
+  float32x4_t __s2_181 = __p2_181; \
+  float32x4_t __reint_181 = __s2_181; \
+  uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
+  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
+  __ret_181; \
+})
+#else
+#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
+  float32x2_t __ret_182; \
+  float32x2_t __s0_182 = __p0_182; \
+  float32x2_t __s1_182 = __p1_182; \
+  float32x4_t __s2_182 = __p2_182; \
+  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
+  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
+  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
+  float32x4_t __reint_182 = __rev2_182; \
+  uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
+  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
+  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
+  __ret_182; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
+  float32x4_t __ret_183; \
+  float32x4_t __s0_183 = __p0_183; \
+  float32x4_t __s1_183 = __p1_183; \
+  float32x4_t __s2_183 = __p2_183; \
+  float32x4_t __reint_183 = __s2_183; \
+  uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
+  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
+  __ret_183; \
+})
+#else
+#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
+  float32x4_t __ret_184; \
+  float32x4_t __s0_184 = __p0_184; \
+  float32x4_t __s1_184 = __p1_184; \
+  float32x4_t __s2_184 = __p2_184; \
+  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
+  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
+  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
+  float32x4_t __reint_184 = __rev2_184; \
+  uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
+  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
+  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
+  __ret_184; \
+})
+#endif
+
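+/* vcmla*_rot270_*: complex multiply-accumulate, lowering to FCMLA with a
+   rotation of 270 degrees. */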
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
+  float32x2_t __ret_185; \
+  float32x2_t __s0_185 = __p0_185; \
+  float32x2_t __s1_185 = __p1_185; \
+  float32x2_t __s2_185 = __p2_185; \
+  float32x2_t __reint_185 = __s2_185; \
+  uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
+  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
+  __ret_185; \
+})
+#else
+#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
+  float32x2_t __ret_186; \
+  float32x2_t __s0_186 = __p0_186; \
+  float32x2_t __s1_186 = __p1_186; \
+  float32x2_t __s2_186 = __p2_186; \
+  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
+  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
+  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
+  float32x2_t __reint_186 = __rev2_186; \
+  uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
+  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
+  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
+  __ret_186; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
+  float32x4_t __ret_187; \
+  float32x4_t __s0_187 = __p0_187; \
+  float32x4_t __s1_187 = __p1_187; \
+  float32x2_t __s2_187 = __p2_187; \
+  float32x2_t __reint_187 = __s2_187; \
+  uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
+  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
+  __ret_187; \
+})
+#else
+#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
+  float32x4_t __ret_188; \
+  float32x4_t __s0_188 = __p0_188; \
+  float32x4_t __s1_188 = __p1_188; \
+  float32x2_t __s2_188 = __p2_188; \
+  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
+  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
+  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
+  float32x2_t __reint_188 = __rev2_188; \
+  uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
+  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
+  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
+  __ret_188; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
+  float32x2_t __ret_189; \
+  float32x2_t __s0_189 = __p0_189; \
+  float32x2_t __s1_189 = __p1_189; \
+  float32x4_t __s2_189 = __p2_189; \
+  float32x4_t __reint_189 = __s2_189; \
+  uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
+  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
+  __ret_189; \
+})
+#else
+#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
+  float32x2_t __ret_190; \
+  float32x2_t __s0_190 = __p0_190; \
+  float32x2_t __s1_190 = __p1_190; \
+  float32x4_t __s2_190 = __p2_190; \
+  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
+  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
+  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
+  float32x4_t __reint_190 = __rev2_190; \
+  uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
+  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
+  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
+  __ret_190; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
+  float32x4_t __ret_191; \
+  float32x4_t __s0_191 = __p0_191; \
+  float32x4_t __s1_191 = __p1_191; \
+  float32x4_t __s2_191 = __p2_191; \
+  float32x4_t __reint_191 = __s2_191; \
+  uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
+  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
+  __ret_191; \
+})
+#else
+#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
+  float32x4_t __ret_192; \
+  float32x4_t __s0_192 = __p0_192; \
+  float32x4_t __s1_192 = __p1_192; \
+  float32x4_t __s2_192 = __p2_192; \
+  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
+  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
+  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
+  float32x4_t __reint_192 = __rev2_192; \
+  uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
+  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
+  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
+  __ret_192; \
+})
+#endif
+
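+/* vcmla*_rot90_*: complex multiply-accumulate, lowering to FCMLA with a
+   rotation of 90 degrees. */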
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
+  float32x2_t __ret_193; \
+  float32x2_t __s0_193 = __p0_193; \
+  float32x2_t __s1_193 = __p1_193; \
+  float32x2_t __s2_193 = __p2_193; \
+  float32x2_t __reint_193 = __s2_193; \
+  uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
+  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
+  __ret_193; \
+})
+#else
+#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
+  float32x2_t __ret_194; \
+  float32x2_t __s0_194 = __p0_194; \
+  float32x2_t __s1_194 = __p1_194; \
+  float32x2_t __s2_194 = __p2_194; \
+  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
+  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
+  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
+  float32x2_t __reint_194 = __rev2_194; \
+  uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
+  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
+  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
+  __ret_194; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
+  float32x4_t __ret_195; \
+  float32x4_t __s0_195 = __p0_195; \
+  float32x4_t __s1_195 = __p1_195; \
+  float32x2_t __s2_195 = __p2_195; \
+  float32x2_t __reint_195 = __s2_195; \
+  uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
+  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
+  __ret_195; \
+})
+#else
+#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
+  float32x4_t __ret_196; \
+  float32x4_t __s0_196 = __p0_196; \
+  float32x4_t __s1_196 = __p1_196; \
+  float32x2_t __s2_196 = __p2_196; \
+  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
+  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
+  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
+  float32x2_t __reint_196 = __rev2_196; \
+  uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
+  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
+  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
+  __ret_196; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
+  float32x2_t __ret_197; \
+  float32x2_t __s0_197 = __p0_197; \
+  float32x2_t __s1_197 = __p1_197; \
+  float32x4_t __s2_197 = __p2_197; \
+  float32x4_t __reint_197 = __s2_197; \
+  uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
+  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
+  __ret_197; \
+})
+#else
+#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
+  float32x2_t __ret_198; \
+  float32x2_t __s0_198 = __p0_198; \
+  float32x2_t __s1_198 = __p1_198; \
+  float32x4_t __s2_198 = __p2_198; \
+  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
+  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
+  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
+  float32x4_t __reint_198 = __rev2_198; \
+  uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
+  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
+  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
+  __ret_198; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
+  float32x4_t __ret_199; \
+  float32x4_t __s0_199 = __p0_199; \
+  float32x4_t __s1_199 = __p1_199; \
+  float32x4_t __s2_199 = __p2_199; \
+  float32x4_t __reint_199 = __s2_199; \
+  uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
+  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
+  __ret_199; \
+})
+#else
+#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
+  float32x4_t __ret_200; \
+  float32x4_t __s0_200 = __p0_200; \
+  float32x4_t __s1_200 = __p1_200; \
+  float32x4_t __s2_200 = __p2_200; \
+  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
+  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
+  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
+  float32x4_t __reint_200 = __rev2_200; \
+  uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
+  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
+  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
+  __ret_200; \
+})
+#endif
+
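+/* Usage sketch (illustrative only, not part of the generated header): a full
+   complex multiply-accumulate acc += a * b can be assembled from the
+   rotation-0 and rotation-90 forms, assuming each float32x2_t holds one
+   complex value as (real, imaginary):
+
+     float32x2_t cmla(float32x2_t acc, float32x2_t a, float32x2_t b) {
+       return vcmla_rot90_f32(vcmla_f32(acc, a, b), a, b);
+     }
+*/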
+#endif
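+/* Half-precision (float16) variants of the complex arithmetic intrinsics;
+   these additionally require __ARM_FEATURE_FP16_VECTOR_ARITHMETIC. */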
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
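+/* vcadd*_rot90/_rot270 lower to FCADD: complex addition with the second
+   operand rotated by 90 or 270 degrees. */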
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
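+/* For float16 a complex (real, imaginary) pair occupies 32 bits, so the
+   lane-selection macros below splat the pair through a 32-bit lane rather
+   than a 64-bit one. */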
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
+  float16x4_t __ret_201; \
+  float16x4_t __s0_201 = __p0_201; \
+  float16x4_t __s1_201 = __p1_201; \
+  float16x4_t __s2_201 = __p2_201; \
+  float16x4_t __reint_201 = __s2_201; \
+  uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
+  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
+  __ret_201; \
+})
+#else
+#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
+  float16x4_t __ret_202; \
+  float16x4_t __s0_202 = __p0_202; \
+  float16x4_t __s1_202 = __p1_202; \
+  float16x4_t __s2_202 = __p2_202; \
+  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
+  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
+  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
+  float16x4_t __reint_202 = __rev2_202; \
+  uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
+  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
+  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
+  __ret_202; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
+  float16x8_t __ret_203; \
+  float16x8_t __s0_203 = __p0_203; \
+  float16x8_t __s1_203 = __p1_203; \
+  float16x4_t __s2_203 = __p2_203; \
+  float16x4_t __reint_203 = __s2_203; \
+  uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
+  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
+  __ret_203; \
+})
+#else
+#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
+  float16x8_t __ret_204; \
+  float16x8_t __s0_204 = __p0_204; \
+  float16x8_t __s1_204 = __p1_204; \
+  float16x4_t __s2_204 = __p2_204; \
+  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
+  float16x4_t __reint_204 = __rev2_204; \
+  uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
+  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
+  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_204; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
+  float16x4_t __ret_205; \
+  float16x4_t __s0_205 = __p0_205; \
+  float16x4_t __s1_205 = __p1_205; \
+  float16x8_t __s2_205 = __p2_205; \
+  float16x8_t __reint_205 = __s2_205; \
+  uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
+  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
+  __ret_205; \
+})
+#else
+#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
+  float16x4_t __ret_206; \
+  float16x4_t __s0_206 = __p0_206; \
+  float16x4_t __s1_206 = __p1_206; \
+  float16x8_t __s2_206 = __p2_206; \
+  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
+  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
+  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_206 = __rev2_206; \
+  uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
+  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
+  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
+  __ret_206; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
+  float16x8_t __ret_207; \
+  float16x8_t __s0_207 = __p0_207; \
+  float16x8_t __s1_207 = __p1_207; \
+  float16x8_t __s2_207 = __p2_207; \
+  float16x8_t __reint_207 = __s2_207; \
+  uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
+  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
+  __ret_207; \
+})
+#else
+#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
+  float16x8_t __ret_208; \
+  float16x8_t __s0_208 = __p0_208; \
+  float16x8_t __s1_208 = __p1_208; \
+  float16x8_t __s2_208 = __p2_208; \
+  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_208 = __rev2_208; \
+  uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
+  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
+  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_208; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
+  float16x4_t __ret_209; \
+  float16x4_t __s0_209 = __p0_209; \
+  float16x4_t __s1_209 = __p1_209; \
+  float16x4_t __s2_209 = __p2_209; \
+  float16x4_t __reint_209 = __s2_209; \
+  uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
+  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
+  __ret_209; \
+})
+#else
+#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
+  float16x4_t __ret_210; \
+  float16x4_t __s0_210 = __p0_210; \
+  float16x4_t __s1_210 = __p1_210; \
+  float16x4_t __s2_210 = __p2_210; \
+  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
+  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
+  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
+  float16x4_t __reint_210 = __rev2_210; \
+  uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
+  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
+  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
+  __ret_210; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
+  float16x8_t __ret_211; \
+  float16x8_t __s0_211 = __p0_211; \
+  float16x8_t __s1_211 = __p1_211; \
+  float16x4_t __s2_211 = __p2_211; \
+  float16x4_t __reint_211 = __s2_211; \
+  uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
+  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
+  __ret_211; \
+})
+#else
+#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
+  float16x8_t __ret_212; \
+  float16x8_t __s0_212 = __p0_212; \
+  float16x8_t __s1_212 = __p1_212; \
+  float16x4_t __s2_212 = __p2_212; \
+  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
+  float16x4_t __reint_212 = __rev2_212; \
+  uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
+  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
+  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_212; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
+  float16x4_t __ret_213; \
+  float16x4_t __s0_213 = __p0_213; \
+  float16x4_t __s1_213 = __p1_213; \
+  float16x8_t __s2_213 = __p2_213; \
+  float16x8_t __reint_213 = __s2_213; \
+  uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
+  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
+  __ret_213; \
+})
+#else
+#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
+  float16x4_t __ret_214; \
+  float16x4_t __s0_214 = __p0_214; \
+  float16x4_t __s1_214 = __p1_214; \
+  float16x8_t __s2_214 = __p2_214; \
+  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
+  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
+  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_214 = __rev2_214; \
+  uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
+  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
+  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
+  __ret_214; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
+  float16x8_t __ret_215; \
+  float16x8_t __s0_215 = __p0_215; \
+  float16x8_t __s1_215 = __p1_215; \
+  float16x8_t __s2_215 = __p2_215; \
+  float16x8_t __reint_215 = __s2_215; \
+  uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
+  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
+  __ret_215; \
+})
+#else
+#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
+  float16x8_t __ret_216; \
+  float16x8_t __s0_216 = __p0_216; \
+  float16x8_t __s1_216 = __p1_216; \
+  float16x8_t __s2_216 = __p2_216; \
+  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_216 = __rev2_216; \
+  uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
+  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
+  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_216; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
+  float16x4_t __ret_217; \
+  float16x4_t __s0_217 = __p0_217; \
+  float16x4_t __s1_217 = __p1_217; \
+  float16x4_t __s2_217 = __p2_217; \
+  float16x4_t __reint_217 = __s2_217; \
+  uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
+  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
+  __ret_217; \
+})
+#else
+#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
+  float16x4_t __ret_218; \
+  float16x4_t __s0_218 = __p0_218; \
+  float16x4_t __s1_218 = __p1_218; \
+  float16x4_t __s2_218 = __p2_218; \
+  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
+  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
+  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
+  float16x4_t __reint_218 = __rev2_218; \
+  uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
+  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
+  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
+  __ret_218; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
+  float16x8_t __ret_219; \
+  float16x8_t __s0_219 = __p0_219; \
+  float16x8_t __s1_219 = __p1_219; \
+  float16x4_t __s2_219 = __p2_219; \
+  float16x4_t __reint_219 = __s2_219; \
+  uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
+  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
+  __ret_219; \
+})
+#else
+#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
+  float16x8_t __ret_220; \
+  float16x8_t __s0_220 = __p0_220; \
+  float16x8_t __s1_220 = __p1_220; \
+  float16x4_t __s2_220 = __p2_220; \
+  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
+  float16x4_t __reint_220 = __rev2_220; \
+  uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
+  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
+  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_220; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
+  float16x4_t __ret_221; \
+  float16x4_t __s0_221 = __p0_221; \
+  float16x4_t __s1_221 = __p1_221; \
+  float16x8_t __s2_221 = __p2_221; \
+  float16x8_t __reint_221 = __s2_221; \
+  uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
+  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
+  __ret_221; \
+})
+#else
+#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
+  float16x4_t __ret_222; \
+  float16x4_t __s0_222 = __p0_222; \
+  float16x4_t __s1_222 = __p1_222; \
+  float16x8_t __s2_222 = __p2_222; \
+  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
+  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
+  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_222 = __rev2_222; \
+  uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
+  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
+  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
+  __ret_222; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
+  float16x8_t __ret_223; \
+  float16x8_t __s0_223 = __p0_223; \
+  float16x8_t __s1_223 = __p1_223; \
+  float16x8_t __s2_223 = __p2_223; \
+  float16x8_t __reint_223 = __s2_223; \
+  uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
+  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
+  __ret_223; \
+})
+#else
+#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
+  float16x8_t __ret_224; \
+  float16x8_t __s0_224 = __p0_224; \
+  float16x8_t __s1_224 = __p1_224; \
+  float16x8_t __s2_224 = __p2_224; \
+  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __reint_224 = __rev2_224; \
+  uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
+  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
+  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_224; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
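+/* Descriptive note: the _lane/_laneq macros below pick one complex number out
+   of the lane operand.  An f16 (real, imag) pair occupies 32 bits, so the
+   pair is selected with vget(q)_lane_u32 through a reinterpret and replicated
+   across an initializer list before calling the plain intrinsic. */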
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
+  float16x4_t __ret_225; \
+  float16x4_t __s0_225 = __p0_225; \
+  float16x4_t __s1_225 = __p1_225; \
+  float16x4_t __s2_225 = __p2_225; \
+float16x4_t __reint_225 = __s2_225; \
+uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
+  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
+  __ret_225; \
+})
+#else
+#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
+  float16x4_t __ret_226; \
+  float16x4_t __s0_226 = __p0_226; \
+  float16x4_t __s1_226 = __p1_226; \
+  float16x4_t __s2_226 = __p2_226; \
+  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
+  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
+  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
+float16x4_t __reint_226 = __rev2_226; \
+uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
+  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
+  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
+  __ret_226; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
+  float16x8_t __ret_227; \
+  float16x8_t __s0_227 = __p0_227; \
+  float16x8_t __s1_227 = __p1_227; \
+  float16x4_t __s2_227 = __p2_227; \
+float16x4_t __reint_227 = __s2_227; \
+uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
+  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
+  __ret_227; \
+})
+#else
+#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
+  float16x8_t __ret_228; \
+  float16x8_t __s0_228 = __p0_228; \
+  float16x8_t __s1_228 = __p1_228; \
+  float16x4_t __s2_228 = __p2_228; \
+  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
+float16x4_t __reint_228 = __rev2_228; \
+uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
+  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
+  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_228; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
+  float16x4_t __ret_229; \
+  float16x4_t __s0_229 = __p0_229; \
+  float16x4_t __s1_229 = __p1_229; \
+  float16x8_t __s2_229 = __p2_229; \
+float16x8_t __reint_229 = __s2_229; \
+uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
+  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
+  __ret_229; \
+})
+#else
+#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
+  float16x4_t __ret_230; \
+  float16x4_t __s0_230 = __p0_230; \
+  float16x4_t __s1_230 = __p1_230; \
+  float16x8_t __s2_230 = __p2_230; \
+  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
+  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
+  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_230 = __rev2_230; \
+uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
+  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
+  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
+  __ret_230; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
+  float16x8_t __ret_231; \
+  float16x8_t __s0_231 = __p0_231; \
+  float16x8_t __s1_231 = __p1_231; \
+  float16x8_t __s2_231 = __p2_231; \
+float16x8_t __reint_231 = __s2_231; \
+uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
+  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
+  __ret_231; \
+})
+#else
+#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
+  float16x8_t __ret_232; \
+  float16x8_t __s0_232 = __p0_232; \
+  float16x8_t __s1_232 = __p1_232; \
+  float16x8_t __s2_232 = __p2_232; \
+  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_232 = __rev2_232; \
+uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
+  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
+  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_232; \
+})
+#endif
+
+#endif
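+/* Descriptive note: AArch64 adds float64 variants of the complex-arithmetic
+   intrinsics.  The rotation suffix selects which half of a complex multiply
+   FCMLA performs; informally, with operands viewed as (real, imag) pairs:
+     rot0  : re += a.re*b.re,  im += a.re*b.im
+     rot90 : re -= a.im*b.im,  im += a.im*b.re
+     rot180: re -= a.re*b.re,  im -= a.re*b.im
+     rot270: re += a.im*b.im,  im -= a.im*b.re */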
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
+  float64x1_t __ret_233; \
+  float64x1_t __s0_233 = __p0_233; \
+  float64x1_t __s1_233 = __p1_233; \
+  float64x1_t __s2_233 = __p2_233; \
+float64x1_t __reint_233 = __s2_233; \
+uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
+  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
+  __ret_233; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
+  float64x2_t __ret_234; \
+  float64x2_t __s0_234 = __p0_234; \
+  float64x2_t __s1_234 = __p1_234; \
+  float64x1_t __s2_234 = __p2_234; \
+float64x1_t __reint_234 = __s2_234; \
+uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
+  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
+  __ret_234; \
+})
+#else
+#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
+  float64x2_t __ret_235; \
+  float64x2_t __s0_235 = __p0_235; \
+  float64x2_t __s1_235 = __p1_235; \
+  float64x1_t __s2_235 = __p2_235; \
+  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
+  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
+float64x1_t __reint_235 = __s2_235; \
+uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
+  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
+  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
+  __ret_235; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
+  float64x1_t __ret_236; \
+  float64x1_t __s0_236 = __p0_236; \
+  float64x1_t __s1_236 = __p1_236; \
+  float64x2_t __s2_236 = __p2_236; \
+float64x2_t __reint_236 = __s2_236; \
+uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
+  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
+  __ret_236; \
+})
+#else
+#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
+  float64x1_t __ret_237; \
+  float64x1_t __s0_237 = __p0_237; \
+  float64x1_t __s1_237 = __p1_237; \
+  float64x2_t __s2_237 = __p2_237; \
+  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
+float64x2_t __reint_237 = __rev2_237; \
+uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
+  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
+  __ret_237; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
+  float64x2_t __ret_238; \
+  float64x2_t __s0_238 = __p0_238; \
+  float64x2_t __s1_238 = __p1_238; \
+  float64x2_t __s2_238 = __p2_238; \
+float64x2_t __reint_238 = __s2_238; \
+uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
+  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
+  __ret_238; \
+})
+#else
+#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
+  float64x2_t __ret_239; \
+  float64x2_t __s0_239 = __p0_239; \
+  float64x2_t __s1_239 = __p1_239; \
+  float64x2_t __s2_239 = __p2_239; \
+  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
+  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
+  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
+float64x2_t __reint_239 = __rev2_239; \
+uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
+  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
+  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
+  __ret_239; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
+  float64x1_t __ret_240; \
+  float64x1_t __s0_240 = __p0_240; \
+  float64x1_t __s1_240 = __p1_240; \
+  float64x1_t __s2_240 = __p2_240; \
+float64x1_t __reint_240 = __s2_240; \
+uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
+  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
+  __ret_240; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
+  float64x2_t __ret_241; \
+  float64x2_t __s0_241 = __p0_241; \
+  float64x2_t __s1_241 = __p1_241; \
+  float64x1_t __s2_241 = __p2_241; \
+float64x1_t __reint_241 = __s2_241; \
+uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
+  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
+  __ret_241; \
+})
+#else
+#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
+  float64x2_t __ret_242; \
+  float64x2_t __s0_242 = __p0_242; \
+  float64x2_t __s1_242 = __p1_242; \
+  float64x1_t __s2_242 = __p2_242; \
+  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
+  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
+float64x1_t __reint_242 = __s2_242; \
+uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
+  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
+  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
+  __ret_242; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
+  float64x1_t __ret_243; \
+  float64x1_t __s0_243 = __p0_243; \
+  float64x1_t __s1_243 = __p1_243; \
+  float64x2_t __s2_243 = __p2_243; \
+float64x2_t __reint_243 = __s2_243; \
+uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
+  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
+  __ret_243; \
+})
+#else
+#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
+  float64x1_t __ret_244; \
+  float64x1_t __s0_244 = __p0_244; \
+  float64x1_t __s1_244 = __p1_244; \
+  float64x2_t __s2_244 = __p2_244; \
+  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
+float64x2_t __reint_244 = __rev2_244; \
+uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
+  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
+  __ret_244; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
+  float64x2_t __ret_245; \
+  float64x2_t __s0_245 = __p0_245; \
+  float64x2_t __s1_245 = __p1_245; \
+  float64x2_t __s2_245 = __p2_245; \
+float64x2_t __reint_245 = __s2_245; \
+uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
+  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
+  __ret_245; \
+})
+#else
+#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
+  float64x2_t __ret_246; \
+  float64x2_t __s0_246 = __p0_246; \
+  float64x2_t __s1_246 = __p1_246; \
+  float64x2_t __s2_246 = __p2_246; \
+  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
+  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
+  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
+float64x2_t __reint_246 = __rev2_246; \
+uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
+  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
+  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
+  __ret_246; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
+  float64x1_t __ret_247; \
+  float64x1_t __s0_247 = __p0_247; \
+  float64x1_t __s1_247 = __p1_247; \
+  float64x1_t __s2_247 = __p2_247; \
+float64x1_t __reint_247 = __s2_247; \
+uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
+  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
+  __ret_247; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
+  float64x2_t __ret_248; \
+  float64x2_t __s0_248 = __p0_248; \
+  float64x2_t __s1_248 = __p1_248; \
+  float64x1_t __s2_248 = __p2_248; \
+float64x1_t __reint_248 = __s2_248; \
+uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
+  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
+  __ret_248; \
+})
+#else
+#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
+  float64x2_t __ret_249; \
+  float64x2_t __s0_249 = __p0_249; \
+  float64x2_t __s1_249 = __p1_249; \
+  float64x1_t __s2_249 = __p2_249; \
+  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
+  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
+float64x1_t __reint_249 = __s2_249; \
+uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
+  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
+  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
+  __ret_249; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
+  float64x1_t __ret_250; \
+  float64x1_t __s0_250 = __p0_250; \
+  float64x1_t __s1_250 = __p1_250; \
+  float64x2_t __s2_250 = __p2_250; \
+float64x2_t __reint_250 = __s2_250; \
+uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
+  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
+  __ret_250; \
+})
+#else
+#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
+  float64x1_t __ret_251; \
+  float64x1_t __s0_251 = __p0_251; \
+  float64x1_t __s1_251 = __p1_251; \
+  float64x2_t __s2_251 = __p2_251; \
+  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
+float64x2_t __reint_251 = __rev2_251; \
+uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
+  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
+  __ret_251; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
+  float64x2_t __ret_252; \
+  float64x2_t __s0_252 = __p0_252; \
+  float64x2_t __s1_252 = __p1_252; \
+  float64x2_t __s2_252 = __p2_252; \
+float64x2_t __reint_252 = __s2_252; \
+uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
+  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
+  __ret_252; \
+})
+#else
+#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
+  float64x2_t __ret_253; \
+  float64x2_t __s0_253 = __p0_253; \
+  float64x2_t __s1_253 = __p1_253; \
+  float64x2_t __s2_253 = __p2_253; \
+  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
+  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
+  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
+float64x2_t __reint_253 = __rev2_253; \
+uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
+  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
+  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
+  __ret_253; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+  float64x1_t __ret_254; \
+  float64x1_t __s0_254 = __p0_254; \
+  float64x1_t __s1_254 = __p1_254; \
+  float64x1_t __s2_254 = __p2_254; \
+float64x1_t __reint_254 = __s2_254; \
+uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
+  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
+  __ret_254; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+  float64x2_t __ret_255; \
+  float64x2_t __s0_255 = __p0_255; \
+  float64x2_t __s1_255 = __p1_255; \
+  float64x1_t __s2_255 = __p2_255; \
+float64x1_t __reint_255 = __s2_255; \
+uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
+  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
+  __ret_255; \
+})
+#else
+#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+  float64x2_t __ret_256; \
+  float64x2_t __s0_256 = __p0_256; \
+  float64x2_t __s1_256 = __p1_256; \
+  float64x1_t __s2_256 = __p2_256; \
+  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
+  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
+float64x1_t __reint_256 = __s2_256; \
+uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
+  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
+  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
+  __ret_256; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+  float64x1_t __ret_257; \
+  float64x1_t __s0_257 = __p0_257; \
+  float64x1_t __s1_257 = __p1_257; \
+  float64x2_t __s2_257 = __p2_257; \
+float64x2_t __reint_257 = __s2_257; \
+uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
+  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
+  __ret_257; \
+})
+#else
+#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+  float64x1_t __ret_258; \
+  float64x1_t __s0_258 = __p0_258; \
+  float64x1_t __s1_258 = __p1_258; \
+  float64x2_t __s2_258 = __p2_258; \
+  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
+float64x2_t __reint_258 = __rev2_258; \
+uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
+  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
+  __ret_258; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+  float64x2_t __ret_259; \
+  float64x2_t __s0_259 = __p0_259; \
+  float64x2_t __s1_259 = __p1_259; \
+  float64x2_t __s2_259 = __p2_259; \
+float64x2_t __reint_259 = __s2_259; \
+uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
+  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
+  __ret_259; \
+})
+#else
+#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+  float64x2_t __ret_260; \
+  float64x2_t __s0_260 = __p0_260; \
+  float64x2_t __s1_260 = __p1_260; \
+  float64x2_t __s2_260 = __p2_260; \
+  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
+  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
+  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
+float64x2_t __reint_260 = __rev2_260; \
+uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
+  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
+  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
+  __ret_260; \
+})
+#endif
+
+#endif
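+/* Descriptive note: dot-product intrinsics (UDOT/SDOT).  Each 32-bit
+   accumulator lane gains the dot product of the corresponding four bytes of
+   the two byte vectors; for vdotq_s32(acc, a, b), roughly:
+     acc[i] += a[4i]*b[4i] + a[4i+1]*b[4i+1] + a[4i+2]*b[4i+2] + a[4i+3]*b[4i+3]
+   An illustrative use, assuming byte vectors a and b are already loaded:
+     int32x4_t acc = vdupq_n_s32(0);
+     acc = vdotq_s32(acc, a, b);   // four 4-way dot products per call */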
+#if defined(__ARM_FEATURE_DOTPROD)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
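+/* Descriptive note: the _lane forms reuse one group of four bytes from __p2
+   for every accumulator lane.  The group is addressed as a single 32-bit
+   lane (hence the u32 reinterpret) and broadcast with splat(q)_lane_u32/s32
+   before the plain vdot call. */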
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+  uint32x4_t __ret_261; \
+  uint32x4_t __s0_261 = __p0_261; \
+  uint8x16_t __s1_261 = __p1_261; \
+  uint8x8_t __s2_261 = __p2_261; \
+uint8x8_t __reint_261 = __s2_261; \
+uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
+  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
+  __ret_261; \
+})
+#else
+#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+  uint32x4_t __ret_262; \
+  uint32x4_t __s0_262 = __p0_262; \
+  uint8x16_t __s1_262 = __p1_262; \
+  uint8x8_t __s2_262 = __p2_262; \
+  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
+  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x8_t __reint_262 = __rev2_262; \
+uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
+  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
+  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
+  __ret_262; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+  int32x4_t __ret_263; \
+  int32x4_t __s0_263 = __p0_263; \
+  int8x16_t __s1_263 = __p1_263; \
+  int8x8_t __s2_263 = __p2_263; \
+int8x8_t __reint_263 = __s2_263; \
+int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
+  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
+  __ret_263; \
+})
+#else
+#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+  int32x4_t __ret_264; \
+  int32x4_t __s0_264 = __p0_264; \
+  int8x16_t __s1_264 = __p1_264; \
+  int8x8_t __s2_264 = __p2_264; \
+  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
+  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x8_t __reint_264 = __rev2_264; \
+int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
+  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
+  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
+  __ret_264; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+  uint32x2_t __ret_265; \
+  uint32x2_t __s0_265 = __p0_265; \
+  uint8x8_t __s1_265 = __p1_265; \
+  uint8x8_t __s2_265 = __p2_265; \
+uint8x8_t __reint_265 = __s2_265; \
+uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
+  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
+  __ret_265; \
+})
+#else
+#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+  uint32x2_t __ret_266; \
+  uint32x2_t __s0_266 = __p0_266; \
+  uint8x8_t __s1_266 = __p1_266; \
+  uint8x8_t __s2_266 = __p2_266; \
+  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
+  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x8_t __reint_266 = __rev2_266; \
+uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
+  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
+  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
+  __ret_266; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+  int32x2_t __ret_267; \
+  int32x2_t __s0_267 = __p0_267; \
+  int8x8_t __s1_267 = __p1_267; \
+  int8x8_t __s2_267 = __p2_267; \
+int8x8_t __reint_267 = __s2_267; \
+int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
+  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
+  __ret_267; \
+})
+#else
+#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+  int32x2_t __ret_268; \
+  int32x2_t __s0_268 = __p0_268; \
+  int8x8_t __s1_268 = __p1_268; \
+  int8x8_t __s2_268 = __p2_268; \
+  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
+  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x8_t __reint_268 = __rev2_268; \
+int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
+  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
+  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
+  __ret_268; \
+})
+#endif
+
+#endif
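+/* Descriptive note: on AArch64 the dot products also accept a 128-bit lane
+   source; the _laneq forms below select one of four byte groups via
+   splat(q)_laneq before the plain vdot call. */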
+#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+  uint32x4_t __ret_269; \
+  uint32x4_t __s0_269 = __p0_269; \
+  uint8x16_t __s1_269 = __p1_269; \
+  uint8x16_t __s2_269 = __p2_269; \
+uint8x16_t __reint_269 = __s2_269; \
+uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
+  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
+  __ret_269; \
+})
+#else
+#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+  uint32x4_t __ret_270; \
+  uint32x4_t __s0_270 = __p0_270; \
+  uint8x16_t __s1_270 = __p1_270; \
+  uint8x16_t __s2_270 = __p2_270; \
+  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
+  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_270 = __rev2_270; \
+uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
+  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
+  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
+  __ret_270; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+  int32x4_t __ret_271; \
+  int32x4_t __s0_271 = __p0_271; \
+  int8x16_t __s1_271 = __p1_271; \
+  int8x16_t __s2_271 = __p2_271; \
+int8x16_t __reint_271 = __s2_271; \
+int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
+  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
+  __ret_271; \
+})
+#else
+#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+  int32x4_t __ret_272; \
+  int32x4_t __s0_272 = __p0_272; \
+  int8x16_t __s1_272 = __p1_272; \
+  int8x16_t __s2_272 = __p2_272; \
+  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
+  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_272 = __rev2_272; \
+int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
+  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
+  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
+  __ret_272; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+  uint32x2_t __ret_273; \
+  uint32x2_t __s0_273 = __p0_273; \
+  uint8x8_t __s1_273 = __p1_273; \
+  uint8x16_t __s2_273 = __p2_273; \
+uint8x16_t __reint_273 = __s2_273; \
+uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
+  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
+  __ret_273; \
+})
+#else
+#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+  uint32x2_t __ret_274; \
+  uint32x2_t __s0_274 = __p0_274; \
+  uint8x8_t __s1_274 = __p1_274; \
+  uint8x16_t __s2_274 = __p2_274; \
+  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
+  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_274 = __rev2_274; \
+uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
+  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
+  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
+  __ret_274; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+  int32x2_t __ret_275; \
+  int32x2_t __s0_275 = __p0_275; \
+  int8x8_t __s1_275 = __p1_275; \
+  int8x16_t __s2_275 = __p2_275; \
+int8x16_t __reint_275 = __s2_275; \
+int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
+  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
+  __ret_275; \
+})
+#else
+#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+  int32x2_t __ret_276; \
+  int32x2_t __s0_276 = __p0_276; \
+  int8x8_t __s1_276 = __p1_276; \
+  int8x16_t __s2_276 = __p2_276; \
+  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
+  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_276 = __rev2_276; \
+int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
+  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
+  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
+  __ret_276; \
+})
+#endif
+
+#endif
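+/* Descriptive note: fused multiply-add.  vfma(q)_f32(a, b, c) computes
+   a + b*c per lane with a single rounding (VFMA/FMLA), unlike a separate
+   multiply followed by an add.  Illustrative use:
+     float32x4_t y = vfmaq_f32(acc, x, w);   // acc + x*w, rounded once */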
+#if defined(__ARM_FEATURE_FMA)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
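+/* Descriptive note: the multiply-subtract forms compute __p0 - __p1*__p2 by
+   negating the first multiplicand, which preserves the fused,
+   single-rounding behaviour. */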
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
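+/* Descriptive note: FP16 FML intrinsics (FMLAL/FMLSL) perform widening
+   multiply-accumulate from float16 into float32.  The _low forms use the
+   bottom half of the f16 operands and the _high forms the top half; vfmlal
+   accumulates a + b*c and vfmlsl in effect computes a - b*c, with the
+   products widened to f32. */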
+#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#endif
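
The vfmlal*/vfmlsl* group above (FP16 fused multiply-add long, the FEAT_FHM extension, guarded by a feature macro opened earlier in the header) widens half-precision lanes to single precision and accumulates into a float32 vector: the _low forms consume lanes 0..n/2-1 of the f16 operands, the _high forms lanes n/2..n-1, and the sl forms subtract the widened product instead of adding it. A scalar reference model for vfmlalq_low_f16 (fmlalq_low_ref is our name); it should be bit-exact, since an f16 value converts exactly to f32 and the product of two f16 values fits in an f32 significand, so only the final add rounds, as in the fused instruction:

#include <arm_neon.h>

// ret[i] = acc[i] + (float)a[i] * (float)b[i] for i = 0..3,
// i.e. the low half of the f16 inputs, widened before the multiply.
static float32x4_t fmlalq_low_ref(float32x4_t acc, float16x8_t a, float16x8_t b) {
  float r[4];
  float16_t x[8], y[8];
  vst1q_f32(r, acc);
  vst1q_f16(x, a);
  vst1q_f16(y, b);
  for (int i = 0; i < 4; ++i)
    r[i] += (float)x[i] * (float)y[i];
  return vld1q_f32(r);
}
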
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
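vabdq_f16/vabd_f16 compute the lane-wise absolute difference |a[i] - b[i]|, and vabsq_f16/vabs_f16 the lane-wise absolute value. A short usage sketch (like all examples here, it assumes an FP16-capable target):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t a = {1.0, 5.0, -2.0, 0.5};
  float16x4_t b = {4.0, 1.0,  3.0, 0.5};
  float16_t d[4];
  vst1_f16(d, vabd_f16(a, b));  // |a - b| per lane
  printf("%g %g %g %g\n", (float)d[0], (float)d[1],
         (float)d[2], (float)d[3]);  // 3 4 5 0
  return 0;
}
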
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
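vaddq_f16/vadd_f16 can be written as plain `+` because clang's vector extensions define the operators lane-wise. vbslq_f16/vbsl_f16 are a bitwise select: each result bit comes from the second operand where the mask bit is set and from the third where it is clear. With an all-ones/all-zeros mask from the comparison intrinsics that follow, this gives a branch-free lane-wise conditional, sketched here:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t a = {1.0, -2.0, 3.0, -4.0};
  float16x4_t zero = vdup_n_f16(0.0);
  uint16x4_t neg = vclt_f16(a, zero);            // all-ones lanes where a < 0
  float16x4_t clamped = vbsl_f16(neg, zero, a);  // negatives replaced by 0
  float16_t out[4];
  vst1_f16(out, clamped);
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);  // 1 0 3 0
  return 0;
}
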
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
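All of the comparison intrinsics above return unsigned lanes of all ones for true and all zeros for false, which is what makes them composable with vbsl. The vcage/vcagt/vcale/vcalt forms compare magnitudes, e.g. vcage_f16(a, b) tests |a[i]| >= |b[i]|, and the ..z forms compare against zero without a second operand. A quick check of the mask encoding:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t a = {1.0, 2.0, 3.0, 4.0};
  float16x4_t b = {2.0, 2.0, 2.0, 2.0};
  uint16_t m[4];
  vst1_u16(m, vcgt_f16(a, b));  // a > b ? 0xffff : 0x0000 per lane
  printf("%04x %04x %04x %04x\n", m[0], m[1], m[2], m[3]);  // 0000 0000 ffff ffff
  return 0;
}
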
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
+  float16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
+  float16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
+  float16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
+  float16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
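The vcvt_n_* macros convert with an implied binary point: vcvt_n_f16_s16(v, n) yields v / 2^n, and vcvt_n_s16_f16(v, n) yields v * 2^n truncated toward zero (saturating on overflow), with n required to be a constant in 1..16. That makes them a direct bridge between Qm.n fixed-point data and half floats; a round-trip sketch for Q13.3 values:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  int16x4_t q = {16, 24, -8, 1};           // Q13.3 fixed point
  float16x4_t f = vcvt_n_f16_s16(q, 3);    // value / 2^3
  int16x4_t back = vcvt_n_s16_f16(f, 3);   // value * 2^3, toward zero
  float16_t out[4];
  int16_t bi[4];
  vst1_f16(out, f);
  vst1_s16(bi, back);
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);    // 2 3 -1 0.125
  printf("%d %d %d %d\n", bi[0], bi[1], bi[2], bi[3]);  // 16 24 -8 1
  return 0;
}
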
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
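The plain vcvt(q)_s16_f16/u16_f16 above truncate toward zero, and the suffixed families pick an IEEE rounding mode: vcvta rounds to nearest with ties away from zero, vcvtm toward minus infinity (floor), vcvtn to nearest with ties to even, and vcvtp toward plus infinity (ceil). The differences only show at halfway and fractional values:

#include <arm_neon.h>
#include <stdio.h>

static void show(const char *name, int16x4_t v) {
  int16_t x[4];
  vst1_s16(x, v);
  printf("%s: %d %d %d %d\n", name, x[0], x[1], x[2], x[3]);
}

int main(void) {
  float16x4_t f = {1.5, 2.5, -1.5, -0.7};
  show("vcvt ", vcvt_s16_f16(f));   // truncate:     1  2 -1  0
  show("vcvta", vcvta_s16_f16(f));  // ties away:    2  3 -2 -1
  show("vcvtm", vcvtm_s16_f16(f));  // floor:        1  2 -2 -1
  show("vcvtn", vcvtn_s16_f16(f));  // ties to even: 2  2 -2 -1
  show("vcvtp", vcvtp_s16_f16(f));  // ceil:         2  3 -1  0
  return 0;
}
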
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
+  __ret; \
+})
+#else
+#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
+  __ret; \
+})
+#else
+#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
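vextq_f16(a, b, n) treats a followed by b as one 16-lane sequence and returns the 8 consecutive lanes starting at lane n of a (n must be a constant 0..7); the 4-lane form works the same way and is the usual tool for sliding windows over loaded data:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t a = {0.0, 1.0, 2.0, 3.0};
  float16x4_t b = {4.0, 5.0, 6.0, 7.0};
  float16_t out[4];
  vst1_f16(out, vext_f16(a, b, 1));  // lanes 1..3 of a, then lane 0 of b
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);  // 1 2 3 4
  return 0;
}
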
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = vfmaq_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = vfma_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
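vfmaq_f16(a, b, c) computes a + b * c with a single rounding, accumulator first, and vfmsq_f16 is literally vfmaq_f16(a, -b, c) as defined above, i.e. a fused a - b * c; note how the big-endian body calls __noswap_vfmaq_f16 so the operands are reversed only once. A usage sketch:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t acc = vdup_n_f16(1.0);
  float16x4_t b = {1.0, 2.0, 3.0, 4.0};
  float16x4_t c = vdup_n_f16(0.5);
  float16_t out[4];
  vst1_f16(out, vfma_f16(acc, b, c));  // 1 + b*0.5
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);  // 1.5 2 2.5 3
  vst1_f16(out, vfms_f16(acc, b, c));  // 1 - b*0.5
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);  // 0.5 0 -0.5 -1
  return 0;
}
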
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
+  float16x8_t __ret_277; \
+  float16x8_t __s0_277 = __p0_277; \
+  float16x4_t __s1_277 = __p1_277; \
+  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
+  __ret_277; \
+})
+#else
+#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
+  float16x8_t __ret_278; \
+  float16x8_t __s0_278 = __p0_278; \
+  float16x4_t __s1_278 = __p1_278; \
+  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
+  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
+  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_278; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
+  float16x4_t __ret_279; \
+  float16x4_t __s0_279 = __p0_279; \
+  float16x4_t __s1_279 = __p1_279; \
+  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
+  __ret_279; \
+})
+#else
+#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
+  float16x4_t __ret_280; \
+  float16x4_t __s0_280 = __p0_280; \
+  float16x4_t __s1_280 = __p1_280; \
+  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
+  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
+  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
+  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
+  __ret_280; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
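vmul(q)_lane_f16(a, v, n) multiplies every lane of a by lane n of v, broadcasting through splat(q)_lane_f16, and vmul(q)_n_f16 broadcasts a scalar instead; the _277.._280 suffixes on the macro locals are presumably the generator keeping names unique across expansions. For example:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float16x4_t a = {1.0, 2.0, 3.0, 4.0};
  float16x4_t v = {10.0, 20.0, 30.0, 40.0};
  float16_t out[4];
  vst1_f16(out, vmul_lane_f16(a, v, 2));  // every lane of a times v[2] == 30
  printf("%g %g %g %g\n", (float)out[0], (float)out[1],
         (float)out[2], (float)out[3]);   // 30 60 90 120
  return 0;
}
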
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+  return __ret;
+}
+#else
+__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrev64_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  return __ret;
+}
+#else
+__ai float16x4_t vrev64_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
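+// Everything in this block carries an additional __aarch64__ guard: the
+// vdiv*, scalar lane/laneq, reduction (vmaxv/vminv family), vmulx*, pairwise,
+// rounding/sqrt, and vtrn1/vuzp1/vzip1-style FP16 intrinsics below are
+// exposed only when targeting AArch64 with FP16 vector arithmetic.
+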
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
+  __ret; \
+})
+#else
+#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
+  float16_t __ret_281; \
+  float16_t __s0_281 = __p0_281; \
+  float16_t __s1_281 = __p1_281; \
+  float16x4_t __s2_281 = __p2_281; \
+  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
+  __ret_281; \
+})
+#else
+#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
+  float16_t __ret_282; \
+  float16_t __s0_282 = __p0_282; \
+  float16_t __s1_282 = __p1_282; \
+  float16x4_t __s2_282 = __p2_282; \
+  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
+  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
+  __ret_282; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
+  float16x8_t __ret_283; \
+  float16x8_t __s0_283 = __p0_283; \
+  float16x8_t __s1_283 = __p1_283; \
+  float16x4_t __s2_283 = __p2_283; \
+  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
+  __ret_283; \
+})
+#else
+#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
+  float16x8_t __ret_284; \
+  float16x8_t __s0_284 = __p0_284; \
+  float16x8_t __s1_284 = __p1_284; \
+  float16x4_t __s2_284 = __p2_284; \
+  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
+  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
+  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_284; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
+  float16x4_t __ret_285; \
+  float16x4_t __s0_285 = __p0_285; \
+  float16x4_t __s1_285 = __p1_285; \
+  float16x4_t __s2_285 = __p2_285; \
+  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
+  __ret_285; \
+})
+#else
+#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
+  float16x4_t __ret_286; \
+  float16x4_t __s0_286 = __p0_286; \
+  float16x4_t __s1_286 = __p1_286; \
+  float16x4_t __s2_286 = __p2_286; \
+  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
+  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
+  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
+  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
+  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
+  __ret_286; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
+  float16_t __ret_287; \
+  float16_t __s0_287 = __p0_287; \
+  float16_t __s1_287 = __p1_287; \
+  float16x8_t __s2_287 = __p2_287; \
+  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
+  __ret_287; \
+})
+#else
+#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
+  float16_t __ret_288; \
+  float16_t __s0_288 = __p0_288; \
+  float16_t __s1_288 = __p1_288; \
+  float16x8_t __s2_288 = __p2_288; \
+  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
+  __ret_288; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
+  float16x8_t __ret_289; \
+  float16x8_t __s0_289 = __p0_289; \
+  float16x8_t __s1_289 = __p1_289; \
+  float16x8_t __s2_289 = __p2_289; \
+  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
+  __ret_289; \
+})
+#else
+#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
+  float16x8_t __ret_290; \
+  float16x8_t __s0_290 = __p0_290; \
+  float16x8_t __s1_290 = __p1_290; \
+  float16x8_t __s2_290 = __p2_290; \
+  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
+  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_290; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
+  float16x4_t __ret_291; \
+  float16x4_t __s0_291 = __p0_291; \
+  float16x4_t __s1_291 = __p1_291; \
+  float16x8_t __s2_291 = __p2_291; \
+  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
+  __ret_291; \
+})
+#else
+#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
+  float16x4_t __ret_292; \
+  float16x4_t __s0_292 = __p0_292; \
+  float16x4_t __s1_292 = __p1_292; \
+  float16x8_t __s2_292 = __p2_292; \
+  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
+  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
+  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
+  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
+  __ret_292; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vminnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vminnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vminvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vminv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
+  float16x8_t __ret_293; \
+  float16x8_t __s0_293 = __p0_293; \
+  float16x8_t __s1_293 = __p1_293; \
+  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
+  __ret_293; \
+})
+#else
+#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
+  float16x8_t __ret_294; \
+  float16x8_t __s0_294 = __p0_294; \
+  float16x8_t __s1_294 = __p1_294; \
+  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
+  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
+  float16x4_t __ret_295; \
+  float16x4_t __s0_295 = __p0_295; \
+  float16x8_t __s1_295 = __p1_295; \
+  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
+  __ret_295; \
+})
+#else
+#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
+  float16x4_t __ret_296; \
+  float16x4_t __s0_296 = __p0_296; \
+  float16x8_t __s1_296 = __p1_296; \
+  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
+  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
+  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
+  __ret_296; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
+  float16x8_t __ret_297; \
+  float16x8_t __s0_297 = __p0_297; \
+  float16x4_t __s1_297 = __p1_297; \
+  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
+  __ret_297; \
+})
+#else
+#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
+  float16x8_t __ret_298; \
+  float16x8_t __s0_298 = __p0_298; \
+  float16x4_t __s1_298 = __p1_298; \
+  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
+  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
+  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_298; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
+  float16x4_t __ret_299; \
+  float16x4_t __s0_299 = __p0_299; \
+  float16x4_t __s1_299 = __p1_299; \
+  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
+  __ret_299; \
+})
+#else
+#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
+  float16x4_t __ret_300; \
+  float16x4_t __s0_300 = __p0_300; \
+  float16x4_t __s1_300 = __p1_300; \
+  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
+  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
+  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
+  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
+  __ret_300; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
+  float16x8_t __ret_301; \
+  float16x8_t __s0_301 = __p0_301; \
+  float16x8_t __s1_301 = __p1_301; \
+  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
+  __ret_301; \
+})
+#else
+#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
+  float16x8_t __ret_302; \
+  float16x8_t __s0_302 = __p0_302; \
+  float16x8_t __s1_302 = __p1_302; \
+  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
+  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
+  float16x4_t __ret_303; \
+  float16x4_t __s0_303 = __p0_303; \
+  float16x8_t __s1_303 = __p1_303; \
+  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
+  __ret_303; \
+})
+#else
+#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
+  float16x4_t __ret_304; \
+  float16x4_t __s0_304 = __p0_304; \
+  float16x8_t __s1_304 = __p1_304; \
+  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
+  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
+  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
+  __ret_304; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
+  __ret; \
+})
+#else
+#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
+  __ret; \
+})
+#else
+#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrndi_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrndi_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+  return __ret;
+}
+#else
+__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+  return __ret;
+}
+#else
+__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+  return __ret;
+}
+#else
+__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+  return __ret;
+}
+#else
+__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+  return __ret;
+}
+#else
+__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+  return __ret;
+}
+#else
+__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
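+// The __ARM_FEATURE_MATMUL_INT8 section that follows provides the I8MM
+// (int8 matrix multiply) intrinsics. vmmlaq_u32/vmmlaq_s32 treat __p1 and
+// __p2 as 2x8 matrices of 8-bit elements and accumulate the four row-by-row
+// dot products into the 2x2 int32 accumulator __p0; roughly,
+// __ret[2*i+j] = __p0[2*i+j] + dot(__p1[8*i..8*i+7], __p2[8*j..8*j+7]).
+// vusdotq_s32 and friends form mixed-sign (unsigned-by-signed) four-element
+// dot products per 32-bit lane.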
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
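+
+// The __noswap_ variants here and below skip the endianness reversal
+// entirely. They exist for big-endian macro expansions whose operands have
+// already been reversed into little-endian lane order, so that the builtin
+// is not applied to doubly-reversed vectors.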
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
+  int32x4_t __ret_305; \
+  int32x4_t __s0_305 = __p0_305; \
+  uint8x16_t __s1_305 = __p1_305; \
+  int8x8_t __s2_305 = __p2_305; \
+  int8x8_t __reint_305 = __s2_305; \
+  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
+  __ret_305; \
+})
+#else
+#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
+  int32x4_t __ret_306; \
+  int32x4_t __s0_306 = __p0_306; \
+  uint8x16_t __s1_306 = __p1_306; \
+  int8x8_t __s2_306 = __p2_306; \
+  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
+  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_306 = __rev2_306; \
+  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
+  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
+  __ret_306; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
+  int32x2_t __ret_307; \
+  int32x2_t __s0_307 = __p0_307; \
+  uint8x8_t __s1_307 = __p1_307; \
+  int8x8_t __s2_307 = __p2_307; \
+  int8x8_t __reint_307 = __s2_307; \
+  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
+  __ret_307; \
+})
+#else
+#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
+  int32x2_t __ret_308; \
+  int32x2_t __s0_308 = __p0_308; \
+  uint8x8_t __s1_308 = __p1_308; \
+  int8x8_t __s2_308 = __p2_308; \
+  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
+  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_308 = __rev2_308; \
+  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
+  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
+  __ret_308; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
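+// The __ARM_FEATURE_QRDMX section that follows covers the Armv8.1-A rounding
+// doubling multiply-accumulate intrinsics. Per lane, vqrdmlah computes
+// roughly sat(__p0 + ((2*__p1*__p2 + (1 << (esize-1))) >> esize)), and
+// vqrdmlsh the same with the product subtracted, where esize is the element
+// width in bits and sat() saturates to the signed range of the element type.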
+#if defined(__ARM_FEATURE_QRDMX)
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
+  int32x4_t __ret_309; \
+  int32x4_t __s0_309 = __p0_309; \
+  int32x4_t __s1_309 = __p1_309; \
+  int32x2_t __s2_309 = __p2_309; \
+  __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \
+  __ret_309; \
+})
+#else
+#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
+  int32x4_t __ret_310; \
+  int32x4_t __s0_310 = __p0_310; \
+  int32x4_t __s1_310 = __p1_310; \
+  int32x2_t __s2_310 = __p2_310; \
+  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
+  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
+  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
+  __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \
+  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
+  __ret_310; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
+  int16x8_t __ret_311; \
+  int16x8_t __s0_311 = __p0_311; \
+  int16x8_t __s1_311 = __p1_311; \
+  int16x4_t __s2_311 = __p2_311; \
+  __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \
+  __ret_311; \
+})
+#else
+#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
+  int16x8_t __ret_312; \
+  int16x8_t __s0_312 = __p0_312; \
+  int16x8_t __s1_312 = __p1_312; \
+  int16x4_t __s2_312 = __p2_312; \
+  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
+  __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \
+  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_312; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
+  int32x2_t __ret_313; \
+  int32x2_t __s0_313 = __p0_313; \
+  int32x2_t __s1_313 = __p1_313; \
+  int32x2_t __s2_313 = __p2_313; \
+  __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \
+  __ret_313; \
+})
+#else
+#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
+  int32x2_t __ret_314; \
+  int32x2_t __s0_314 = __p0_314; \
+  int32x2_t __s1_314 = __p1_314; \
+  int32x2_t __s2_314 = __p2_314; \
+  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
+  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
+  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
+  __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \
+  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
+  __ret_314; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
+  int16x4_t __ret_315; \
+  int16x4_t __s0_315 = __p0_315; \
+  int16x4_t __s1_315 = __p1_315; \
+  int16x4_t __s2_315 = __p2_315; \
+  __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \
+  __ret_315; \
+})
+#else
+#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
+  int16x4_t __ret_316; \
+  int16x4_t __s0_316 = __p0_316; \
+  int16x4_t __s1_316 = __p1_316; \
+  int16x4_t __s2_316 = __p2_316; \
+  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
+  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
+  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
+  __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \
+  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
+  __ret_316; \
+})
+#endif
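+
+// The _lane_ forms above broadcast one element of the third argument before
+// the multiply-accumulate. As a hypothetical use, a fixed-point filter step
+// could write vqrdmlahq_lane_s16(acc, x, coeffs, 0) to accumulate every lane
+// of x scaled by coeffs[0] with rounding and saturation.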
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
+  int32x4_t __ret_317; \
+  int32x4_t __s0_317 = __p0_317; \
+  int32x4_t __s1_317 = __p1_317; \
+  int32x2_t __s2_317 = __p2_317; \
+  __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \
+  __ret_317; \
+})
+#else
+#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
+  int32x4_t __ret_318; \
+  int32x4_t __s0_318 = __p0_318; \
+  int32x4_t __s1_318 = __p1_318; \
+  int32x2_t __s2_318 = __p2_318; \
+  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
+  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
+  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
+  __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \
+  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
+  __ret_318; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
+  int16x8_t __ret_319; \
+  int16x8_t __s0_319 = __p0_319; \
+  int16x8_t __s1_319 = __p1_319; \
+  int16x4_t __s2_319 = __p2_319; \
+  __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \
+  __ret_319; \
+})
+#else
+#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
+  int16x8_t __ret_320; \
+  int16x8_t __s0_320 = __p0_320; \
+  int16x8_t __s1_320 = __p1_320; \
+  int16x4_t __s2_320 = __p2_320; \
+  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
+  __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \
+  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_320; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
+  int32x2_t __ret_321; \
+  int32x2_t __s0_321 = __p0_321; \
+  int32x2_t __s1_321 = __p1_321; \
+  int32x2_t __s2_321 = __p2_321; \
+  __ret_321 = vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \
+  __ret_321; \
+})
+#else
+#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
+  int32x2_t __ret_322; \
+  int32x2_t __s0_322 = __p0_322; \
+  int32x2_t __s1_322 = __p1_322; \
+  int32x2_t __s2_322 = __p2_322; \
+  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
+  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
+  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
+  __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \
+  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
+  __ret_322; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
+  int16x4_t __ret_323; \
+  int16x4_t __s0_323 = __p0_323; \
+  int16x4_t __s1_323 = __p1_323; \
+  int16x4_t __s2_323 = __p2_323; \
+  __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \
+  __ret_323; \
+})
+#else
+#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
+  int16x4_t __ret_324; \
+  int16x4_t __s0_324 = __p0_324; \
+  int16x4_t __s1_324 = __p1_324; \
+  int16x4_t __s2_324 = __p2_324; \
+  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
+  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
+  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
+  __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \
+  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
+  __ret_324; \
+})
+#endif
+
+#endif
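+// The AArch64-only section that follows adds scalar forms (vqrdmlahs_s32,
+// vqrdmlahh_s16, and their vqrdmlsh counterparts) operating on a single
+// element. Their _lane_/_laneq_ wrappers extract one lane and then call the
+// scalar intrinsic, which is why the big-endian variants only reverse the
+// vector argument.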
+#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int32_t __ret_325; \
+  int32_t __s0_325 = __p0_325; \
+  int32_t __s1_325 = __p1_325; \
+  int32x2_t __s2_325 = __p2_325; \
+  __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \
+  __ret_325; \
+})
+#else
+#define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int32_t __ret_326; \
+  int32_t __s0_326 = __p0_326; \
+  int32_t __s1_326 = __p1_326; \
+  int32x2_t __s2_326 = __p2_326; \
+  int32x2_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \
+  __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \
+  __ret_326; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  int16_t __ret_327; \
+  int16_t __s0_327 = __p0_327; \
+  int16_t __s1_327 = __p1_327; \
+  int16x4_t __s2_327 = __p2_327; \
+  __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \
+  __ret_327; \
+})
+#else
+#define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  int16_t __ret_328; \
+  int16_t __s0_328 = __p0_328; \
+  int16_t __s1_328 = __p1_328; \
+  int16x4_t __s2_328 = __p2_328; \
+  int16x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \
+  __ret_328; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32_t __ret_329; \
+  int32_t __s0_329 = __p0_329; \
+  int32_t __s1_329 = __p1_329; \
+  int32x4_t __s2_329 = __p2_329; \
+  __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \
+  __ret_329; \
+})
+#else
+#define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32_t __ret_330; \
+  int32_t __s0_330 = __p0_330; \
+  int32_t __s1_330 = __p1_330; \
+  int32x4_t __s2_330 = __p2_330; \
+  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
+  __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \
+  __ret_330; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int16_t __ret_331; \
+  int16_t __s0_331 = __p0_331; \
+  int16_t __s1_331 = __p1_331; \
+  int16x8_t __s2_331 = __p2_331; \
+  __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \
+  __ret_331; \
+})
+#else
+#define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int16_t __ret_332; \
+  int16_t __s0_332 = __p0_332; \
+  int16_t __s1_332 = __p1_332; \
+  int16x8_t __s2_332 = __p2_332; \
+  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \
+  __ret_332; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+  int32x4_t __ret_333; \
+  int32x4_t __s0_333 = __p0_333; \
+  int32x4_t __s1_333 = __p1_333; \
+  int32x4_t __s2_333 = __p2_333; \
+  __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \
+  __ret_333; \
+})
+#else
+#define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+  int32x4_t __ret_334; \
+  int32x4_t __s0_334 = __p0_334; \
+  int32x4_t __s1_334 = __p1_334; \
+  int32x4_t __s2_334 = __p2_334; \
+  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
+  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
+  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
+  __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \
+  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
+  __ret_334; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
+  int16x8_t __ret_335; \
+  int16x8_t __s0_335 = __p0_335; \
+  int16x8_t __s1_335 = __p1_335; \
+  int16x8_t __s2_335 = __p2_335; \
+  __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \
+  __ret_335; \
+})
+#else
+#define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
+  int16x8_t __ret_336; \
+  int16x8_t __s0_336 = __p0_336; \
+  int16x8_t __s1_336 = __p1_336; \
+  int16x8_t __s2_336 = __p2_336; \
+  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \
+  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_336; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
+  int32x2_t __ret_337; \
+  int32x2_t __s0_337 = __p0_337; \
+  int32x2_t __s1_337 = __p1_337; \
+  int32x4_t __s2_337 = __p2_337; \
+  __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \
+  __ret_337; \
+})
+#else
+#define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
+  int32x2_t __ret_338; \
+  int32x2_t __s0_338 = __p0_338; \
+  int32x2_t __s1_338 = __p1_338; \
+  int32x4_t __s2_338 = __p2_338; \
+  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
+  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
+  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
+  __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \
+  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
+  __ret_338; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
+  int16x4_t __ret_339; \
+  int16x4_t __s0_339 = __p0_339; \
+  int16x4_t __s1_339 = __p1_339; \
+  int16x8_t __s2_339 = __p2_339; \
+  __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \
+  __ret_339; \
+})
+#else
+#define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
+  int16x4_t __ret_340; \
+  int16x4_t __s0_340 = __p0_340; \
+  int16x4_t __s1_340 = __p1_340; \
+  int16x8_t __s2_340 = __p2_340; \
+  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
+  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
+  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \
+  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
+  __ret_340; \
+})
+#endif
+
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
+  int32_t __ret_341; \
+  int32_t __s0_341 = __p0_341; \
+  int32_t __s1_341 = __p1_341; \
+  int32x2_t __s2_341 = __p2_341; \
+  __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \
+  __ret_341; \
+})
+#else
+#define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
+  int32_t __ret_342; \
+  int32_t __s0_342 = __p0_342; \
+  int32_t __s1_342 = __p1_342; \
+  int32x2_t __s2_342 = __p2_342; \
+  int32x2_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \
+  __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \
+  __ret_342; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
+  int16_t __ret_343; \
+  int16_t __s0_343 = __p0_343; \
+  int16_t __s1_343 = __p1_343; \
+  int16x4_t __s2_343 = __p2_343; \
+  __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \
+  __ret_343; \
+})
+#else
+#define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
+  int16_t __ret_344; \
+  int16_t __s0_344 = __p0_344; \
+  int16_t __s1_344 = __p1_344; \
+  int16x4_t __s2_344 = __p2_344; \
+  int16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
+  __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \
+  __ret_344; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
+  int32_t __ret_345; \
+  int32_t __s0_345 = __p0_345; \
+  int32_t __s1_345 = __p1_345; \
+  int32x4_t __s2_345 = __p2_345; \
+  __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \
+  __ret_345; \
+})
+#else
+#define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
+  int32_t __ret_346; \
+  int32_t __s0_346 = __p0_346; \
+  int32_t __s1_346 = __p1_346; \
+  int32x4_t __s2_346 = __p2_346; \
+  int32x4_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \
+  __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \
+  __ret_346; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
+  int16_t __ret_347; \
+  int16_t __s0_347 = __p0_347; \
+  int16_t __s1_347 = __p1_347; \
+  int16x8_t __s2_347 = __p2_347; \
+  __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \
+  __ret_347; \
+})
+#else
+#define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
+  int16_t __ret_348; \
+  int16_t __s0_348 = __p0_348; \
+  int16_t __s1_348 = __p1_348; \
+  int16x8_t __s2_348 = __p2_348; \
+  int16x8_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \
+  __ret_348; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
+  int32x4_t __ret_349; \
+  int32x4_t __s0_349 = __p0_349; \
+  int32x4_t __s1_349 = __p1_349; \
+  int32x4_t __s2_349 = __p2_349; \
+  __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \
+  __ret_349; \
+})
+#else
+#define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
+  int32x4_t __ret_350; \
+  int32x4_t __s0_350 = __p0_350; \
+  int32x4_t __s1_350 = __p1_350; \
+  int32x4_t __s2_350 = __p2_350; \
+  int32x4_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \
+  int32x4_t __rev1_350;  __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \
+  int32x4_t __rev2_350;  __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \
+  __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, __noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
+  int16x8_t __ret_351; \
+  int16x8_t __s0_351 = __p0_351; \
+  int16x8_t __s1_351 = __p1_351; \
+  int16x8_t __s2_351 = __p2_351; \
+  __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \
+  __ret_351; \
+})
+#else
+#define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
+  int16x8_t __ret_352; \
+  int16x8_t __s0_352 = __p0_352; \
+  int16x8_t __s1_352 = __p1_352; \
+  int16x8_t __s2_352 = __p2_352; \
+  int16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_352;  __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
+  int32x2_t __ret_353; \
+  int32x2_t __s0_353 = __p0_353; \
+  int32x2_t __s1_353 = __p1_353; \
+  int32x4_t __s2_353 = __p2_353; \
+  __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \
+  __ret_353; \
+})
+#else
+#define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
+  int32x2_t __ret_354; \
+  int32x2_t __s0_354 = __p0_354; \
+  int32x2_t __s1_354 = __p1_354; \
+  int32x4_t __s2_354 = __p2_354; \
+  int32x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  int32x2_t __rev1_354;  __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \
+  int32x4_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \
+  __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
+  int16x4_t __ret_355; \
+  int16x4_t __s0_355 = __p0_355; \
+  int16x4_t __s1_355 = __p1_355; \
+  int16x8_t __s2_355 = __p2_355; \
+  __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \
+  __ret_355; \
+})
+#else
+#define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
+  int16x4_t __ret_356; \
+  int16x4_t __s0_356 = __p0_356; \
+  int16x4_t __s1_356 = __p1_356; \
+  int16x8_t __s2_356 = __p2_356; \
+  int16x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
+  int16x4_t __rev1_356;  __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \
+  int16x8_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
+  __ret_356; \
+})
+#endif
+
+#endif
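+// The SHA3 section that follows maps each intrinsic to a single instruction:
+// vbcaxq_* computes __p0 ^ (__p1 & ~__p2) (BCAX), veor3q_* the three-way
+// XOR __p0 ^ __p1 ^ __p2 (EOR3), vrax1q_u64 __p0 ^ rol(__p1, 1) (RAX1), and
+// vxarq_u64 the rotated XOR ror(__p0 ^ __p1, __p2) (XAR).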
+#if defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint8x16_t __ret;
@@ -35881,10 +44666,10 @@
 }
 #else
 __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35899,10 +44684,10 @@
 }
 #else
 __ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35917,10 +44702,10 @@
 }
 #else
 __ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35935,10 +44720,10 @@
 }
 #else
 __ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35953,10 +44738,10 @@
 }
 #else
 __ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35971,10 +44756,10 @@
 }
 #else
 __ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35989,10 +44774,10 @@
 }
 #else
 __ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36007,10 +44792,10 @@
 }
 #else
 __ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36025,10 +44810,10 @@
 }
 #else
 __ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36043,10 +44828,10 @@
 }
 #else
 __ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36061,10 +44846,10 @@
 }
 #else
 __ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36079,10 +44864,10 @@
 }
 #else
 __ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36097,10 +44882,10 @@
 }
 #else
 __ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36115,10 +44900,10 @@
 }
 #else
 __ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36133,10 +44918,10 @@
 }
 #else
 __ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36151,10 +44936,10 @@
 }
 #else
 __ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36169,9 +44954,9 @@
 }
 #else
 __ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36180,19 +44965,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -36200,7 +44985,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
   uint64x2_t __ret;
@@ -36209,10 +44994,10 @@
 }
 #else
 __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36227,10 +45012,10 @@
 }
 #else
 __ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36245,9 +45030,9 @@
 }
 #else
 __ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36262,10 +45047,10 @@
 }
 #else
 __ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36273,7 +45058,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
@@ -36282,10 +45067,10 @@
 }
 #else
 __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36300,10 +45085,10 @@
 }
 #else
 __ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36318,10 +45103,10 @@
 }
 #else
 __ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36330,22 +45115,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36354,22 +45139,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36378,22 +45163,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36402,22 +45187,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36425,7 +45210,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
@@ -36434,9 +45219,9 @@
 }
 #else
 __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36451,9 +45236,9 @@
 }
 #else
 __ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36461,7 +45246,3705 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
+#if defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
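+// Each q-register intrinsic in this block follows the same endian-dispatch
+// pattern: on big-endian targets the lanes are reversed with
+// __builtin_shufflevector before calling the builtin (which assumes
+// little-endian lane order), and the result is reversed back. A sketch of
+// what vabdq_f64 computes, with hypothetical inputs:
+//   float64x2_t a = {1.0, -4.0};
+//   float64x2_t b = {3.0,  1.0};
+//   float64x2_t d = vabdq_f64(a, b);  // per-lane |a - b| == {2.0, 5.0}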
+__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vabs_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+__ai int64x1_t vabs_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int64_t vabsd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
+  return __ret;
+}
+__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
+  poly128_t __ret;
+  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
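+// vaddhn_high_* widen-add two vectors, keep the high half of each lane, and
+// pack the narrowed result into the upper half of the output, with __p0
+// supplying the lower half. A sketch with hypothetical values:
+//   uint32x4_t x = vdupq_n_u32(0x12345678);
+//   uint16x4_t lo = vdup_n_u16(7);
+//   // each lane: (0x12345678 + 0x12345678) >> 16 == 0x2468
+//   uint16x8_t r = vaddhn_high_u32(lo, x, x);  // {7,7,7,7, 0x2468 x 4}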
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+  uint16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+  uint64_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+  uint32_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+  int16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+  int64_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+  int32_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+  uint16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+  uint64_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+  uint32_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+  int16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+  int64_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+  int32_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+  int64_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
+  return __ret;
+}
+#endif
+
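+// vaddv_*/vaddvq_* sum all lanes at the element width (so u8 sums wrap at
+// 255), whereas the vaddlv_* forms above accumulate into a wider scalar. A
+// sketch with hypothetical values:
+//   uint8x8_t v = vdup_n_u8(100);
+//   uint8_t  s1 = vaddv_u8(v);    // 8 * 100 == 800, wraps to 32
+//   uint16_t s2 = vaddlv_u8(v);   // widened accumulation: 800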
+__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
+  return __ret;
+}
+#else
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+  poly64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
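+// vbsl ("bitwise select") computes (mask & a) | (~mask & b) bit by bit, so a
+// comparison mask can steer whole lanes. A sketch with hypothetical inputs:
+//   float64x2_t a = {1.0, 2.0}, b = {3.0, 4.0};
+//   uint64x2_t m = vcgtq_f64(a, b);        // all-ones lane where a > b
+//   float64x2_t r = vbslq_f64(m, a, b);    // per-lane max == {3.0, 4.0}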
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
+  return __ret;
+}
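+// The vcage/vcagt/vcale/vcalt family compares magnitudes (|a| op |b|) and
+// yields an all-ones or all-zero mask per lane; the scalar _f64/_f32 forms
+// return a single 0 or ~0. A sketch with hypothetical values:
+//   uint64_t m = vcagtd_f64(-5.0, 3.0);  // |-5.0| > |3.0|, so m == ~0ULL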
+__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
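+// The lane-wise forms above use the GNU vector extension directly: == on two
+// vector operands yields a signed vector with -1 (all ones) in equal lanes
+// and 0 elsewhere, which is then cast to the unsigned result type.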
+__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+  uint8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+  uint8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+  uint64x2_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+  uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+  uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vceqzd_u64(uint64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
+  return __ret;
+}
+__ai uint64_t vceqzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vceqzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vceqzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
+  return __ret;
+}
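+// vceqz* is a compare-against-zero shorthand: a lane becomes all ones if it
+// equals zero (for floats, both +0.0 and -0.0 qualify) and zero otherwise.
+// A sketch with hypothetical values:
+//   int32x2_t v = {0, 7};
+//   uint32x2_t m = vceqz_s32(v);  // {0xFFFFFFFF, 0}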
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcgezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcgezd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcgezs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
+  return __ret;
+}
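+// vcgt*: lane-wise "greater than" comparisons. The 64-bit-element vector
+// forms here are expressed with the C vector comparison operator, which
+// already yields an all-ones mask per true lane; the d/s-suffixed forms are
+// scalar variants returning 0 or an all-ones uint64_t/uint32_t.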
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
+  return __ret;
+}
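+// vcgtz*: lane-wise "greater than zero" against an implicit zero operand.
+// Each result lane is all ones when the source lane is strictly positive
+// and all zeros otherwise; vcgtzd_*/vcgtzs_* are the scalar doubleword and
+// single-word forms.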
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcgtzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcgtzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcgtzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
+  return __ret;
+}
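+// vcle*: lane-wise "less than or equal". As with vcgt above, the 64-bit
+// vector forms compile to a plain vector <= comparison, while the scalar
+// forms go through dedicated __builtin_neon_vcled_*/vcles_* builtins.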
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
+  return __ret;
+}
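+// vclez*: lane-wise "less than or equal to zero", producing an all-ones
+// mask per true lane, with scalar vclezd_*/vclezs_* variants.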
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vclezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vclezd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vclezs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
+  return __ret;
+}
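+// vclt*: lane-wise "less than"; same shape as vcgt/vcle above, with scalar
+// vcltd_*/vclts_* variants for single 64-bit and 32-bit values.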
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
+  return __ret;
+}
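+// vcltz*: lane-wise "less than zero". A common use is building a sign
+// mask, e.g. (illustrative):
+//   int32x4_t x = {-5, 7, -1, 0};
+//   uint32x4_t neg = vcltzq_s32(x);  // lanes: {~0u, 0, ~0u, 0}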
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcltzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcltzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcltzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
+  return __ret;
+}
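+// vcombine_p64/vcombine_f64 concatenate two 64-bit vectors into one 128-bit
+// vector, with the first argument becoming the low half. On big endian the
+// combined value is built in little-endian lane order and then reversed
+// into the lane order used throughout this header.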
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
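+// vcopy{q}_lane{q}_*: copy one lane of the third argument into one lane of
+// the first (result = __p0 with lane __p1 replaced by lane __p3 of __p2).
+// These are macros rather than functions because both lane indices must be
+// integer constant expressions; the generator numbers each expansion's
+// locals (_357, _358, ...), presumably so nested expansions cannot collide,
+// and the big-endian bodies use the __noswap_ set/get helpers, which skip
+// the usual lane reversal because their operands are already reversed.
+// Illustrative use:
+//   uint32x4_t dst = vdupq_n_u32(0);
+//   uint32x2_t src = vcreate_u32(0x0000000200000001ULL);
+//   dst = vcopyq_lane_u32(dst, 3, src, 1);  // dst lane 3 = src lane 1 = 2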
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
+  poly8x16_t __ret_357; \
+  poly8x16_t __s0_357 = __p0_357; \
+  poly8x8_t __s2_357 = __p2_357; \
+  __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \
+  __ret_357; \
+})
+#else
+#define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
+  poly8x16_t __ret_358; \
+  poly8x16_t __s0_358 = __p0_358; \
+  poly8x8_t __s2_358 = __p2_358; \
+  poly8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
+  poly16x8_t __ret_359; \
+  poly16x8_t __s0_359 = __p0_359; \
+  poly16x4_t __s2_359 = __p2_359; \
+  __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \
+  __ret_359; \
+})
+#else
+#define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
+  poly16x8_t __ret_360; \
+  poly16x8_t __s0_360 = __p0_360; \
+  poly16x4_t __s2_360 = __p2_360; \
+  poly16x8_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_360;  __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \
+  __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_360; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
+  uint8x16_t __ret_361; \
+  uint8x16_t __s0_361 = __p0_361; \
+  uint8x8_t __s2_361 = __p2_361; \
+  __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \
+  __ret_361; \
+})
+#else
+#define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
+  uint8x16_t __ret_362; \
+  uint8x16_t __s0_362 = __p0_362; \
+  uint8x8_t __s2_362 = __p2_362; \
+  uint8x16_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_362; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
+  uint32x4_t __ret_363; \
+  uint32x4_t __s0_363 = __p0_363; \
+  uint32x2_t __s2_363 = __p2_363; \
+  __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \
+  __ret_363; \
+})
+#else
+#define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
+  uint32x4_t __ret_364; \
+  uint32x4_t __s0_364 = __p0_364; \
+  uint32x2_t __s2_364 = __p2_364; \
+  uint32x4_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \
+  uint32x2_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \
+  __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \
+  __ret_364; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
+  uint64x2_t __ret_365; \
+  uint64x2_t __s0_365 = __p0_365; \
+  uint64x1_t __s2_365 = __p2_365; \
+  __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \
+  __ret_365; \
+})
+#else
+#define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
+  uint64x2_t __ret_366; \
+  uint64x2_t __s0_366 = __p0_366; \
+  uint64x1_t __s2_366 = __p2_366; \
+  uint64x2_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \
+  __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \
+  __ret_366; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
+  uint16x8_t __ret_367; \
+  uint16x8_t __s0_367 = __p0_367; \
+  uint16x4_t __s2_367 = __p2_367; \
+  __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \
+  __ret_367; \
+})
+#else
+#define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
+  uint16x8_t __ret_368; \
+  uint16x8_t __s0_368 = __p0_368; \
+  uint16x4_t __s2_368 = __p2_368; \
+  uint16x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \
+  __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \
+  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_368; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
+  int8x16_t __ret_369; \
+  int8x16_t __s0_369 = __p0_369; \
+  int8x8_t __s2_369 = __p2_369; \
+  __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \
+  __ret_369; \
+})
+#else
+#define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
+  int8x16_t __ret_370; \
+  int8x16_t __s0_370 = __p0_370; \
+  int8x8_t __s2_370 = __p2_370; \
+  int8x16_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_370; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
+  float32x4_t __ret_371; \
+  float32x4_t __s0_371 = __p0_371; \
+  float32x2_t __s2_371 = __p2_371; \
+  __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \
+  __ret_371; \
+})
+#else
+#define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
+  float32x4_t __ret_372; \
+  float32x4_t __s0_372 = __p0_372; \
+  float32x2_t __s2_372 = __p2_372; \
+  float32x4_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \
+  float32x2_t __rev2_372;  __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \
+  __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \
+  __ret_372; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
+  int32x4_t __ret_373; \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x2_t __s2_373 = __p2_373; \
+  __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \
+  __ret_373; \
+})
+#else
+#define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
+  int32x4_t __ret_374; \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x2_t __s2_374 = __p2_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x2_t __rev2_374;  __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \
+  __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
+  __ret_374; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
+  int64x2_t __ret_375; \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x1_t __s2_375 = __p2_375; \
+  __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \
+  __ret_375; \
+})
+#else
+#define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
+  int64x2_t __ret_376; \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x1_t __s2_376 = __p2_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
+  __ret_376; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
+  int16x8_t __ret_377; \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x4_t __s2_377 = __p2_377; \
+  __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \
+  __ret_377; \
+})
+#else
+#define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
+  int16x8_t __ret_378; \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x4_t __s2_378 = __p2_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_378;  __rev2_378 = __builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \
+  __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_378; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
+  poly8x8_t __ret_379; \
+  poly8x8_t __s0_379 = __p0_379; \
+  poly8x8_t __s2_379 = __p2_379; \
+  __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \
+  __ret_379; \
+})
+#else
+#define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
+  poly8x8_t __ret_380; \
+  poly8x8_t __s0_380 = __p0_380; \
+  poly8x8_t __s2_380 = __p2_380; \
+  poly8x8_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_380;  __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
+  poly16x4_t __ret_381; \
+  poly16x4_t __s0_381 = __p0_381; \
+  poly16x4_t __s2_381 = __p2_381; \
+  __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \
+  __ret_381; \
+})
+#else
+#define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
+  poly16x4_t __ret_382; \
+  poly16x4_t __s0_382 = __p0_382; \
+  poly16x4_t __s2_382 = __p2_382; \
+  poly16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  poly16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
+  __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
+  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
+  __ret_382; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
+  uint8x8_t __ret_383; \
+  uint8x8_t __s0_383 = __p0_383; \
+  uint8x8_t __s2_383 = __p2_383; \
+  __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \
+  __ret_383; \
+})
+#else
+#define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
+  uint8x8_t __ret_384; \
+  uint8x8_t __s0_384 = __p0_384; \
+  uint8x8_t __s2_384 = __p2_384; \
+  uint8x8_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
+  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_384; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
+  uint32x2_t __ret_385; \
+  uint32x2_t __s0_385 = __p0_385; \
+  uint32x2_t __s2_385 = __p2_385; \
+  __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \
+  __ret_385; \
+})
+#else
+#define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
+  uint32x2_t __ret_386; \
+  uint32x2_t __s0_386 = __p0_386; \
+  uint32x2_t __s2_386 = __p2_386; \
+  uint32x2_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \
+  uint32x2_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 1, 0); \
+  __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \
+  __ret_386; \
+})
+#endif
+
+#define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
+  uint64x1_t __ret_387; \
+  uint64x1_t __s0_387 = __p0_387; \
+  uint64x1_t __s2_387 = __p2_387; \
+  __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \
+  __ret_387; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
+  uint16x4_t __ret_388; \
+  uint16x4_t __s0_388 = __p0_388; \
+  uint16x4_t __s2_388 = __p2_388; \
+  __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \
+  __ret_388; \
+})
+#else
+#define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
+  uint16x4_t __ret_389; \
+  uint16x4_t __s0_389 = __p0_389; \
+  uint16x4_t __s2_389 = __p2_389; \
+  uint16x4_t __rev0_389;  __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \
+  uint16x4_t __rev2_389;  __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \
+  __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \
+  __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 2, 1, 0); \
+  __ret_389; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
+  int8x8_t __ret_390; \
+  int8x8_t __s0_390 = __p0_390; \
+  int8x8_t __s2_390 = __p2_390; \
+  __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \
+  __ret_390; \
+})
+#else
+#define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
+  int8x8_t __ret_391; \
+  int8x8_t __s0_391 = __p0_391; \
+  int8x8_t __s2_391 = __p2_391; \
+  int8x8_t __rev0_391;  __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_391;  __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \
+  __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_391; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
+  float32x2_t __ret_392; \
+  float32x2_t __s0_392 = __p0_392; \
+  float32x2_t __s2_392 = __p2_392; \
+  __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \
+  __ret_392; \
+})
+#else
+#define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
+  float32x2_t __ret_393; \
+  float32x2_t __s0_393 = __p0_393; \
+  float32x2_t __s2_393 = __p2_393; \
+  float32x2_t __rev0_393;  __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \
+  float32x2_t __rev2_393;  __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \
+  __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \
+  __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \
+  __ret_393; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
+  int32x2_t __ret_394; \
+  int32x2_t __s0_394 = __p0_394; \
+  int32x2_t __s2_394 = __p2_394; \
+  __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \
+  __ret_394; \
+})
+#else
+#define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
+  int32x2_t __ret_395; \
+  int32x2_t __s0_395 = __p0_395; \
+  int32x2_t __s2_395 = __p2_395; \
+  int32x2_t __rev0_395;  __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \
+  int32x2_t __rev2_395;  __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \
+  __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \
+  __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \
+  __ret_395; \
+})
+#endif
+
+#define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
+  int64x1_t __ret_396; \
+  int64x1_t __s0_396 = __p0_396; \
+  int64x1_t __s2_396 = __p2_396; \
+  __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \
+  __ret_396; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
+  int16x4_t __ret_397; \
+  int16x4_t __s0_397 = __p0_397; \
+  int16x4_t __s2_397 = __p2_397; \
+  __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \
+  __ret_397; \
+})
+#else
+#define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
+  int16x4_t __ret_398; \
+  int16x4_t __s0_398 = __p0_398; \
+  int16x4_t __s2_398 = __p2_398; \
+  int16x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
+  int16x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
+  __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \
+  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
+  __ret_398; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
+  poly8x16_t __ret_399; \
+  poly8x16_t __s0_399 = __p0_399; \
+  poly8x16_t __s2_399 = __p2_399; \
+  __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \
+  __ret_399; \
+})
+#else
+#define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
+  poly8x16_t __ret_400; \
+  poly8x16_t __s0_400 = __p0_400; \
+  poly8x16_t __s2_400 = __p2_400; \
+  poly8x16_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_400; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  poly16x8_t __ret_401; \
+  poly16x8_t __s0_401 = __p0_401; \
+  poly16x8_t __s2_401 = __p2_401; \
+  __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \
+  __ret_401; \
+})
+#else
+#define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  poly16x8_t __ret_402; \
+  poly16x8_t __s0_402 = __p0_402; \
+  poly16x8_t __s2_402 = __p2_402; \
+  poly16x8_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \
+  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_402; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u8(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  uint8x16_t __ret_403; \
+  uint8x16_t __s0_403 = __p0_403; \
+  uint8x16_t __s2_403 = __p2_403; \
+  __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \
+  __ret_403; \
+})
+#else
+#define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  uint8x16_t __ret_404; \
+  uint8x16_t __s0_404 = __p0_404; \
+  uint8x16_t __s2_404 = __p2_404; \
+  uint8x16_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \
+  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_404; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  uint32x4_t __ret_405; \
+  uint32x4_t __s0_405 = __p0_405; \
+  uint32x4_t __s2_405 = __p2_405; \
+  __ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \
+  __ret_405; \
+})
+#else
+#define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  uint32x4_t __ret_406; \
+  uint32x4_t __s0_406 = __p0_406; \
+  uint32x4_t __s2_406 = __p2_406; \
+  uint32x4_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \
+  uint32x4_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \
+  __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \
+  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \
+  __ret_406; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  uint64x2_t __ret_407; \
+  uint64x2_t __s0_407 = __p0_407; \
+  uint64x2_t __s2_407 = __p2_407; \
+  __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \
+  __ret_407; \
+})
+#else
+#define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  uint64x2_t __ret_408; \
+  uint64x2_t __s0_408 = __p0_408; \
+  uint64x2_t __s2_408 = __p2_408; \
+  uint64x2_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \
+  uint64x2_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \
+  __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \
+  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \
+  __ret_408; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  uint16x8_t __ret_409; \
+  uint16x8_t __s0_409 = __p0_409; \
+  uint16x8_t __s2_409 = __p2_409; \
+  __ret_409 = vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \
+  __ret_409; \
+})
+#else
+#define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  uint16x8_t __ret_410; \
+  uint16x8_t __s0_410 = __p0_410; \
+  uint16x8_t __s2_410 = __p2_410; \
+  uint16x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \
+  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_410; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  int8x16_t __ret_411; \
+  int8x16_t __s0_411 = __p0_411; \
+  int8x16_t __s2_411 = __p2_411; \
+  __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \
+  __ret_411; \
+})
+#else
+#define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  int8x16_t __ret_412; \
+  int8x16_t __s0_412 = __p0_412; \
+  int8x16_t __s2_412 = __p2_412; \
+  int8x16_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \
+  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_412; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32x4_t __ret_413; \
+  float32x4_t __s0_413 = __p0_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \
+  __ret_413; \
+})
+#else
+#define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32x4_t __ret_414; \
+  float32x4_t __s0_414 = __p0_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev0_414;  __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \
+  __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \
+  __ret_414; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  int32x4_t __ret_415; \
+  int32x4_t __s0_415 = __p0_415; \
+  int32x4_t __s2_415 = __p2_415; \
+  __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \
+  __ret_415; \
+})
+#else
+#define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  int32x4_t __ret_416; \
+  int32x4_t __s0_416 = __p0_416; \
+  int32x4_t __s2_416 = __p2_416; \
+  int32x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
+  int32x4_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \
+  __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \
+  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
+  __ret_416; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  int64x2_t __ret_417; \
+  int64x2_t __s0_417 = __p0_417; \
+  int64x2_t __s2_417 = __p2_417; \
+  __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \
+  __ret_417; \
+})
+#else
+#define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  int64x2_t __ret_418; \
+  int64x2_t __s0_418 = __p0_418; \
+  int64x2_t __s2_418 = __p2_418; \
+  int64x2_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \
+  int64x2_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \
+  __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \
+  __ret_418; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  int16x8_t __ret_419; \
+  int16x8_t __s0_419 = __p0_419; \
+  int16x8_t __s2_419 = __p2_419; \
+  __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \
+  __ret_419; \
+})
+#else
+#define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  int16x8_t __ret_420; \
+  int16x8_t __s0_420 = __p0_420; \
+  int16x8_t __s2_420 = __p2_420; \
+  int16x8_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \
+  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_420; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  poly8x8_t __ret_421; \
+  poly8x8_t __s0_421 = __p0_421; \
+  poly8x16_t __s2_421 = __p2_421; \
+  __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \
+  __ret_421; \
+})
+#else
+#define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  poly8x8_t __ret_422; \
+  poly8x8_t __s0_422 = __p0_422; \
+  poly8x16_t __s2_422 = __p2_422; \
+  poly8x8_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_422; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  poly16x4_t __ret_423; \
+  poly16x4_t __s0_423 = __p0_423; \
+  poly16x8_t __s2_423 = __p2_423; \
+  __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \
+  __ret_423; \
+})
+#else
+#define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  poly16x4_t __ret_424; \
+  poly16x4_t __s0_424 = __p0_424; \
+  poly16x8_t __s2_424 = __p2_424; \
+  poly16x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  poly16x8_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
+  __ret_424; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint8x8_t __ret_425; \
+  uint8x8_t __s0_425 = __p0_425; \
+  uint8x16_t __s2_425 = __p2_425; \
+  __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \
+  __ret_425; \
+})
+#else
+#define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint8x8_t __ret_426; \
+  uint8x8_t __s0_426 = __p0_426; \
+  uint8x16_t __s2_426 = __p2_426; \
+  uint8x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  uint32x2_t __ret_427; \
+  uint32x2_t __s0_427 = __p0_427; \
+  uint32x4_t __s2_427 = __p2_427; \
+  __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \
+  __ret_427; \
+})
+#else
+#define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  uint32x2_t __ret_428; \
+  uint32x2_t __s0_428 = __p0_428; \
+  uint32x4_t __s2_428 = __p2_428; \
+  uint32x2_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \
+  uint32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \
+  __ret_428; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  uint64x1_t __ret_429; \
+  uint64x1_t __s0_429 = __p0_429; \
+  uint64x2_t __s2_429 = __p2_429; \
+  __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \
+  __ret_429; \
+})
+#else
+#define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  uint64x1_t __ret_430; \
+  uint64x1_t __s0_430 = __p0_430; \
+  uint64x2_t __s2_430 = __p2_430; \
+  uint64x2_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \
+  __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \
+  __ret_430; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  uint16x4_t __ret_431; \
+  uint16x4_t __s0_431 = __p0_431; \
+  uint16x8_t __s2_431 = __p2_431; \
+  __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \
+  __ret_431; \
+})
+#else
+#define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  uint16x4_t __ret_432; \
+  uint16x4_t __s0_432 = __p0_432; \
+  uint16x8_t __s2_432 = __p2_432; \
+  uint16x4_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \
+  uint16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \
+  __ret_432; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  int8x8_t __ret_433; \
+  int8x8_t __s0_433 = __p0_433; \
+  int8x16_t __s2_433 = __p2_433; \
+  __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \
+  __ret_433; \
+})
+#else
+#define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  int8x8_t __ret_434; \
+  int8x8_t __s0_434 = __p0_434; \
+  int8x16_t __s2_434 = __p2_434; \
+  int8x8_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  float32x2_t __ret_435; \
+  float32x2_t __s0_435 = __p0_435; \
+  float32x4_t __s2_435 = __p2_435; \
+  __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \
+  __ret_435; \
+})
+#else
+#define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  float32x2_t __ret_436; \
+  float32x2_t __s0_436 = __p0_436; \
+  float32x4_t __s2_436 = __p2_436; \
+  float32x2_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \
+  float32x4_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \
+  __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \
+  __ret_436; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  int32x2_t __ret_437; \
+  int32x2_t __s0_437 = __p0_437; \
+  int32x4_t __s2_437 = __p2_437; \
+  __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \
+  __ret_437; \
+})
+#else
+#define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  int32x2_t __ret_438; \
+  int32x2_t __s0_438 = __p0_438; \
+  int32x4_t __s2_438 = __p2_438; \
+  int32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  int32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int64x1_t __ret_439; \
+  int64x1_t __s0_439 = __p0_439; \
+  int64x2_t __s2_439 = __p2_439; \
+  __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \
+  __ret_439; \
+})
+#else
+#define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int64x1_t __ret_440; \
+  int64x1_t __s0_440 = __p0_440; \
+  int64x2_t __s2_440 = __p2_440; \
+  int64x2_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \
+  __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \
+  __ret_440; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __ret_441; \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \
+  __ret_441; \
+})
+#else
+#define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __ret_442; \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
+})
+#endif
+
+#define vcreate_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (poly64x1_t)(__promote); \
+  __ret; \
+})
+#define vcreate_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (float64x1_t)(__promote); \
+  __ret; \
+})
+__ai float32_t vcvts_f32_s32(int32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
+  return __ret;
+}
+__ai float32_t vcvts_f32_u32(uint32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vcvtd_f64_s64(int64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
+  return __ret;
+}
+__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  float64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  float64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x8_t __ret;
+  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
+  return __ret;
+}
+#else
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x8_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_f16(vget_high_f16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float32x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float64x2_t __ret;
+  __ret = vcvt_f64_f32(vget_high_f32(__p0));
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float64x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
+  __ret; \
+})
+#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  float32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  float64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  float32_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  float64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
+  __ret; \
+})
+__ai int32_t vcvts_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
+  return __ret;
+}
+__ai int64_t vcvtd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+  int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai uint32_t vcvts_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
+  return __ret;
+}
+__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+__ai int32_t vcvtas_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36470,8 +48953,8 @@
 }
 #else
 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36483,6 +48966,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtad_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36491,8 +48984,8 @@
 }
 #else
 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36504,6 +48997,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtms_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36512,8 +49015,8 @@
 }
 #else
 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36525,6 +49028,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36533,8 +49046,8 @@
 }
 #else
 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36546,6 +49059,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtns_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36554,8 +49077,8 @@
 }
 #else
 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36567,6 +49090,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36575,8 +49108,8 @@
 }
 #else
 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36588,6 +49121,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtps_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36596,8 +49139,8 @@
 }
 #else
 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36609,6 +49152,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36617,8 +49170,8 @@
 }
 #else
 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36630,6 +49183,10654 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
+  return __ret;
+}
+__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x1_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x2_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x2_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x1_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x4_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \
+  poly64x1_t __ret_443; \
+  poly64x1_t __s0_443 = __p0_443; \
+  __ret_443 = splat_lane_p64(__s0_443, __p1_443); \
+  __ret_443; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \
+  poly64x2_t __ret_444; \
+  poly64x1_t __s0_444 = __p0_444; \
+  __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \
+  __ret_444; \
+})
+#else
+#define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \
+  poly64x2_t __ret_445; \
+  poly64x1_t __s0_445 = __p0_445; \
+  __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \
+  __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \
+  __ret_445; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \
+  float64x2_t __ret_446; \
+  float64x1_t __s0_446 = __p0_446; \
+  __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \
+  __ret_446; \
+})
+#else
+#define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \
+  float64x2_t __ret_447; \
+  float64x1_t __s0_447 = __p0_447; \
+  __ret_447 = __noswap_splatq_lane_f64(__s0_447, __p1_447); \
+  __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \
+  __ret_447; \
+})
+#endif
+
+#define vdup_lane_f64(__p0_448, __p1_448) __extension__ ({ \
+  float64x1_t __ret_448; \
+  float64x1_t __s0_448 = __p0_448; \
+  __ret_448 = splat_lane_f64(__s0_448, __p1_448); \
+  __ret_448; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x4_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x4_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x2_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x8_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p8(__p0_449, __p1_449) __extension__ ({ \
+  poly8x8_t __ret_449; \
+  poly8x16_t __s0_449 = __p0_449; \
+  __ret_449 = splat_laneq_p8(__s0_449, __p1_449); \
+  __ret_449; \
+})
+#else
+#define vdup_laneq_p8(__p0_450, __p1_450) __extension__ ({ \
+  poly8x8_t __ret_450; \
+  poly8x16_t __s0_450 = __p0_450; \
+  poly8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_450 = __noswap_splat_laneq_p8(__rev0_450, __p1_450); \
+  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_450; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p64(__p0_451, __p1_451) __extension__ ({ \
+  poly64x1_t __ret_451; \
+  poly64x2_t __s0_451 = __p0_451; \
+  __ret_451 = splat_laneq_p64(__s0_451, __p1_451); \
+  __ret_451; \
+})
+#else
+#define vdup_laneq_p64(__p0_452, __p1_452) __extension__ ({ \
+  poly64x1_t __ret_452; \
+  poly64x2_t __s0_452 = __p0_452; \
+  poly64x2_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \
+  __ret_452 = __noswap_splat_laneq_p64(__rev0_452, __p1_452); \
+  __ret_452; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p16(__p0_453, __p1_453) __extension__ ({ \
+  poly16x4_t __ret_453; \
+  poly16x8_t __s0_453 = __p0_453; \
+  __ret_453 = splat_laneq_p16(__s0_453, __p1_453); \
+  __ret_453; \
+})
+#else
+#define vdup_laneq_p16(__p0_454, __p1_454) __extension__ ({ \
+  poly16x4_t __ret_454; \
+  poly16x8_t __s0_454 = __p0_454; \
+  poly16x8_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_454 = __noswap_splat_laneq_p16(__rev0_454, __p1_454); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
+  __ret_454; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p8(__p0_455, __p1_455) __extension__ ({ \
+  poly8x16_t __ret_455; \
+  poly8x16_t __s0_455 = __p0_455; \
+  __ret_455 = splatq_laneq_p8(__s0_455, __p1_455); \
+  __ret_455; \
+})
+#else
+#define vdupq_laneq_p8(__p0_456, __p1_456) __extension__ ({ \
+  poly8x16_t __ret_456; \
+  poly8x16_t __s0_456 = __p0_456; \
+  poly8x16_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_456 = __noswap_splatq_laneq_p8(__rev0_456, __p1_456); \
+  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_456; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p64(__p0_457, __p1_457) __extension__ ({ \
+  poly64x2_t __ret_457; \
+  poly64x2_t __s0_457 = __p0_457; \
+  __ret_457 = splatq_laneq_p64(__s0_457, __p1_457); \
+  __ret_457; \
+})
+#else
+#define vdupq_laneq_p64(__p0_458, __p1_458) __extension__ ({ \
+  poly64x2_t __ret_458; \
+  poly64x2_t __s0_458 = __p0_458; \
+  poly64x2_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 1, 0); \
+  __ret_458 = __noswap_splatq_laneq_p64(__rev0_458, __p1_458); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 1, 0); \
+  __ret_458; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p16(__p0_459, __p1_459) __extension__ ({ \
+  poly16x8_t __ret_459; \
+  poly16x8_t __s0_459 = __p0_459; \
+  __ret_459 = splatq_laneq_p16(__s0_459, __p1_459); \
+  __ret_459; \
+})
+#else
+#define vdupq_laneq_p16(__p0_460, __p1_460) __extension__ ({ \
+  poly16x8_t __ret_460; \
+  poly16x8_t __s0_460 = __p0_460; \
+  poly16x8_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_460 = __noswap_splatq_laneq_p16(__rev0_460, __p1_460); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_460; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u8(__p0_461, __p1_461) __extension__ ({ \
+  uint8x16_t __ret_461; \
+  uint8x16_t __s0_461 = __p0_461; \
+  __ret_461 = splatq_laneq_u8(__s0_461, __p1_461); \
+  __ret_461; \
+})
+#else
+#define vdupq_laneq_u8(__p0_462, __p1_462) __extension__ ({ \
+  uint8x16_t __ret_462; \
+  uint8x16_t __s0_462 = __p0_462; \
+  uint8x16_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_462 = __noswap_splatq_laneq_u8(__rev0_462, __p1_462); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_462; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u32(__p0_463, __p1_463) __extension__ ({ \
+  uint32x4_t __ret_463; \
+  uint32x4_t __s0_463 = __p0_463; \
+  __ret_463 = splatq_laneq_u32(__s0_463, __p1_463); \
+  __ret_463; \
+})
+#else
+#define vdupq_laneq_u32(__p0_464, __p1_464) __extension__ ({ \
+  uint32x4_t __ret_464; \
+  uint32x4_t __s0_464 = __p0_464; \
+  uint32x4_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 3, 2, 1, 0); \
+  __ret_464 = __noswap_splatq_laneq_u32(__rev0_464, __p1_464); \
+  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 3, 2, 1, 0); \
+  __ret_464; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u64(__p0_465, __p1_465) __extension__ ({ \
+  uint64x2_t __ret_465; \
+  uint64x2_t __s0_465 = __p0_465; \
+  __ret_465 = splatq_laneq_u64(__s0_465, __p1_465); \
+  __ret_465; \
+})
+#else
+#define vdupq_laneq_u64(__p0_466, __p1_466) __extension__ ({ \
+  uint64x2_t __ret_466; \
+  uint64x2_t __s0_466 = __p0_466; \
+  uint64x2_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 1, 0); \
+  __ret_466 = __noswap_splatq_laneq_u64(__rev0_466, __p1_466); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 1, 0); \
+  __ret_466; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u16(__p0_467, __p1_467) __extension__ ({ \
+  uint16x8_t __ret_467; \
+  uint16x8_t __s0_467 = __p0_467; \
+  __ret_467 = splatq_laneq_u16(__s0_467, __p1_467); \
+  __ret_467; \
+})
+#else
+#define vdupq_laneq_u16(__p0_468, __p1_468) __extension__ ({ \
+  uint16x8_t __ret_468; \
+  uint16x8_t __s0_468 = __p0_468; \
+  uint16x8_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_468 = __noswap_splatq_laneq_u16(__rev0_468, __p1_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_468; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s8(__p0_469, __p1_469) __extension__ ({ \
+  int8x16_t __ret_469; \
+  int8x16_t __s0_469 = __p0_469; \
+  __ret_469 = splatq_laneq_s8(__s0_469, __p1_469); \
+  __ret_469; \
+})
+#else
+#define vdupq_laneq_s8(__p0_470, __p1_470) __extension__ ({ \
+  int8x16_t __ret_470; \
+  int8x16_t __s0_470 = __p0_470; \
+  int8x16_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470 = __noswap_splatq_laneq_s8(__rev0_470, __p1_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f64(__p0_471, __p1_471) __extension__ ({ \
+  float64x2_t __ret_471; \
+  float64x2_t __s0_471 = __p0_471; \
+  __ret_471 = splatq_laneq_f64(__s0_471, __p1_471); \
+  __ret_471; \
+})
+#else
+#define vdupq_laneq_f64(__p0_472, __p1_472) __extension__ ({ \
+  float64x2_t __ret_472; \
+  float64x2_t __s0_472 = __p0_472; \
+  float64x2_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 1, 0); \
+  __ret_472 = __noswap_splatq_laneq_f64(__rev0_472, __p1_472); \
+  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 1, 0); \
+  __ret_472; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f32(__p0_473, __p1_473) __extension__ ({ \
+  float32x4_t __ret_473; \
+  float32x4_t __s0_473 = __p0_473; \
+  __ret_473 = splatq_laneq_f32(__s0_473, __p1_473); \
+  __ret_473; \
+})
+#else
+#define vdupq_laneq_f32(__p0_474, __p1_474) __extension__ ({ \
+  float32x4_t __ret_474; \
+  float32x4_t __s0_474 = __p0_474; \
+  float32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
+  __ret_474 = __noswap_splatq_laneq_f32(__rev0_474, __p1_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 3, 2, 1, 0); \
+  __ret_474; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f16(__p0_475, __p1_475) __extension__ ({ \
+  float16x8_t __ret_475; \
+  float16x8_t __s0_475 = __p0_475; \
+  __ret_475 = splatq_laneq_f16(__s0_475, __p1_475); \
+  __ret_475; \
+})
+#else
+#define vdupq_laneq_f16(__p0_476, __p1_476) __extension__ ({ \
+  float16x8_t __ret_476; \
+  float16x8_t __s0_476 = __p0_476; \
+  float16x8_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476 = __noswap_splatq_laneq_f16(__rev0_476, __p1_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s32(__p0_477, __p1_477) __extension__ ({ \
+  int32x4_t __ret_477; \
+  int32x4_t __s0_477 = __p0_477; \
+  __ret_477 = splatq_laneq_s32(__s0_477, __p1_477); \
+  __ret_477; \
+})
+#else
+#define vdupq_laneq_s32(__p0_478, __p1_478) __extension__ ({ \
+  int32x4_t __ret_478; \
+  int32x4_t __s0_478 = __p0_478; \
+  int32x4_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \
+  __ret_478 = __noswap_splatq_laneq_s32(__rev0_478, __p1_478); \
+  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
+  __ret_478; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s64(__p0_479, __p1_479) __extension__ ({ \
+  int64x2_t __ret_479; \
+  int64x2_t __s0_479 = __p0_479; \
+  __ret_479 = splatq_laneq_s64(__s0_479, __p1_479); \
+  __ret_479; \
+})
+#else
+#define vdupq_laneq_s64(__p0_480, __p1_480) __extension__ ({ \
+  int64x2_t __ret_480; \
+  int64x2_t __s0_480 = __p0_480; \
+  int64x2_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 1, 0); \
+  __ret_480 = __noswap_splatq_laneq_s64(__rev0_480, __p1_480); \
+  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 1, 0); \
+  __ret_480; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s16(__p0_481, __p1_481) __extension__ ({ \
+  int16x8_t __ret_481; \
+  int16x8_t __s0_481 = __p0_481; \
+  __ret_481 = splatq_laneq_s16(__s0_481, __p1_481); \
+  __ret_481; \
+})
+#else
+#define vdupq_laneq_s16(__p0_482, __p1_482) __extension__ ({ \
+  int16x8_t __ret_482; \
+  int16x8_t __s0_482 = __p0_482; \
+  int16x8_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_482 = __noswap_splatq_laneq_s16(__rev0_482, __p1_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_482; \
+})
+#endif
+
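+// vdup_laneq_*: the same lane broadcast narrowed to a 64-bit result from a
+// 128-bit source; illustratively, vdup_laneq_u8(q, 9) yields a uint8x8_t
+// whose eight elements all equal q[9] (any constant lane 0-15 is valid).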
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u8(__p0_483, __p1_483) __extension__ ({ \
+  uint8x8_t __ret_483; \
+  uint8x16_t __s0_483 = __p0_483; \
+  __ret_483 = splat_laneq_u8(__s0_483, __p1_483); \
+  __ret_483; \
+})
+#else
+#define vdup_laneq_u8(__p0_484, __p1_484) __extension__ ({ \
+  uint8x8_t __ret_484; \
+  uint8x16_t __s0_484 = __p0_484; \
+  uint8x16_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_484 = __noswap_splat_laneq_u8(__rev0_484, __p1_484); \
+  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_484; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u32(__p0_485, __p1_485) __extension__ ({ \
+  uint32x2_t __ret_485; \
+  uint32x4_t __s0_485 = __p0_485; \
+  __ret_485 = splat_laneq_u32(__s0_485, __p1_485); \
+  __ret_485; \
+})
+#else
+#define vdup_laneq_u32(__p0_486, __p1_486) __extension__ ({ \
+  uint32x2_t __ret_486; \
+  uint32x4_t __s0_486 = __p0_486; \
+  uint32x4_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 3, 2, 1, 0); \
+  __ret_486 = __noswap_splat_laneq_u32(__rev0_486, __p1_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \
+  __ret_486; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u64(__p0_487, __p1_487) __extension__ ({ \
+  uint64x1_t __ret_487; \
+  uint64x2_t __s0_487 = __p0_487; \
+  __ret_487 = splat_laneq_u64(__s0_487, __p1_487); \
+  __ret_487; \
+})
+#else
+#define vdup_laneq_u64(__p0_488, __p1_488) __extension__ ({ \
+  uint64x1_t __ret_488; \
+  uint64x2_t __s0_488 = __p0_488; \
+  uint64x2_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
+  __ret_488 = __noswap_splat_laneq_u64(__rev0_488, __p1_488); \
+  __ret_488; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u16(__p0_489, __p1_489) __extension__ ({ \
+  uint16x4_t __ret_489; \
+  uint16x8_t __s0_489 = __p0_489; \
+  __ret_489 = splat_laneq_u16(__s0_489, __p1_489); \
+  __ret_489; \
+})
+#else
+#define vdup_laneq_u16(__p0_490, __p1_490) __extension__ ({ \
+  uint16x4_t __ret_490; \
+  uint16x8_t __s0_490 = __p0_490; \
+  uint16x8_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_490 = __noswap_splat_laneq_u16(__rev0_490, __p1_490); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
+  __ret_490; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s8(__p0_491, __p1_491) __extension__ ({ \
+  int8x8_t __ret_491; \
+  int8x16_t __s0_491 = __p0_491; \
+  __ret_491 = splat_laneq_s8(__s0_491, __p1_491); \
+  __ret_491; \
+})
+#else
+#define vdup_laneq_s8(__p0_492, __p1_492) __extension__ ({ \
+  int8x8_t __ret_492; \
+  int8x16_t __s0_492 = __p0_492; \
+  int8x16_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_492 = __noswap_splat_laneq_s8(__rev0_492, __p1_492); \
+  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_492; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f64(__p0_493, __p1_493) __extension__ ({ \
+  float64x1_t __ret_493; \
+  float64x2_t __s0_493 = __p0_493; \
+  __ret_493 = splat_laneq_f64(__s0_493, __p1_493); \
+  __ret_493; \
+})
+#else
+#define vdup_laneq_f64(__p0_494, __p1_494) __extension__ ({ \
+  float64x1_t __ret_494; \
+  float64x2_t __s0_494 = __p0_494; \
+  float64x2_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 1, 0); \
+  __ret_494 = __noswap_splat_laneq_f64(__rev0_494, __p1_494); \
+  __ret_494; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f32(__p0_495, __p1_495) __extension__ ({ \
+  float32x2_t __ret_495; \
+  float32x4_t __s0_495 = __p0_495; \
+  __ret_495 = splat_laneq_f32(__s0_495, __p1_495); \
+  __ret_495; \
+})
+#else
+#define vdup_laneq_f32(__p0_496, __p1_496) __extension__ ({ \
+  float32x2_t __ret_496; \
+  float32x4_t __s0_496 = __p0_496; \
+  float32x4_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 3, 2, 1, 0); \
+  __ret_496 = __noswap_splat_laneq_f32(__rev0_496, __p1_496); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
+  __ret_496; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f16(__p0_497, __p1_497) __extension__ ({ \
+  float16x4_t __ret_497; \
+  float16x8_t __s0_497 = __p0_497; \
+  __ret_497 = splat_laneq_f16(__s0_497, __p1_497); \
+  __ret_497; \
+})
+#else
+#define vdup_laneq_f16(__p0_498, __p1_498) __extension__ ({ \
+  float16x4_t __ret_498; \
+  float16x8_t __s0_498 = __p0_498; \
+  float16x8_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_498 = __noswap_splat_laneq_f16(__rev0_498, __p1_498); \
+  __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
+  __ret_498; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s32(__p0_499, __p1_499) __extension__ ({ \
+  int32x2_t __ret_499; \
+  int32x4_t __s0_499 = __p0_499; \
+  __ret_499 = splat_laneq_s32(__s0_499, __p1_499); \
+  __ret_499; \
+})
+#else
+#define vdup_laneq_s32(__p0_500, __p1_500) __extension__ ({ \
+  int32x2_t __ret_500; \
+  int32x4_t __s0_500 = __p0_500; \
+  int32x4_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \
+  __ret_500 = __noswap_splat_laneq_s32(__rev0_500, __p1_500); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s64(__p0_501, __p1_501) __extension__ ({ \
+  int64x1_t __ret_501; \
+  int64x2_t __s0_501 = __p0_501; \
+  __ret_501 = splat_laneq_s64(__s0_501, __p1_501); \
+  __ret_501; \
+})
+#else
+#define vdup_laneq_s64(__p0_502, __p1_502) __extension__ ({ \
+  int64x1_t __ret_502; \
+  int64x2_t __s0_502 = __p0_502; \
+  int64x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
+  __ret_502 = __noswap_splat_laneq_s64(__rev0_502, __p1_502); \
+  __ret_502; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s16(__p0_503, __p1_503) __extension__ ({ \
+  int16x4_t __ret_503; \
+  int16x8_t __s0_503 = __p0_503; \
+  __ret_503 = splat_laneq_s16(__s0_503, __p1_503); \
+  __ret_503; \
+})
+#else
+#define vdup_laneq_s16(__p0_504, __p1_504) __extension__ ({ \
+  int16x4_t __ret_504; \
+  int16x8_t __s0_504 = __p0_504; \
+  int16x8_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_504 = __noswap_splat_laneq_s16(__rev0_504, __p1_504); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 3, 2, 1, 0); \
+  __ret_504; \
+})
+#endif
+
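+// vdup*_n_*: broadcast a scalar into every lane, e.g. vdupq_n_f64(3.0)
+// produces {3.0, 3.0}. No lane index is involved, so the big-endian variants
+// only reverse the freshly built compound literal, keeping the in-register
+// lane order consistent with the other big-endian wrappers.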
+__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) {__p0};
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vdup_n_f64(float64_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) {__p0};
+  return __ret;
+}
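+// vext*: extract a vector from the concatenation of two sources starting at
+// constant lane __p2; illustratively, vextq_f64(a, b, 1) yields {a[1], b[0]}.
+// The trailing integer constant handed to each __builtin_neon_* call (6, 10,
+// 38, 42, ... here) is clang's internal type code for the element type and
+// vector width (NeonTypeFlags), not a user-visible parameter.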
+#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
+  poly64x1_t __s0 = __p0; \
+  poly64x1_t __s1 = __p1; \
+  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __s1 = __p1; \
+  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
+  __ret; \
+})
+#else
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __s1 = __p1; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
+  __ret; \
+})
+#else
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
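+// vfma*: fused multiply-add with the addend first: vfmaq_f64(a, b, c)
+// computes a + b*c per lane with a single rounding. The extra
+// __noswap_vfmaq_f64 defined in the big-endian branch takes operands that
+// are already in reversed lane order, so later big-endian wrappers can reuse
+// it without swapping twice.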
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
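+// vfmad_lane_f64 / vfmas_lane_f32: scalar fused multiply-add where the final
+// multiplicand is taken from a constant lane of a vector; illustratively,
+// vfmas_lane_f32(a, b, v, 1) computes a + b*v[1].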
+#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+  __ret; \
+})
+#endif
+
+#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+  __ret; \
+})
+#else
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+  __ret; \
+})
+#endif
+
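+// The _laneq forms below mirror the _lane forms above but select the lane
+// from a 128-bit vector instead of a 64-bit one; illustratively,
+// vfmaq_laneq_f32(a, b, v, 3) computes a + b*v[3] in every lane, with v a
+// float32x4_t so lanes 0-3 are valid.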
+#ifdef __LITTLE_ENDIAN__
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+  __ret; \
+})
+#endif
+
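+// vfma*_n_*: multiply by a scalar broadcast on the fly; vfmaq_n_f64(a, b, s)
+// computes a + b*s per lane by materializing (float64x2_t){s, s} and calling
+// vfmaq_f64 (or its __noswap_ variant on big-endian).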
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
+  return __ret;
+}
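+// vfms*: fused multiply-subtract, a - b*c, implemented by negating the first
+// multiplicand and reusing the vfma* path. Negating an input rather than the
+// product keeps the operation fused (one rounding), matching the FMLS
+// instruction's behavior.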
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, -__p1, __p2);
+  return __ret;
+}
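+// The vfms*_lane*/_laneq* forms below are thin wrappers that negate __s1 and
+// forward to the corresponding vfma*_lane*/_laneq* macro (or its __noswap_
+// variant on big-endian), so the lane-selection logic is not duplicated.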
+#define vfmsd_lane_f64(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
+  float64_t __ret_505; \
+  float64_t __s0_505 = __p0_505; \
+  float64_t __s1_505 = __p1_505; \
+  float64x1_t __s2_505 = __p2_505; \
+  __ret_505 = vfmad_lane_f64(__s0_505, -__s1_505, __s2_505, __p3_505); \
+  __ret_505; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_lane_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
+  float32_t __ret_506; \
+  float32_t __s0_506 = __p0_506; \
+  float32_t __s1_506 = __p1_506; \
+  float32x2_t __s2_506 = __p2_506; \
+  __ret_506 = vfmas_lane_f32(__s0_506, -__s1_506, __s2_506, __p3_506); \
+  __ret_506; \
+})
+#else
+#define vfmss_lane_f32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
+  float32_t __ret_507; \
+  float32_t __s0_507 = __p0_507; \
+  float32_t __s1_507 = __p1_507; \
+  float32x2_t __s2_507 = __p2_507; \
+  float32x2_t __rev2_507;  __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, 1, 0); \
+  __ret_507 = __noswap_vfmas_lane_f32(__s0_507, -__s1_507, __rev2_507, __p3_507); \
+  __ret_507; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
+  float64x2_t __ret_508; \
+  float64x2_t __s0_508 = __p0_508; \
+  float64x2_t __s1_508 = __p1_508; \
+  float64x1_t __s2_508 = __p2_508; \
+  __ret_508 = vfmaq_lane_f64(__s0_508, -__s1_508, __s2_508, __p3_508); \
+  __ret_508; \
+})
+#else
+#define vfmsq_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  float64x2_t __ret_509; \
+  float64x2_t __s0_509 = __p0_509; \
+  float64x2_t __s1_509 = __p1_509; \
+  float64x1_t __s2_509 = __p2_509; \
+  float64x2_t __rev0_509;  __rev0_509 = __builtin_shufflevector(__s0_509, __s0_509, 1, 0); \
+  float64x2_t __rev1_509;  __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, 1, 0); \
+  __ret_509 = __noswap_vfmaq_lane_f64(__rev0_509, -__rev1_509, __s2_509, __p3_509); \
+  __ret_509 = __builtin_shufflevector(__ret_509, __ret_509, 1, 0); \
+  __ret_509; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  float32x4_t __ret_510; \
+  float32x4_t __s0_510 = __p0_510; \
+  float32x4_t __s1_510 = __p1_510; \
+  float32x2_t __s2_510 = __p2_510; \
+  __ret_510 = vfmaq_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \
+  __ret_510; \
+})
+#else
+#define vfmsq_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
+  float32x4_t __ret_511; \
+  float32x4_t __s0_511 = __p0_511; \
+  float32x4_t __s1_511 = __p1_511; \
+  float32x2_t __s2_511 = __p2_511; \
+  float32x4_t __rev0_511;  __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, 3, 2, 1, 0); \
+  float32x4_t __rev1_511;  __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, 3, 2, 1, 0); \
+  float32x2_t __rev2_511;  __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \
+  __ret_511 = __noswap_vfmaq_lane_f32(__rev0_511, -__rev1_511, __rev2_511, __p3_511); \
+  __ret_511 = __builtin_shufflevector(__ret_511, __ret_511, 3, 2, 1, 0); \
+  __ret_511; \
+})
+#endif
+
+#define vfms_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
+  float64x1_t __ret_512; \
+  float64x1_t __s0_512 = __p0_512; \
+  float64x1_t __s1_512 = __p1_512; \
+  float64x1_t __s2_512 = __p2_512; \
+  __ret_512 = vfma_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \
+  __ret_512; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
+  float32x2_t __ret_513; \
+  float32x2_t __s0_513 = __p0_513; \
+  float32x2_t __s1_513 = __p1_513; \
+  float32x2_t __s2_513 = __p2_513; \
+  __ret_513 = vfma_lane_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
+  __ret_513; \
+})
+#else
+#define vfms_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
+  float32x2_t __ret_514; \
+  float32x2_t __s0_514 = __p0_514; \
+  float32x2_t __s1_514 = __p1_514; \
+  float32x2_t __s2_514 = __p2_514; \
+  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
+  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
+  float32x2_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 1, 0); \
+  __ret_514 = __noswap_vfma_lane_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
+  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
+  __ret_514; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_laneq_f64(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
+  float64_t __ret_515; \
+  float64_t __s0_515 = __p0_515; \
+  float64_t __s1_515 = __p1_515; \
+  float64x2_t __s2_515 = __p2_515; \
+  __ret_515 = vfmad_laneq_f64(__s0_515, -__s1_515, __s2_515, __p3_515); \
+  __ret_515; \
+})
+#else
+#define vfmsd_laneq_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
+  float64_t __ret_516; \
+  float64_t __s0_516 = __p0_516; \
+  float64_t __s1_516 = __p1_516; \
+  float64x2_t __s2_516 = __p2_516; \
+  float64x2_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 1, 0); \
+  __ret_516 = __noswap_vfmad_laneq_f64(__s0_516, -__s1_516, __rev2_516, __p3_516); \
+  __ret_516; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_laneq_f32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
+  float32_t __ret_517; \
+  float32_t __s0_517 = __p0_517; \
+  float32_t __s1_517 = __p1_517; \
+  float32x4_t __s2_517 = __p2_517; \
+  __ret_517 = vfmas_laneq_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \
+  __ret_517; \
+})
+#else
+#define vfmss_laneq_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
+  float32_t __ret_518; \
+  float32_t __s0_518 = __p0_518; \
+  float32_t __s1_518 = __p1_518; \
+  float32x4_t __s2_518 = __p2_518; \
+  float32x4_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 3, 2, 1, 0); \
+  __ret_518 = __noswap_vfmas_laneq_f32(__s0_518, -__s1_518, __rev2_518, __p3_518); \
+  __ret_518; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
+  float64x2_t __ret_519; \
+  float64x2_t __s0_519 = __p0_519; \
+  float64x2_t __s1_519 = __p1_519; \
+  float64x2_t __s2_519 = __p2_519; \
+  __ret_519 = vfmaq_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \
+  __ret_519; \
+})
+#else
+#define vfmsq_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
+  float64x2_t __ret_520; \
+  float64x2_t __s0_520 = __p0_520; \
+  float64x2_t __s1_520 = __p1_520; \
+  float64x2_t __s2_520 = __p2_520; \
+  float64x2_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 1, 0); \
+  float64x2_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 1, 0); \
+  float64x2_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \
+  __ret_520 = __noswap_vfmaq_laneq_f64(__rev0_520, -__rev1_520, __rev2_520, __p3_520); \
+  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 1, 0); \
+  __ret_520; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
+  float32x4_t __ret_521; \
+  float32x4_t __s0_521 = __p0_521; \
+  float32x4_t __s1_521 = __p1_521; \
+  float32x4_t __s2_521 = __p2_521; \
+  __ret_521 = vfmaq_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \
+  __ret_521; \
+})
+#else
+#define vfmsq_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
+  float32x4_t __ret_522; \
+  float32x4_t __s0_522 = __p0_522; \
+  float32x4_t __s1_522 = __p1_522; \
+  float32x4_t __s2_522 = __p2_522; \
+  float32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
+  float32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
+  float32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
+  __ret_522 = __noswap_vfmaq_laneq_f32(__rev0_522, -__rev1_522, __rev2_522, __p3_522); \
+  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
+  __ret_522; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
+  float64x1_t __ret_523; \
+  float64x1_t __s0_523 = __p0_523; \
+  float64x1_t __s1_523 = __p1_523; \
+  float64x2_t __s2_523 = __p2_523; \
+  __ret_523 = vfma_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \
+  __ret_523; \
+})
+#else
+#define vfms_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
+  float64x1_t __ret_524; \
+  float64x1_t __s0_524 = __p0_524; \
+  float64x1_t __s1_524 = __p1_524; \
+  float64x2_t __s2_524 = __p2_524; \
+  float64x2_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \
+  __ret_524 = __noswap_vfma_laneq_f64(__s0_524, -__s1_524, __rev2_524, __p3_524); \
+  __ret_524; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
+  float32x2_t __ret_525; \
+  float32x2_t __s0_525 = __p0_525; \
+  float32x2_t __s1_525 = __p1_525; \
+  float32x4_t __s2_525 = __p2_525; \
+  __ret_525 = vfma_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \
+  __ret_525; \
+})
+#else
+#define vfms_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
+  float32x2_t __ret_526; \
+  float32x2_t __s0_526 = __p0_526; \
+  float32x2_t __s1_526 = __p1_526; \
+  float32x4_t __s2_526 = __p2_526; \
+  float32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
+  float32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
+  float32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
+  __ret_526 = __noswap_vfma_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \
+  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
+  __ret_526; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
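+// vget_high_* / vget_low_*: return the top or bottom half of a 128-bit
+// vector as a 64-bit vector; for example vget_high_f64(v) is {v[1]} and
+// vget_low_f64(v) is {v[0]}.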
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#else
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+  return __ret;
+}
+__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#else
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+  return __ret;
+}
+#endif
+
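+// vget_lane_* / vgetq_lane_*: extract one element as a scalar. These are
+// macros rather than functions so the lane index reaches the builtin as a
+// compile-time constant, as the lane-indexed instruction encodings require;
+// illustratively: float64_t x = vgetq_lane_f64(v, 1);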
+#define vget_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x1_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vget_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
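+// vld1*: load contiguous elements from memory; vld1q_f64(p) loads p[0] and
+// p[1] into a float64x2_t. The _dup forms load a single element and
+// broadcast it; the _lane forms insert one loaded element into an existing
+// vector at a constant lane.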
+#define vld1_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
+  __ret; \
+})
+#define vld1_dup_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_dup_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
+  __ret; \
+})
+#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
+  poly64x1_t __s1 = __p1; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s1 = __p1; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
+  __ret; \
+})
+#else
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s1 = __p1; \
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
+  __ret; \
+})
+#else
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
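+// vld1*_x2/_x3/_x4: load 2, 3 or 4 consecutive vectors with no
+// de-interleaving, returned in the .val[] array of the corresponding
+// x2/x3/x4 struct; illustratively, vld1q_f64_x2(p) fills .val[0] from
+// p[0..1] and .val[1] from p[2..3]. The builtin writes the aggregate result
+// through &__ret instead of returning it by value.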
+#define vld1_p64_x2(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x2(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld1_p64_x3(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x3(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld1_p64_x4(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x4(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
+  __ret; \
+})
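+// vld2*: structure loads that de-interleave element pairs; loading memory
+// {a0, b0, a1, b1} with vld2q_f64 yields .val[0] = {a0, a1} and
+// .val[1] = {b0, b1}.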
+#define vld2_p64(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld2q_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld2q_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld2q_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld2q_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_f64(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld2_dup_p64(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_dup_f64(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
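+// vld2*_lane: load one interleaved pair from memory into constant lane __p2
+// of the two vectors passed in via __p1, leaving all other lanes unchanged.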
+#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  poly64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  poly8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  poly8x16x2_t __s1 = __p1; \
+  poly8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  poly64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  poly64x2x2_t __s1 = __p1; \
+  poly64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  uint8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  uint8x16x2_t __s1 = __p1; \
+  uint8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  uint64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  uint64x2x2_t __s1 = __p1; \
+  uint64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x2_t __ret; \
+  int8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x2_t __ret; \
+  int8x16x2_t __s1 = __p1; \
+  int8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x2_t __ret; \
+  float64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x2_t __ret; \
+  float64x2x2_t __s1 = __p1; \
+  float64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x2_t __ret; \
+  int64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x2_t __ret; \
+  int64x2x2_t __s1 = __p1; \
+  int64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x2_t __ret; \
+  uint64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+  __ret; \
+})
+#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x2_t __ret; \
+  float64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
+  __ret; \
+})
+#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x2_t __ret; \
+  int64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
+  __ret; \
+})
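+/*
+ * The one-lane (64x1) forms above need no endian shuffle: a single-element
+ * vector has no lane order to reverse. Illustrative use (not part of the
+ * generated header), assuming src points at four readable uint64_t values:
+ *
+ *   uint64x1x2_t pair = vld2_u64(src);       // deinterleave src[0..1]
+ *   pair = vld2_lane_u64(src + 2, pair, 0);  // reload lane 0 from src[2..3]
+ */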
+#define vld3_p64(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld3q_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u64(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld3q_u64(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld3q_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s64(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld3q_s64(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_f64(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 10); \
+  __ret; \
+})
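+/*
+ * The vld3_dup_* forms load a single 3-element structure and replicate it
+ * to every lane of the three result vectors; only the result swizzle
+ * distinguishes the big-endian expansion.
+ */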
+#define vld3_dup_p64(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_dup_f64(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  poly64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  poly8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  poly8x16x3_t __s1 = __p1; \
+  poly8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  poly64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  poly64x2x3_t __s1 = __p1; \
+  poly64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  uint8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  uint8x16x3_t __s1 = __p1; \
+  uint8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  uint64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  uint64x2x3_t __s1 = __p1; \
+  uint64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x3_t __ret; \
+  int8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x3_t __ret; \
+  int8x16x3_t __s1 = __p1; \
+  int8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x3_t __ret; \
+  float64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x3_t __ret; \
+  float64x2x3_t __s1 = __p1; \
+  float64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x3_t __ret; \
+  int64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x3_t __ret; \
+  int64x2x3_t __s1 = __p1; \
+  int64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x3_t __ret; \
+  uint64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+  __ret; \
+})
+#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x3_t __ret; \
+  float64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
+  __ret; \
+})
+#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x3_t __ret; \
+  int64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
+  __ret; \
+})
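+/*
+ * vld4_* repeats the same deinterleaving pattern with four result vectors,
+ * so the big-endian expansions reverse val[0] through val[3].
+ */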
+#define vld4_p64(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld4q_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u64(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld4q_u64(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld4q_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s64(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld4q_s64(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_f64(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld4_dup_p64(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_dup_f64(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  poly64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  poly8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  poly8x16x4_t __s1 = __p1; \
+  poly8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  poly64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  poly64x2x4_t __s1 = __p1; \
+  poly64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  uint8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  uint8x16x4_t __s1 = __p1; \
+  uint8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  uint64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  uint64x2x4_t __s1 = __p1; \
+  uint64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x4_t __ret; \
+  int8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x4_t __ret; \
+  int8x16x4_t __s1 = __p1; \
+  int8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x4_t __ret; \
+  float64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x4_t __ret; \
+  float64x2x4_t __s1 = __p1; \
+  float64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x4_t __ret; \
+  int64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x4_t __ret; \
+  int64x2x4_t __s1 = __p1; \
+  int64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x4_t __ret; \
+  uint64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+  __ret; \
+})
+#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x4_t __ret; \
+  float64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
+  __ret; \
+})
+#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x4_t __ret; \
+  int64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
+  __ret; \
+})
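+/*
+ * vldrq_p128 loads one whole 128-bit polynomial value; with no lane
+ * structure to swap, a single definition serves both endiannesses.
+ */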
+#define vldrq_p128(__p0) __extension__ ({ \
+  poly128_t __ret; \
+  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
+  __ret; \
+})
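+/*
+ * The intrinsics from here on take no immediate operand, so they are
+ * emitted as __ai (always_inline) functions instead of statement-expression
+ * macros; the macro form is only needed when an argument, such as a lane
+ * number, must remain an integer constant expression.
+ */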
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
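+/*
+ * vmaxnmvq_*, vmaxvq_* and the vminnmv/vminv family below are horizontal
+ * reductions folding every lane into one scalar. The big-endian lane swap
+ * cannot change the result of a commutative reduction, but the expansions
+ * keep it for uniformity. Illustrative use (not part of the generated
+ * header), assuming buf points at 16 readable bytes:
+ *
+ *   uint8x16_t v = vld1q_u8(buf);
+ *   uint8_t m = vmaxvq_u8(v);   // largest of the 16 bytes
+ */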
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vminv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vminv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vminv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = __p0 + __p1 * __p2;
+  return __ret;
+}
+#else
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __rev0 + __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = __p0 + __p1 * __p2;
+  return __ret;
+}
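+/*
+ * The vmla*_laneq macros below compute __p0 + __p1 * (lane __p3 of __p2),
+ * broadcasting the lane through the splatq_laneq_ and splat_laneq_ helpers
+ * defined earlier in this header. The numeric suffixes on the locals
+ * (_527, _528, ...) are uniquifiers emitted by the header generator so
+ * that nested macro expansions cannot capture one another's variables.
+ * Illustrative use (not part of the generated header):
+ *
+ *   int32x4_t acc = vmlaq_laneq_s32(acc0, x, coeffs, 2);  // acc0 + x * coeffs[2]
+ */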
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u32(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
+  uint32x4_t __ret_527; \
+  uint32x4_t __s0_527 = __p0_527; \
+  uint32x4_t __s1_527 = __p1_527; \
+  uint32x4_t __s2_527 = __p2_527; \
+  __ret_527 = __s0_527 + __s1_527 * splatq_laneq_u32(__s2_527, __p3_527); \
+  __ret_527; \
+})
+#else
+#define vmlaq_laneq_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
+  uint32x4_t __ret_528; \
+  uint32x4_t __s0_528 = __p0_528; \
+  uint32x4_t __s1_528 = __p1_528; \
+  uint32x4_t __s2_528 = __p2_528; \
+  uint32x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
+  uint32x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
+  uint32x4_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 3, 2, 1, 0); \
+  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splatq_laneq_u32(__rev2_528, __p3_528); \
+  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
+  __ret_528; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u16(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
+  uint16x8_t __ret_529; \
+  uint16x8_t __s0_529 = __p0_529; \
+  uint16x8_t __s1_529 = __p1_529; \
+  uint16x8_t __s2_529 = __p2_529; \
+  __ret_529 = __s0_529 + __s1_529 * splatq_laneq_u16(__s2_529, __p3_529); \
+  __ret_529; \
+})
+#else
+#define vmlaq_laneq_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
+  uint16x8_t __ret_530; \
+  uint16x8_t __s0_530 = __p0_530; \
+  uint16x8_t __s1_530 = __p1_530; \
+  uint16x8_t __s2_530 = __p2_530; \
+  uint16x8_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splatq_laneq_u16(__rev2_530, __p3_530); \
+  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_530; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_f32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
+  float32x4_t __ret_531; \
+  float32x4_t __s0_531 = __p0_531; \
+  float32x4_t __s1_531 = __p1_531; \
+  float32x4_t __s2_531 = __p2_531; \
+  __ret_531 = __s0_531 + __s1_531 * splatq_laneq_f32(__s2_531, __p3_531); \
+  __ret_531; \
+})
+#else
+#define vmlaq_laneq_f32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
+  float32x4_t __ret_532; \
+  float32x4_t __s0_532 = __p0_532; \
+  float32x4_t __s1_532 = __p1_532; \
+  float32x4_t __s2_532 = __p2_532; \
+  float32x4_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \
+  float32x4_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \
+  float32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
+  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_f32(__rev2_532, __p3_532); \
+  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \
+  __ret_532; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
+  int32x4_t __ret_533; \
+  int32x4_t __s0_533 = __p0_533; \
+  int32x4_t __s1_533 = __p1_533; \
+  int32x4_t __s2_533 = __p2_533; \
+  __ret_533 = __s0_533 + __s1_533 * splatq_laneq_s32(__s2_533, __p3_533); \
+  __ret_533; \
+})
+#else
+#define vmlaq_laneq_s32(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
+  int32x4_t __ret_534; \
+  int32x4_t __s0_534 = __p0_534; \
+  int32x4_t __s1_534 = __p1_534; \
+  int32x4_t __s2_534 = __p2_534; \
+  int32x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
+  int32x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
+  int32x4_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 3, 2, 1, 0); \
+  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_s32(__rev2_534, __p3_534); \
+  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
+  __ret_534; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
+  int16x8_t __ret_535; \
+  int16x8_t __s0_535 = __p0_535; \
+  int16x8_t __s1_535 = __p1_535; \
+  int16x8_t __s2_535 = __p2_535; \
+  __ret_535 = __s0_535 + __s1_535 * splatq_laneq_s16(__s2_535, __p3_535); \
+  __ret_535; \
+})
+#else
+#define vmlaq_laneq_s16(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
+  int16x8_t __ret_536; \
+  int16x8_t __s0_536 = __p0_536; \
+  int16x8_t __s1_536 = __p1_536; \
+  int16x8_t __s2_536 = __p2_536; \
+  int16x8_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_s16(__rev2_536, __p3_536); \
+  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_536; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
+  uint32x2_t __ret_537; \
+  uint32x2_t __s0_537 = __p0_537; \
+  uint32x2_t __s1_537 = __p1_537; \
+  uint32x4_t __s2_537 = __p2_537; \
+  __ret_537 = __s0_537 + __s1_537 * splat_laneq_u32(__s2_537, __p3_537); \
+  __ret_537; \
+})
+#else
+#define vmla_laneq_u32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
+  uint32x2_t __ret_538; \
+  uint32x2_t __s0_538 = __p0_538; \
+  uint32x2_t __s1_538 = __p1_538; \
+  uint32x4_t __s2_538 = __p2_538; \
+  uint32x2_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 1, 0); \
+  uint32x2_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 1, 0); \
+  uint32x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
+  __ret_538 = __rev0_538 + __rev1_538 * __noswap_splat_laneq_u32(__rev2_538, __p3_538); \
+  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 1, 0); \
+  __ret_538; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
+  uint16x4_t __ret_539; \
+  uint16x4_t __s0_539 = __p0_539; \
+  uint16x4_t __s1_539 = __p1_539; \
+  uint16x8_t __s2_539 = __p2_539; \
+  __ret_539 = __s0_539 + __s1_539 * splat_laneq_u16(__s2_539, __p3_539); \
+  __ret_539; \
+})
+#else
+#define vmla_laneq_u16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
+  uint16x4_t __ret_540; \
+  uint16x4_t __s0_540 = __p0_540; \
+  uint16x4_t __s1_540 = __p1_540; \
+  uint16x8_t __s2_540 = __p2_540; \
+  uint16x4_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 3, 2, 1, 0); \
+  uint16x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
+  uint16x8_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_540 = __rev0_540 + __rev1_540 * __noswap_splat_laneq_u16(__rev2_540, __p3_540); \
+  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 3, 2, 1, 0); \
+  __ret_540; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_f32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
+  float32x2_t __ret_541; \
+  float32x2_t __s0_541 = __p0_541; \
+  float32x2_t __s1_541 = __p1_541; \
+  float32x4_t __s2_541 = __p2_541; \
+  __ret_541 = __s0_541 + __s1_541 * splat_laneq_f32(__s2_541, __p3_541); \
+  __ret_541; \
+})
+#else
+#define vmla_laneq_f32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
+  float32x2_t __ret_542; \
+  float32x2_t __s0_542 = __p0_542; \
+  float32x2_t __s1_542 = __p1_542; \
+  float32x4_t __s2_542 = __p2_542; \
+  float32x2_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \
+  float32x2_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \
+  float32x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
+  __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_f32(__rev2_542, __p3_542); \
+  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \
+  __ret_542; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
+  int32x2_t __ret_543; \
+  int32x2_t __s0_543 = __p0_543; \
+  int32x2_t __s1_543 = __p1_543; \
+  int32x4_t __s2_543 = __p2_543; \
+  __ret_543 = __s0_543 + __s1_543 * splat_laneq_s32(__s2_543, __p3_543); \
+  __ret_543; \
+})
+#else
+#define vmla_laneq_s32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
+  int32x2_t __ret_544; \
+  int32x2_t __s0_544 = __p0_544; \
+  int32x2_t __s1_544 = __p1_544; \
+  int32x4_t __s2_544 = __p2_544; \
+  int32x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
+  int32x2_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 1, 0); \
+  int32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
+  __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_s32(__rev2_544, __p3_544); \
+  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
+  __ret_544; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
+  int16x4_t __ret_545; \
+  int16x4_t __s0_545 = __p0_545; \
+  int16x4_t __s1_545 = __p1_545; \
+  int16x8_t __s2_545 = __p2_545; \
+  __ret_545 = __s0_545 + __s1_545 * splat_laneq_s16(__s2_545, __p3_545); \
+  __ret_545; \
+})
+#else
+#define vmla_laneq_s16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
+  int16x4_t __ret_546; \
+  int16x4_t __s0_546 = __p0_546; \
+  int16x4_t __s1_546 = __p1_546; \
+  int16x8_t __s2_546 = __p2_546; \
+  int16x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
+  int16x4_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 3, 2, 1, 0); \
+  int16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_s16(__rev2_546, __p3_546); \
+  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
+  __ret_546; \
+})
+#endif
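+// vmla[q]_laneq_*: lane-selected multiply-accumulate,
+// ret[i] = a[i] + b[i] * v[lane], with the lane taken from a 128-bit vector
+// (laneq). On big-endian targets each operand is element-reversed into
+// little-endian lane order with __builtin_shufflevector, combined through
+// the __noswap_* helpers (which skip the re-reversal), and the result is
+// reversed back, so lane numbers mean the same thing on either endianness.
+// Illustrative use (names hypothetical):
+//   int16x4_t acc2 = vmla_laneq_s16(acc, val, table, 5);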
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
+  uint64x2_t __ret_547; \
+  uint64x2_t __s0_547 = __p0_547; \
+  uint32x4_t __s1_547 = __p1_547; \
+  uint32x2_t __s2_547 = __p2_547; \
+  __ret_547 = __s0_547 + vmull_u32(vget_high_u32(__s1_547), splat_lane_u32(__s2_547, __p3_547)); \
+  __ret_547; \
+})
+#else
+#define vmlal_high_lane_u32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
+  uint64x2_t __ret_548; \
+  uint64x2_t __s0_548 = __p0_548; \
+  uint32x4_t __s1_548 = __p1_548; \
+  uint32x2_t __s2_548 = __p2_548; \
+  uint64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
+  uint32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
+  uint32x2_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 1, 0); \
+  __ret_548 = __rev0_548 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_548), __noswap_splat_lane_u32(__rev2_548, __p3_548)); \
+  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
+  __ret_548; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
+  uint32x4_t __ret_549; \
+  uint32x4_t __s0_549 = __p0_549; \
+  uint16x8_t __s1_549 = __p1_549; \
+  uint16x4_t __s2_549 = __p2_549; \
+  __ret_549 = __s0_549 + vmull_u16(vget_high_u16(__s1_549), splat_lane_u16(__s2_549, __p3_549)); \
+  __ret_549; \
+})
+#else
+#define vmlal_high_lane_u16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
+  uint32x4_t __ret_550; \
+  uint32x4_t __s0_550 = __p0_550; \
+  uint16x8_t __s1_550 = __p1_550; \
+  uint16x4_t __s2_550 = __p2_550; \
+  uint32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
+  uint16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 3, 2, 1, 0); \
+  __ret_550 = __rev0_550 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_550), __noswap_splat_lane_u16(__rev2_550, __p3_550)); \
+  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
+  __ret_550; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
+  int64x2_t __ret_551; \
+  int64x2_t __s0_551 = __p0_551; \
+  int32x4_t __s1_551 = __p1_551; \
+  int32x2_t __s2_551 = __p2_551; \
+  __ret_551 = __s0_551 + vmull_s32(vget_high_s32(__s1_551), splat_lane_s32(__s2_551, __p3_551)); \
+  __ret_551; \
+})
+#else
+#define vmlal_high_lane_s32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
+  int64x2_t __ret_552; \
+  int64x2_t __s0_552 = __p0_552; \
+  int32x4_t __s1_552 = __p1_552; \
+  int32x2_t __s2_552 = __p2_552; \
+  int64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
+  int32x4_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \
+  int32x2_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \
+  __ret_552 = __rev0_552 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_552), __noswap_splat_lane_s32(__rev2_552, __p3_552)); \
+  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
+  __ret_552; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
+  int32x4_t __ret_553; \
+  int32x4_t __s0_553 = __p0_553; \
+  int16x8_t __s1_553 = __p1_553; \
+  int16x4_t __s2_553 = __p2_553; \
+  __ret_553 = __s0_553 + vmull_s16(vget_high_s16(__s1_553), splat_lane_s16(__s2_553, __p3_553)); \
+  __ret_553; \
+})
+#else
+#define vmlal_high_lane_s16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
+  int32x4_t __ret_554; \
+  int32x4_t __s0_554 = __p0_554; \
+  int16x8_t __s1_554 = __p1_554; \
+  int16x4_t __s2_554 = __p2_554; \
+  int32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
+  int16x8_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \
+  __ret_554 = __rev0_554 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_554), __noswap_splat_lane_s16(__rev2_554, __p3_554)); \
+  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
+  __ret_554; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
+  uint64x2_t __ret_555; \
+  uint64x2_t __s0_555 = __p0_555; \
+  uint32x4_t __s1_555 = __p1_555; \
+  uint32x4_t __s2_555 = __p2_555; \
+  __ret_555 = __s0_555 + vmull_u32(vget_high_u32(__s1_555), splat_laneq_u32(__s2_555, __p3_555)); \
+  __ret_555; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
+  uint64x2_t __ret_556; \
+  uint64x2_t __s0_556 = __p0_556; \
+  uint32x4_t __s1_556 = __p1_556; \
+  uint32x4_t __s2_556 = __p2_556; \
+  uint64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
+  uint32x4_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \
+  uint32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
+  __ret_556 = __rev0_556 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_556), __noswap_splat_laneq_u32(__rev2_556, __p3_556)); \
+  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
+  __ret_556; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
+  uint32x4_t __ret_557; \
+  uint32x4_t __s0_557 = __p0_557; \
+  uint16x8_t __s1_557 = __p1_557; \
+  uint16x8_t __s2_557 = __p2_557; \
+  __ret_557 = __s0_557 + vmull_u16(vget_high_u16(__s1_557), splat_laneq_u16(__s2_557, __p3_557)); \
+  __ret_557; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
+  uint32x4_t __ret_558; \
+  uint32x4_t __s0_558 = __p0_558; \
+  uint16x8_t __s1_558 = __p1_558; \
+  uint16x8_t __s2_558 = __p2_558; \
+  uint32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
+  uint16x8_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_558 = __rev0_558 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_558), __noswap_splat_laneq_u16(__rev2_558, __p3_558)); \
+  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
+  __ret_558; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
+  int64x2_t __ret_559; \
+  int64x2_t __s0_559 = __p0_559; \
+  int32x4_t __s1_559 = __p1_559; \
+  int32x4_t __s2_559 = __p2_559; \
+  __ret_559 = __s0_559 + vmull_s32(vget_high_s32(__s1_559), splat_laneq_s32(__s2_559, __p3_559)); \
+  __ret_559; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
+  int64x2_t __ret_560; \
+  int64x2_t __s0_560 = __p0_560; \
+  int32x4_t __s1_560 = __p1_560; \
+  int32x4_t __s2_560 = __p2_560; \
+  int64x2_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \
+  int32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
+  int32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
+  __ret_560 = __rev0_560 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_560), __noswap_splat_laneq_s32(__rev2_560, __p3_560)); \
+  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \
+  __ret_560; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
+  int32x4_t __ret_561; \
+  int32x4_t __s0_561 = __p0_561; \
+  int16x8_t __s1_561 = __p1_561; \
+  int16x8_t __s2_561 = __p2_561; \
+  __ret_561 = __s0_561 + vmull_s16(vget_high_s16(__s1_561), splat_laneq_s16(__s2_561, __p3_561)); \
+  __ret_561; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
+  int32x4_t __ret_562; \
+  int32x4_t __s0_562 = __p0_562; \
+  int16x8_t __s1_562 = __p1_562; \
+  int16x8_t __s2_562 = __p2_562; \
+  int32x4_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \
+  int16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_562 = __rev0_562 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_562), __noswap_splat_laneq_s16(__rev2_562, __p3_562)); \
+  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \
+  __ret_562; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
+  uint64x2_t __ret_563; \
+  uint64x2_t __s0_563 = __p0_563; \
+  uint32x2_t __s1_563 = __p1_563; \
+  uint32x4_t __s2_563 = __p2_563; \
+  __ret_563 = __s0_563 + vmull_u32(__s1_563, splat_laneq_u32(__s2_563, __p3_563)); \
+  __ret_563; \
+})
+#else
+#define vmlal_laneq_u32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
+  uint64x2_t __ret_564; \
+  uint64x2_t __s0_564 = __p0_564; \
+  uint32x2_t __s1_564 = __p1_564; \
+  uint32x4_t __s2_564 = __p2_564; \
+  uint64x2_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \
+  uint32x2_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 1, 0); \
+  uint32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
+  __ret_564 = __rev0_564 + __noswap_vmull_u32(__rev1_564, __noswap_splat_laneq_u32(__rev2_564, __p3_564)); \
+  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \
+  __ret_564; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
+  uint32x4_t __ret_565; \
+  uint32x4_t __s0_565 = __p0_565; \
+  uint16x4_t __s1_565 = __p1_565; \
+  uint16x8_t __s2_565 = __p2_565; \
+  __ret_565 = __s0_565 + vmull_u16(__s1_565, splat_laneq_u16(__s2_565, __p3_565)); \
+  __ret_565; \
+})
+#else
+#define vmlal_laneq_u16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
+  uint32x4_t __ret_566; \
+  uint32x4_t __s0_566 = __p0_566; \
+  uint16x4_t __s1_566 = __p1_566; \
+  uint16x8_t __s2_566 = __p2_566; \
+  uint32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
+  uint16x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
+  uint16x8_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_566 = __rev0_566 + __noswap_vmull_u16(__rev1_566, __noswap_splat_laneq_u16(__rev2_566, __p3_566)); \
+  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
+  __ret_566; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
+  int64x2_t __ret_567; \
+  int64x2_t __s0_567 = __p0_567; \
+  int32x2_t __s1_567 = __p1_567; \
+  int32x4_t __s2_567 = __p2_567; \
+  __ret_567 = __s0_567 + vmull_s32(__s1_567, splat_laneq_s32(__s2_567, __p3_567)); \
+  __ret_567; \
+})
+#else
+#define vmlal_laneq_s32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
+  int64x2_t __ret_568; \
+  int64x2_t __s0_568 = __p0_568; \
+  int32x2_t __s1_568 = __p1_568; \
+  int32x4_t __s2_568 = __p2_568; \
+  int64x2_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \
+  int32x2_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \
+  int32x4_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \
+  __ret_568 = __rev0_568 + __noswap_vmull_s32(__rev1_568, __noswap_splat_laneq_s32(__rev2_568, __p3_568)); \
+  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \
+  __ret_568; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
+  int32x4_t __ret_569; \
+  int32x4_t __s0_569 = __p0_569; \
+  int16x4_t __s1_569 = __p1_569; \
+  int16x8_t __s2_569 = __p2_569; \
+  __ret_569 = __s0_569 + vmull_s16(__s1_569, splat_laneq_s16(__s2_569, __p3_569)); \
+  __ret_569; \
+})
+#else
+#define vmlal_laneq_s16(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
+  int32x4_t __ret_570; \
+  int32x4_t __s0_570 = __p0_570; \
+  int16x4_t __s1_570 = __p1_570; \
+  int16x8_t __s2_570 = __p2_570; \
+  int32x4_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \
+  int16x4_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \
+  int16x8_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_570 = __rev0_570 + __noswap_vmull_s16(__rev1_570, __noswap_splat_laneq_s16(__rev2_570, __p3_570)); \
+  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \
+  __ret_570; \
+})
+#endif
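+// vmlal_* are widening multiply-accumulates: products are formed at twice
+// the element width via vmull_* and then added, so vmlal_laneq_s16 produces
+// an int32x4_t from int16x4_t inputs. The _high_ variants draw the
+// multiplicand from the upper half of a 128-bit vector (vget_high_*).
+// Illustrative use (names hypothetical):
+//   int64x2_t acc2 = vmlal_high_lane_s32(acc, wide, narrow, 1);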
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
+  uint32x4_t __ret_571; \
+  uint32x4_t __s0_571 = __p0_571; \
+  uint32x4_t __s1_571 = __p1_571; \
+  uint32x4_t __s2_571 = __p2_571; \
+  __ret_571 = __s0_571 - __s1_571 * splatq_laneq_u32(__s2_571, __p3_571); \
+  __ret_571; \
+})
+#else
+#define vmlsq_laneq_u32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
+  uint32x4_t __ret_572; \
+  uint32x4_t __s0_572 = __p0_572; \
+  uint32x4_t __s1_572 = __p1_572; \
+  uint32x4_t __s2_572 = __p2_572; \
+  uint32x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
+  uint32x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
+  uint32x4_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \
+  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splatq_laneq_u32(__rev2_572, __p3_572); \
+  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
+  __ret_572; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
+  uint16x8_t __ret_573; \
+  uint16x8_t __s0_573 = __p0_573; \
+  uint16x8_t __s1_573 = __p1_573; \
+  uint16x8_t __s2_573 = __p2_573; \
+  __ret_573 = __s0_573 - __s1_573 * splatq_laneq_u16(__s2_573, __p3_573); \
+  __ret_573; \
+})
+#else
+#define vmlsq_laneq_u16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
+  uint16x8_t __ret_574; \
+  uint16x8_t __s0_574 = __p0_574; \
+  uint16x8_t __s1_574 = __p1_574; \
+  uint16x8_t __s2_574 = __p2_574; \
+  uint16x8_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splatq_laneq_u16(__rev2_574, __p3_574); \
+  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_574; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_f32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
+  float32x4_t __ret_575; \
+  float32x4_t __s0_575 = __p0_575; \
+  float32x4_t __s1_575 = __p1_575; \
+  float32x4_t __s2_575 = __p2_575; \
+  __ret_575 = __s0_575 - __s1_575 * splatq_laneq_f32(__s2_575, __p3_575); \
+  __ret_575; \
+})
+#else
+#define vmlsq_laneq_f32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
+  float32x4_t __ret_576; \
+  float32x4_t __s0_576 = __p0_576; \
+  float32x4_t __s1_576 = __p1_576; \
+  float32x4_t __s2_576 = __p2_576; \
+  float32x4_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \
+  float32x4_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \
+  float32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
+  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_f32(__rev2_576, __p3_576); \
+  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \
+  __ret_576; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s32(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
+  int32x4_t __ret_577; \
+  int32x4_t __s0_577 = __p0_577; \
+  int32x4_t __s1_577 = __p1_577; \
+  int32x4_t __s2_577 = __p2_577; \
+  __ret_577 = __s0_577 - __s1_577 * splatq_laneq_s32(__s2_577, __p3_577); \
+  __ret_577; \
+})
+#else
+#define vmlsq_laneq_s32(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
+  int32x4_t __ret_578; \
+  int32x4_t __s0_578 = __p0_578; \
+  int32x4_t __s1_578 = __p1_578; \
+  int32x4_t __s2_578 = __p2_578; \
+  int32x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
+  int32x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
+  int32x4_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 3, 2, 1, 0); \
+  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_s32(__rev2_578, __p3_578); \
+  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
+  __ret_578; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s16(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
+  int16x8_t __ret_579; \
+  int16x8_t __s0_579 = __p0_579; \
+  int16x8_t __s1_579 = __p1_579; \
+  int16x8_t __s2_579 = __p2_579; \
+  __ret_579 = __s0_579 - __s1_579 * splatq_laneq_s16(__s2_579, __p3_579); \
+  __ret_579; \
+})
+#else
+#define vmlsq_laneq_s16(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
+  int16x8_t __ret_580; \
+  int16x8_t __s0_580 = __p0_580; \
+  int16x8_t __s1_580 = __p1_580; \
+  int16x8_t __s2_580 = __p2_580; \
+  int16x8_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_s16(__rev2_580, __p3_580); \
+  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_580; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
+  uint32x2_t __ret_581; \
+  uint32x2_t __s0_581 = __p0_581; \
+  uint32x2_t __s1_581 = __p1_581; \
+  uint32x4_t __s2_581 = __p2_581; \
+  __ret_581 = __s0_581 - __s1_581 * splat_laneq_u32(__s2_581, __p3_581); \
+  __ret_581; \
+})
+#else
+#define vmls_laneq_u32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
+  uint32x2_t __ret_582; \
+  uint32x2_t __s0_582 = __p0_582; \
+  uint32x2_t __s1_582 = __p1_582; \
+  uint32x4_t __s2_582 = __p2_582; \
+  uint32x2_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 1, 0); \
+  uint32x2_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 1, 0); \
+  uint32x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
+  __ret_582 = __rev0_582 - __rev1_582 * __noswap_splat_laneq_u32(__rev2_582, __p3_582); \
+  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 1, 0); \
+  __ret_582; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
+  uint16x4_t __ret_583; \
+  uint16x4_t __s0_583 = __p0_583; \
+  uint16x4_t __s1_583 = __p1_583; \
+  uint16x8_t __s2_583 = __p2_583; \
+  __ret_583 = __s0_583 - __s1_583 * splat_laneq_u16(__s2_583, __p3_583); \
+  __ret_583; \
+})
+#else
+#define vmls_laneq_u16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
+  uint16x4_t __ret_584; \
+  uint16x4_t __s0_584 = __p0_584; \
+  uint16x4_t __s1_584 = __p1_584; \
+  uint16x8_t __s2_584 = __p2_584; \
+  uint16x4_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 3, 2, 1, 0); \
+  uint16x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
+  uint16x8_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_584 = __rev0_584 - __rev1_584 * __noswap_splat_laneq_u16(__rev2_584, __p3_584); \
+  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 3, 2, 1, 0); \
+  __ret_584; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_f32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
+  float32x2_t __ret_585; \
+  float32x2_t __s0_585 = __p0_585; \
+  float32x2_t __s1_585 = __p1_585; \
+  float32x4_t __s2_585 = __p2_585; \
+  __ret_585 = __s0_585 - __s1_585 * splat_laneq_f32(__s2_585, __p3_585); \
+  __ret_585; \
+})
+#else
+#define vmls_laneq_f32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
+  float32x2_t __ret_586; \
+  float32x2_t __s0_586 = __p0_586; \
+  float32x2_t __s1_586 = __p1_586; \
+  float32x4_t __s2_586 = __p2_586; \
+  float32x2_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \
+  float32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  float32x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
+  __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_f32(__rev2_586, __p3_586); \
+  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \
+  __ret_586; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
+  int32x2_t __ret_587; \
+  int32x2_t __s0_587 = __p0_587; \
+  int32x2_t __s1_587 = __p1_587; \
+  int32x4_t __s2_587 = __p2_587; \
+  __ret_587 = __s0_587 - __s1_587 * splat_laneq_s32(__s2_587, __p3_587); \
+  __ret_587; \
+})
+#else
+#define vmls_laneq_s32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
+  int32x2_t __ret_588; \
+  int32x2_t __s0_588 = __p0_588; \
+  int32x2_t __s1_588 = __p1_588; \
+  int32x4_t __s2_588 = __p2_588; \
+  int32x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
+  int32x2_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 1, 0); \
+  int32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
+  __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_s32(__rev2_588, __p3_588); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+  __ret_588; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
+  int16x4_t __ret_589; \
+  int16x4_t __s0_589 = __p0_589; \
+  int16x4_t __s1_589 = __p1_589; \
+  int16x8_t __s2_589 = __p2_589; \
+  __ret_589 = __s0_589 - __s1_589 * splat_laneq_s16(__s2_589, __p3_589); \
+  __ret_589; \
+})
+#else
+#define vmls_laneq_s16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
+  int16x4_t __ret_590; \
+  int16x4_t __s0_590 = __p0_590; \
+  int16x4_t __s1_590 = __p1_590; \
+  int16x8_t __s2_590 = __p2_590; \
+  int16x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
+  int16x4_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 3, 2, 1, 0); \
+  int16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_590 = __rev0_590 - __rev1_590 * __noswap_splat_laneq_s16(__rev2_590, __p3_590); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+  __ret_590; \
+})
+#endif
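+// vmls[q]_laneq_* mirror the vmla forms with a subtraction:
+// ret[i] = a[i] - b[i] * v[lane]; the endian-swap scaffolding is identical.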
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
+  uint64x2_t __ret_591; \
+  uint64x2_t __s0_591 = __p0_591; \
+  uint32x4_t __s1_591 = __p1_591; \
+  uint32x2_t __s2_591 = __p2_591; \
+  __ret_591 = __s0_591 - vmull_u32(vget_high_u32(__s1_591), splat_lane_u32(__s2_591, __p3_591)); \
+  __ret_591; \
+})
+#else
+#define vmlsl_high_lane_u32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
+  uint64x2_t __ret_592; \
+  uint64x2_t __s0_592 = __p0_592; \
+  uint32x4_t __s1_592 = __p1_592; \
+  uint32x2_t __s2_592 = __p2_592; \
+  uint64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  uint32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
+  uint32x2_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 1, 0); \
+  __ret_592 = __rev0_592 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_592), __noswap_splat_lane_u32(__rev2_592, __p3_592)); \
+  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
+  __ret_592; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
+  uint32x4_t __ret_593; \
+  uint32x4_t __s0_593 = __p0_593; \
+  uint16x8_t __s1_593 = __p1_593; \
+  uint16x4_t __s2_593 = __p2_593; \
+  __ret_593 = __s0_593 - vmull_u16(vget_high_u16(__s1_593), splat_lane_u16(__s2_593, __p3_593)); \
+  __ret_593; \
+})
+#else
+#define vmlsl_high_lane_u16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
+  uint32x4_t __ret_594; \
+  uint32x4_t __s0_594 = __p0_594; \
+  uint16x8_t __s1_594 = __p1_594; \
+  uint16x4_t __s2_594 = __p2_594; \
+  uint32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
+  uint16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 3, 2, 1, 0); \
+  __ret_594 = __rev0_594 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_594), __noswap_splat_lane_u16(__rev2_594, __p3_594)); \
+  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
+  __ret_594; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
+  int64x2_t __ret_595; \
+  int64x2_t __s0_595 = __p0_595; \
+  int32x4_t __s1_595 = __p1_595; \
+  int32x2_t __s2_595 = __p2_595; \
+  __ret_595 = __s0_595 - vmull_s32(vget_high_s32(__s1_595), splat_lane_s32(__s2_595, __p3_595)); \
+  __ret_595; \
+})
+#else
+#define vmlsl_high_lane_s32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
+  int64x2_t __ret_596; \
+  int64x2_t __s0_596 = __p0_596; \
+  int32x4_t __s1_596 = __p1_596; \
+  int32x2_t __s2_596 = __p2_596; \
+  int64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
+  int32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  int32x2_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \
+  __ret_596 = __rev0_596 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_596), __noswap_splat_lane_s32(__rev2_596, __p3_596)); \
+  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
+  __ret_596; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
+  int32x4_t __ret_597; \
+  int32x4_t __s0_597 = __p0_597; \
+  int16x8_t __s1_597 = __p1_597; \
+  int16x4_t __s2_597 = __p2_597; \
+  __ret_597 = __s0_597 - vmull_s16(vget_high_s16(__s1_597), splat_lane_s16(__s2_597, __p3_597)); \
+  __ret_597; \
+})
+#else
+#define vmlsl_high_lane_s16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
+  int32x4_t __ret_598; \
+  int32x4_t __s0_598 = __p0_598; \
+  int16x8_t __s1_598 = __p1_598; \
+  int16x4_t __s2_598 = __p2_598; \
+  int32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
+  int16x8_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \
+  __ret_598 = __rev0_598 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_598), __noswap_splat_lane_s16(__rev2_598, __p3_598)); \
+  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
+  __ret_598; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
+  uint64x2_t __ret_599; \
+  uint64x2_t __s0_599 = __p0_599; \
+  uint32x4_t __s1_599 = __p1_599; \
+  uint32x4_t __s2_599 = __p2_599; \
+  __ret_599 = __s0_599 - vmull_u32(vget_high_u32(__s1_599), splat_laneq_u32(__s2_599, __p3_599)); \
+  __ret_599; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
+  uint64x2_t __ret_600; \
+  uint64x2_t __s0_600 = __p0_600; \
+  uint32x4_t __s1_600 = __p1_600; \
+  uint32x4_t __s2_600 = __p2_600; \
+  uint64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
+  uint32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  uint32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
+  __ret_600 = __rev0_600 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_600), __noswap_splat_laneq_u32(__rev2_600, __p3_600)); \
+  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
+  __ret_600; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
+  uint32x4_t __ret_601; \
+  uint32x4_t __s0_601 = __p0_601; \
+  uint16x8_t __s1_601 = __p1_601; \
+  uint16x8_t __s2_601 = __p2_601; \
+  __ret_601 = __s0_601 - vmull_u16(vget_high_u16(__s1_601), splat_laneq_u16(__s2_601, __p3_601)); \
+  __ret_601; \
+})
+#else
+#define vmlsl_high_laneq_u16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
+  uint32x4_t __ret_602; \
+  uint32x4_t __s0_602 = __p0_602; \
+  uint16x8_t __s1_602 = __p1_602; \
+  uint16x8_t __s2_602 = __p2_602; \
+  uint32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
+  uint16x8_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_602 = __rev0_602 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_602), __noswap_splat_laneq_u16(__rev2_602, __p3_602)); \
+  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
+  __ret_602; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  int64x2_t __ret_603; \
+  int64x2_t __s0_603 = __p0_603; \
+  int32x4_t __s1_603 = __p1_603; \
+  int32x4_t __s2_603 = __p2_603; \
+  __ret_603 = __s0_603 - vmull_s32(vget_high_s32(__s1_603), splat_laneq_s32(__s2_603, __p3_603)); \
+  __ret_603; \
+})
+#else
+#define vmlsl_high_laneq_s32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  int64x2_t __ret_604; \
+  int64x2_t __s0_604 = __p0_604; \
+  int32x4_t __s1_604 = __p1_604; \
+  int32x4_t __s2_604 = __p2_604; \
+  int64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  int32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  int32x4_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \
+  __ret_604 = __rev0_604 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_604), __noswap_splat_laneq_s32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  int32x4_t __ret_605; \
+  int32x4_t __s0_605 = __p0_605; \
+  int16x8_t __s1_605 = __p1_605; \
+  int16x8_t __s2_605 = __p2_605; \
+  __ret_605 = __s0_605 - vmull_s16(vget_high_s16(__s1_605), splat_laneq_s16(__s2_605, __p3_605)); \
+  __ret_605; \
+})
+#else
+#define vmlsl_high_laneq_s16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  int32x4_t __ret_606; \
+  int32x4_t __s0_606 = __p0_606; \
+  int16x8_t __s1_606 = __p1_606; \
+  int16x8_t __s2_606 = __p2_606; \
+  int32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  int16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_606 = __rev0_606 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_606), __noswap_splat_laneq_s16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  uint64x2_t __ret_607; \
+  uint64x2_t __s0_607 = __p0_607; \
+  uint32x2_t __s1_607 = __p1_607; \
+  uint32x4_t __s2_607 = __p2_607; \
+  __ret_607 = __s0_607 - vmull_u32(__s1_607, splat_laneq_u32(__s2_607, __p3_607)); \
+  __ret_607; \
+})
+#else
+#define vmlsl_laneq_u32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  uint64x2_t __ret_608; \
+  uint64x2_t __s0_608 = __p0_608; \
+  uint32x2_t __s1_608 = __p1_608; \
+  uint32x4_t __s2_608 = __p2_608; \
+  uint64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  uint32x2_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 1, 0); \
+  uint32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  __ret_608 = __rev0_608 - __noswap_vmull_u32(__rev1_608, __noswap_splat_laneq_u32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  uint32x4_t __ret_609; \
+  uint32x4_t __s0_609 = __p0_609; \
+  uint16x4_t __s1_609 = __p1_609; \
+  uint16x8_t __s2_609 = __p2_609; \
+  __ret_609 = __s0_609 - vmull_u16(__s1_609, splat_laneq_u16(__s2_609, __p3_609)); \
+  __ret_609; \
+})
+#else
+#define vmlsl_laneq_u16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  uint32x4_t __ret_610; \
+  uint32x4_t __s0_610 = __p0_610; \
+  uint16x4_t __s1_610 = __p1_610; \
+  uint16x8_t __s2_610 = __p2_610; \
+  uint32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  uint16x4_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 3, 2, 1, 0); \
+  uint16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_610 = __rev0_610 - __noswap_vmull_u16(__rev1_610, __noswap_splat_laneq_u16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  int64x2_t __ret_611; \
+  int64x2_t __s0_611 = __p0_611; \
+  int32x2_t __s1_611 = __p1_611; \
+  int32x4_t __s2_611 = __p2_611; \
+  __ret_611 = __s0_611 - vmull_s32(__s1_611, splat_laneq_s32(__s2_611, __p3_611)); \
+  __ret_611; \
+})
+#else
+#define vmlsl_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  int64x2_t __ret_612; \
+  int64x2_t __s0_612 = __p0_612; \
+  int32x2_t __s1_612 = __p1_612; \
+  int32x4_t __s2_612 = __p2_612; \
+  int64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  int32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  int32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  __ret_612 = __rev0_612 - __noswap_vmull_s32(__rev1_612, __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  int32x4_t __ret_613; \
+  int32x4_t __s0_613 = __p0_613; \
+  int16x4_t __s1_613 = __p1_613; \
+  int16x8_t __s2_613 = __p2_613; \
+  __ret_613 = __s0_613 - vmull_s16(__s1_613, splat_laneq_s16(__s2_613, __p3_613)); \
+  __ret_613; \
+})
+#else
+#define vmlsl_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  int32x4_t __ret_614; \
+  int32x4_t __s0_614 = __p0_614; \
+  int16x4_t __s1_614 = __p1_614; \
+  int16x8_t __s2_614 = __p2_614; \
+  int32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  int16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  int16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_614 = __rev0_614 - __noswap_vmull_s16(__rev1_614, __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
+})
+#endif
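+// vmlsl_* are the widening multiply-subtract counterparts of vmlal_*:
+// ret[i] = a[i] - widen(b[i]) * widen(v[lane]), again built on vmull_*.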
+
+__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) {__p0};
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmov_n_f64(float64_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) {__p0};
+  return __ret;
+}
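+// vmov[q]_n_*: broadcast one scalar across all lanes via a vector
+// initializer. Only the 128-bit (q) forms carry a big-endian reversal; the
+// one-lane x1 forms need none.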
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_615) {
+  uint16x8_t __ret_615;
+  uint8x8_t __a1_615 = vget_high_u8(__p0_615);
+  __ret_615 = (uint16x8_t)(vshll_n_u8(__a1_615, 0));
+  return __ret_615;
+}
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_616) {
+  uint16x8_t __ret_616;
+  uint8x16_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __a1_616 = __noswap_vget_high_u8(__rev0_616);
+  __ret_616 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_616, 0));
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_616;
+}
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_617) {
+  uint16x8_t __ret_617;
+  uint8x8_t __a1_617 = __noswap_vget_high_u8(__p0_617);
+  __ret_617 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_617, 0));
+  return __ret_617;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_618) {
+  uint64x2_t __ret_618;
+  uint32x2_t __a1_618 = vget_high_u32(__p0_618);
+  __ret_618 = (uint64x2_t)(vshll_n_u32(__a1_618, 0));
+  return __ret_618;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_619) {
+  uint64x2_t __ret_619;
+  uint32x4_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 3, 2, 1, 0);
+  uint32x2_t __a1_619 = __noswap_vget_high_u32(__rev0_619);
+  __ret_619 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_619, 0));
+  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 1, 0);
+  return __ret_619;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_620) {
+  uint64x2_t __ret_620;
+  uint32x2_t __a1_620 = __noswap_vget_high_u32(__p0_620);
+  __ret_620 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_620, 0));
+  return __ret_620;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_621) {
+  uint32x4_t __ret_621;
+  uint16x4_t __a1_621 = vget_high_u16(__p0_621);
+  __ret_621 = (uint32x4_t)(vshll_n_u16(__a1_621, 0));
+  return __ret_621;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_622) {
+  uint32x4_t __ret_622;
+  uint16x8_t __rev0_622;  __rev0_622 = __builtin_shufflevector(__p0_622, __p0_622, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x4_t __a1_622 = __noswap_vget_high_u16(__rev0_622);
+  __ret_622 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_622, 0));
+  __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0);
+  return __ret_622;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_623) {
+  uint32x4_t __ret_623;
+  uint16x4_t __a1_623 = __noswap_vget_high_u16(__p0_623);
+  __ret_623 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_623, 0));
+  return __ret_623;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_624) {
+  int16x8_t __ret_624;
+  int8x8_t __a1_624 = vget_high_s8(__p0_624);
+  __ret_624 = (int16x8_t)(vshll_n_s8(__a1_624, 0));
+  return __ret_624;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_625) {
+  int16x8_t __ret_625;
+  int8x16_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__p0_625, __p0_625, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __a1_625 = __noswap_vget_high_s8(__rev0_625);
+  __ret_625 = (int16x8_t)(__noswap_vshll_n_s8(__a1_625, 0));
+  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_625;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_626) {
+  int16x8_t __ret_626;
+  int8x8_t __a1_626 = __noswap_vget_high_s8(__p0_626);
+  __ret_626 = (int16x8_t)(__noswap_vshll_n_s8(__a1_626, 0));
+  return __ret_626;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_627) {
+  int64x2_t __ret_627;
+  int32x2_t __a1_627 = vget_high_s32(__p0_627);
+  __ret_627 = (int64x2_t)(vshll_n_s32(__a1_627, 0));
+  return __ret_627;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_628) {
+  int64x2_t __ret_628;
+  int32x4_t __rev0_628;  __rev0_628 = __builtin_shufflevector(__p0_628, __p0_628, 3, 2, 1, 0);
+  int32x2_t __a1_628 = __noswap_vget_high_s32(__rev0_628);
+  __ret_628 = (int64x2_t)(__noswap_vshll_n_s32(__a1_628, 0));
+  __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0);
+  return __ret_628;
+}
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_629) {
+  int64x2_t __ret_629;
+  int32x2_t __a1_629 = __noswap_vget_high_s32(__p0_629);
+  __ret_629 = (int64x2_t)(__noswap_vshll_n_s32(__a1_629, 0));
+  return __ret_629;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_630) {
+  int32x4_t __ret_630;
+  int16x4_t __a1_630 = vget_high_s16(__p0_630);
+  __ret_630 = (int32x4_t)(vshll_n_s16(__a1_630, 0));
+  return __ret_630;
+}
+#else
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_631) {
+  int32x4_t __ret_631;
+  int16x8_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__p0_631, __p0_631, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x4_t __a1_631 = __noswap_vget_high_s16(__rev0_631);
+  __ret_631 = (int32x4_t)(__noswap_vshll_n_s16(__a1_631, 0));
+  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0);
+  return __ret_631;
+}
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_632) {
+  int32x4_t __ret_632;
+  int16x4_t __a1_632 = __noswap_vget_high_s16(__p0_632);
+  __ret_632 = (int32x4_t)(__noswap_vshll_n_s16(__a1_632, 0));
+  return __ret_632;
+}
+#endif
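+// vmovl_high_*: widen the upper half of a 128-bit vector to twice the
+// element width. The extension is written as a shift-left-long by 0
+// (vshll_n_*), which widens without changing any value. The extra
+// __noswap_vmovl_high_* definitions let other big-endian bodies compose
+// this op on already-reversed operands without double-swapping.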
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
+  return __ret;
+}
+#else
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
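+// vmovn_high_*: narrow __p1 to half width and pack it into the upper half
+// of the result, keeping __p0 as the lower half (vcombine_*(p0, vmovn(p1))).
+// Illustrative use (names hypothetical):
+//   uint16x8_t packed = vmovn_high_u32(lo_half, wide);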
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
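+// Scalar-by-lane multiplies follow: vmuld_lane_f64 and vmuls_lane_f32
+// multiply a scalar by one extracted lane (vget_lane_*); the _laneq forms
+// index into a 128-bit vector instead of a 64-bit one.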
+#define vmuld_lane_f64(__p0_633, __p1_633, __p2_633) __extension__ ({ \
+  float64_t __ret_633; \
+  float64_t __s0_633 = __p0_633; \
+  float64x1_t __s1_633 = __p1_633; \
+  __ret_633 = __s0_633 * vget_lane_f64(__s1_633, __p2_633); \
+  __ret_633; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_lane_f32(__p0_634, __p1_634, __p2_634) __extension__ ({ \
+  float32_t __ret_634; \
+  float32_t __s0_634 = __p0_634; \
+  float32x2_t __s1_634 = __p1_634; \
+  __ret_634 = __s0_634 * vget_lane_f32(__s1_634, __p2_634); \
+  __ret_634; \
+})
+#else
+#define vmuls_lane_f32(__p0_635, __p1_635, __p2_635) __extension__ ({ \
+  float32_t __ret_635; \
+  float32_t __s0_635 = __p0_635; \
+  float32x2_t __s1_635 = __p1_635; \
+  float32x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
+  __ret_635 = __s0_635 * __noswap_vget_lane_f32(__rev1_635, __p2_635); \
+  __ret_635; \
+})
+#endif
+
+#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f64(__p0_636, __p1_636, __p2_636) __extension__ ({ \
+  float64x2_t __ret_636; \
+  float64x2_t __s0_636 = __p0_636; \
+  float64x1_t __s1_636 = __p1_636; \
+  __ret_636 = __s0_636 * splatq_lane_f64(__s1_636, __p2_636); \
+  __ret_636; \
+})
+#else
+#define vmulq_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  float64x2_t __ret_637; \
+  float64x2_t __s0_637 = __p0_637; \
+  float64x1_t __s1_637 = __p1_637; \
+  float64x2_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 1, 0); \
+  __ret_637 = __rev0_637 * __noswap_splatq_lane_f64(__s1_637, __p2_637); \
+  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 1, 0); \
+  __ret_637; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuld_laneq_f64(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  float64_t __ret_638; \
+  float64_t __s0_638 = __p0_638; \
+  float64x2_t __s1_638 = __p1_638; \
+  __ret_638 = __s0_638 * vgetq_lane_f64(__s1_638, __p2_638); \
+  __ret_638; \
+})
+#else
+#define vmuld_laneq_f64(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  float64_t __ret_639; \
+  float64_t __s0_639 = __p0_639; \
+  float64x2_t __s1_639 = __p1_639; \
+  float64x2_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \
+  __ret_639 = __s0_639 * __noswap_vgetq_lane_f64(__rev1_639, __p2_639); \
+  __ret_639; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_laneq_f32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  float32_t __ret_640; \
+  float32_t __s0_640 = __p0_640; \
+  float32x4_t __s1_640 = __p1_640; \
+  __ret_640 = __s0_640 * vgetq_lane_f32(__s1_640, __p2_640); \
+  __ret_640; \
+})
+#else
+#define vmuls_laneq_f32(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  float32_t __ret_641; \
+  float32_t __s0_641 = __p0_641; \
+  float32x4_t __s1_641 = __p1_641; \
+  float32x4_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 3, 2, 1, 0); \
+  __ret_641 = __s0_641 * __noswap_vgetq_lane_f32(__rev1_641, __p2_641); \
+  __ret_641; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
+  __ret; \
+})
+#else
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
+  __ret; \
+})
+#endif
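+// The float64x1_t lane multiplies above go through
+// __builtin_neon_vmul_laneq_v rather than a splat, since a one-element
+// vector has nothing to shuffle; the trailing constant selects the element
+// type (here float64), and only the 128-bit lane source is reversed on
+// big-endian.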
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  uint32x4_t __ret_642; \
+  uint32x4_t __s0_642 = __p0_642; \
+  uint32x4_t __s1_642 = __p1_642; \
+  __ret_642 = __s0_642 * splatq_laneq_u32(__s1_642, __p2_642); \
+  __ret_642; \
+})
+#else
+#define vmulq_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  uint32x4_t __ret_643; \
+  uint32x4_t __s0_643 = __p0_643; \
+  uint32x4_t __s1_643 = __p1_643; \
+  uint32x4_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 3, 2, 1, 0); \
+  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
+  __ret_643 = __rev0_643 * __noswap_splatq_laneq_u32(__rev1_643, __p2_643); \
+  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 3, 2, 1, 0); \
+  __ret_643; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  uint16x8_t __ret_644; \
+  uint16x8_t __s0_644 = __p0_644; \
+  uint16x8_t __s1_644 = __p1_644; \
+  __ret_644 = __s0_644 * splatq_laneq_u16(__s1_644, __p2_644); \
+  __ret_644; \
+})
+#else
+#define vmulq_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  uint16x8_t __ret_645; \
+  uint16x8_t __s0_645 = __p0_645; \
+  uint16x8_t __s1_645 = __p1_645; \
+  uint16x8_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_645 = __rev0_645 * __noswap_splatq_laneq_u16(__rev1_645, __p2_645); \
+  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_645; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f64(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  float64x2_t __ret_646; \
+  float64x2_t __s0_646 = __p0_646; \
+  float64x2_t __s1_646 = __p1_646; \
+  __ret_646 = __s0_646 * splatq_laneq_f64(__s1_646, __p2_646); \
+  __ret_646; \
+})
+#else
+#define vmulq_laneq_f64(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  float64x2_t __ret_647; \
+  float64x2_t __s0_647 = __p0_647; \
+  float64x2_t __s1_647 = __p1_647; \
+  float64x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
+  float64x2_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 1, 0); \
+  __ret_647 = __rev0_647 * __noswap_splatq_laneq_f64(__rev1_647, __p2_647); \
+  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
+  __ret_647; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  float32x4_t __ret_648; \
+  float32x4_t __s0_648 = __p0_648; \
+  float32x4_t __s1_648 = __p1_648; \
+  __ret_648 = __s0_648 * splatq_laneq_f32(__s1_648, __p2_648); \
+  __ret_648; \
+})
+#else
+#define vmulq_laneq_f32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  float32x4_t __ret_649; \
+  float32x4_t __s0_649 = __p0_649; \
+  float32x4_t __s1_649 = __p1_649; \
+  float32x4_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 3, 2, 1, 0); \
+  float32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
+  __ret_649 = __rev0_649 * __noswap_splatq_laneq_f32(__rev1_649, __p2_649); \
+  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 3, 2, 1, 0); \
+  __ret_649; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s32(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  int32x4_t __ret_650; \
+  int32x4_t __s0_650 = __p0_650; \
+  int32x4_t __s1_650 = __p1_650; \
+  __ret_650 = __s0_650 * splatq_laneq_s32(__s1_650, __p2_650); \
+  __ret_650; \
+})
+#else
+#define vmulq_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  int32x4_t __ret_651; \
+  int32x4_t __s0_651 = __p0_651; \
+  int32x4_t __s1_651 = __p1_651; \
+  int32x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
+  int32x4_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 3, 2, 1, 0); \
+  __ret_651 = __rev0_651 * __noswap_splatq_laneq_s32(__rev1_651, __p2_651); \
+  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
+  __ret_651; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s16(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  int16x8_t __ret_652; \
+  int16x8_t __s0_652 = __p0_652; \
+  int16x8_t __s1_652 = __p1_652; \
+  __ret_652 = __s0_652 * splatq_laneq_s16(__s1_652, __p2_652); \
+  __ret_652; \
+})
+#else
+#define vmulq_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  int16x8_t __ret_653; \
+  int16x8_t __s0_653 = __p0_653; \
+  int16x8_t __s1_653 = __p1_653; \
+  int16x8_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_653 = __rev0_653 * __noswap_splatq_laneq_s16(__rev1_653, __p2_653); \
+  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_653; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u32(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  uint32x2_t __ret_654; \
+  uint32x2_t __s0_654 = __p0_654; \
+  uint32x4_t __s1_654 = __p1_654; \
+  __ret_654 = __s0_654 * splat_laneq_u32(__s1_654, __p2_654); \
+  __ret_654; \
+})
+#else
+#define vmul_laneq_u32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  uint32x2_t __ret_655; \
+  uint32x2_t __s0_655 = __p0_655; \
+  uint32x4_t __s1_655 = __p1_655; \
+  uint32x2_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 1, 0); \
+  uint32x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
+  __ret_655 = __rev0_655 * __noswap_splat_laneq_u32(__rev1_655, __p2_655); \
+  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 1, 0); \
+  __ret_655; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u16(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  uint16x4_t __ret_656; \
+  uint16x4_t __s0_656 = __p0_656; \
+  uint16x8_t __s1_656 = __p1_656; \
+  __ret_656 = __s0_656 * splat_laneq_u16(__s1_656, __p2_656); \
+  __ret_656; \
+})
+#else
+#define vmul_laneq_u16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  uint16x4_t __ret_657; \
+  uint16x4_t __s0_657 = __p0_657; \
+  uint16x8_t __s1_657 = __p1_657; \
+  uint16x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
+  uint16x8_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_657 = __rev0_657 * __noswap_splat_laneq_u16(__rev1_657, __p2_657); \
+  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 3, 2, 1, 0); \
+  __ret_657; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f32(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  float32x2_t __ret_658; \
+  float32x2_t __s0_658 = __p0_658; \
+  float32x4_t __s1_658 = __p1_658; \
+  __ret_658 = __s0_658 * splat_laneq_f32(__s1_658, __p2_658); \
+  __ret_658; \
+})
+#else
+#define vmul_laneq_f32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  float32x2_t __ret_659; \
+  float32x2_t __s0_659 = __p0_659; \
+  float32x4_t __s1_659 = __p1_659; \
+  float32x2_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \
+  float32x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
+  __ret_659 = __rev0_659 * __noswap_splat_laneq_f32(__rev1_659, __p2_659); \
+  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \
+  __ret_659; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  int32x2_t __ret_660; \
+  int32x2_t __s0_660 = __p0_660; \
+  int32x4_t __s1_660 = __p1_660; \
+  __ret_660 = __s0_660 * splat_laneq_s32(__s1_660, __p2_660); \
+  __ret_660; \
+})
+#else
+#define vmul_laneq_s32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  int32x2_t __ret_661; \
+  int32x2_t __s0_661 = __p0_661; \
+  int32x4_t __s1_661 = __p1_661; \
+  int32x2_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 1, 0); \
+  int32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
+  __ret_661 = __rev0_661 * __noswap_splat_laneq_s32(__rev1_661, __p2_661); \
+  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
+  __ret_661; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  int16x4_t __ret_662; \
+  int16x4_t __s0_662 = __p0_662; \
+  int16x8_t __s1_662 = __p1_662; \
+  __ret_662 = __s0_662 * splat_laneq_s16(__s1_662, __p2_662); \
+  __ret_662; \
+})
+#else
+#define vmul_laneq_s16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  int16x4_t __ret_663; \
+  int16x4_t __s0_663 = __p0_663; \
+  int16x8_t __s1_663 = __p1_663; \
+  int16x4_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 3, 2, 1, 0); \
+  int16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_663 = __rev0_663 * __noswap_splat_laneq_s16(__rev1_663, __p2_663); \
+  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
+  __ret_663; \
+})
+#endif
+
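+// vmul_n_f64/vmulq_n_f64: multiply each lane by a scalar, implemented by
+// broadcasting the scalar into a vector literal.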
+__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 * (float64x2_t) {__p1, __p1};
+  return __ret;
+}
+#else
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __rev0 * (float64x2_t) {__p1, __p1};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
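+// vmull_p64: 64x64 -> 128-bit carry-less (polynomial) multiply, the PMULL
+// instruction used in GCM- and CRC-style computations.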
+__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
+  poly128_t __ret;
+  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
+  return __ret;
+}
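+// vmull_high_*: widening multiply of the high halves of two 128-bit vectors,
+// i.e. vmull_*(vget_high_*(a), vget_high_*(b)) in a single intrinsic.
+// Usage sketch (variable names are illustrative):
+//   int16x8_t hi_products = vmull_high_s8(a, b); // lanes 8..15 of a and b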
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly16x8_t __ret;
+  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
+  return __ret;
+}
+#else
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly16x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
+  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
+  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
+  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
+  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+  return __ret;
+}
+#else
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly128_t __ret;
+  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
+  return __ret;
+}
+#else
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly128_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
+  return __ret;
+}
+#endif
+
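+// vmull_high_lane_*: widening multiply of the high half of the first operand
+// by a single lane (selected by the constant __p2) of a 64-bit second operand.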
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  uint64x2_t __ret_664; \
+  uint32x4_t __s0_664 = __p0_664; \
+  uint32x2_t __s1_664 = __p1_664; \
+  __ret_664 = vmull_u32(vget_high_u32(__s0_664), splat_lane_u32(__s1_664, __p2_664)); \
+  __ret_664; \
+})
+#else
+#define vmull_high_lane_u32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  uint64x2_t __ret_665; \
+  uint32x4_t __s0_665 = __p0_665; \
+  uint32x2_t __s1_665 = __p1_665; \
+  uint32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
+  uint32x2_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 1, 0); \
+  __ret_665 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_665), __noswap_splat_lane_u32(__rev1_665, __p2_665)); \
+  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
+  __ret_665; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  uint32x4_t __ret_666; \
+  uint16x8_t __s0_666 = __p0_666; \
+  uint16x4_t __s1_666 = __p1_666; \
+  __ret_666 = vmull_u16(vget_high_u16(__s0_666), splat_lane_u16(__s1_666, __p2_666)); \
+  __ret_666; \
+})
+#else
+#define vmull_high_lane_u16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  uint32x4_t __ret_667; \
+  uint16x8_t __s0_667 = __p0_667; \
+  uint16x4_t __s1_667 = __p1_667; \
+  uint16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 3, 2, 1, 0); \
+  __ret_667 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_667), __noswap_splat_lane_u16(__rev1_667, __p2_667)); \
+  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
+  __ret_667; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  int64x2_t __ret_668; \
+  int32x4_t __s0_668 = __p0_668; \
+  int32x2_t __s1_668 = __p1_668; \
+  __ret_668 = vmull_s32(vget_high_s32(__s0_668), splat_lane_s32(__s1_668, __p2_668)); \
+  __ret_668; \
+})
+#else
+#define vmull_high_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  int64x2_t __ret_669; \
+  int32x4_t __s0_669 = __p0_669; \
+  int32x2_t __s1_669 = __p1_669; \
+  int32x4_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \
+  int32x2_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \
+  __ret_669 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_669), __noswap_splat_lane_s32(__rev1_669, __p2_669)); \
+  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
+  __ret_669; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  int32x4_t __ret_670; \
+  int16x8_t __s0_670 = __p0_670; \
+  int16x4_t __s1_670 = __p1_670; \
+  __ret_670 = vmull_s16(vget_high_s16(__s0_670), splat_lane_s16(__s1_670, __p2_670)); \
+  __ret_670; \
+})
+#else
+#define vmull_high_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  int32x4_t __ret_671; \
+  int16x8_t __s0_671 = __p0_671; \
+  int16x4_t __s1_671 = __p1_671; \
+  int16x8_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \
+  __ret_671 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_671), __noswap_splat_lane_s16(__rev1_671, __p2_671)); \
+  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
+  __ret_671; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  uint64x2_t __ret_672; \
+  uint32x4_t __s0_672 = __p0_672; \
+  uint32x4_t __s1_672 = __p1_672; \
+  __ret_672 = vmull_u32(vget_high_u32(__s0_672), splat_laneq_u32(__s1_672, __p2_672)); \
+  __ret_672; \
+})
+#else
+#define vmull_high_laneq_u32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  uint64x2_t __ret_673; \
+  uint32x4_t __s0_673 = __p0_673; \
+  uint32x4_t __s1_673 = __p1_673; \
+  uint32x4_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \
+  uint32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
+  __ret_673 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_673), __noswap_splat_laneq_u32(__rev1_673, __p2_673)); \
+  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
+  __ret_673; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  uint32x4_t __ret_674; \
+  uint16x8_t __s0_674 = __p0_674; \
+  uint16x8_t __s1_674 = __p1_674; \
+  __ret_674 = vmull_u16(vget_high_u16(__s0_674), splat_laneq_u16(__s1_674, __p2_674)); \
+  __ret_674; \
+})
+#else
+#define vmull_high_laneq_u16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  uint32x4_t __ret_675; \
+  uint16x8_t __s0_675 = __p0_675; \
+  uint16x8_t __s1_675 = __p1_675; \
+  uint16x8_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_675 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_675), __noswap_splat_laneq_u16(__rev1_675, __p2_675)); \
+  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
+  __ret_675; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  int64x2_t __ret_676; \
+  int32x4_t __s0_676 = __p0_676; \
+  int32x4_t __s1_676 = __p1_676; \
+  __ret_676 = vmull_s32(vget_high_s32(__s0_676), splat_laneq_s32(__s1_676, __p2_676)); \
+  __ret_676; \
+})
+#else
+#define vmull_high_laneq_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  int64x2_t __ret_677; \
+  int32x4_t __s0_677 = __p0_677; \
+  int32x4_t __s1_677 = __p1_677; \
+  int32x4_t __rev0_677;  __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \
+  int32x4_t __rev1_677;  __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \
+  __ret_677 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_677), __noswap_splat_laneq_s32(__rev1_677, __p2_677)); \
+  __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \
+  __ret_677; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  int32x4_t __ret_678; \
+  int16x8_t __s0_678 = __p0_678; \
+  int16x8_t __s1_678 = __p1_678; \
+  __ret_678 = vmull_s16(vget_high_s16(__s0_678), splat_laneq_s16(__s1_678, __p2_678)); \
+  __ret_678; \
+})
+#else
+#define vmull_high_laneq_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  int32x4_t __ret_679; \
+  int16x8_t __s0_679 = __p0_679; \
+  int16x8_t __s1_679 = __p1_679; \
+  int16x8_t __rev0_679;  __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_679;  __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_679 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_679), __noswap_splat_laneq_s16(__rev1_679, __p2_679)); \
+  __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \
+  __ret_679; \
+})
+#endif
+
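+// vmull_high_n_*: widening multiply of the high half of the first operand by
+// a scalar broadcast to every lane.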
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+  uint64x2_t __ret;
+  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+  uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+  uint32x4_t __ret;
+  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+  uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
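+// vmull_laneq_*: widening multiply of a 64-bit vector by one lane of a
+// 128-bit vector (the "q" in laneq refers to the lane source).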
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u32(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  uint64x2_t __ret_680; \
+  uint32x2_t __s0_680 = __p0_680; \
+  uint32x4_t __s1_680 = __p1_680; \
+  __ret_680 = vmull_u32(__s0_680, splat_laneq_u32(__s1_680, __p2_680)); \
+  __ret_680; \
+})
+#else
+#define vmull_laneq_u32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  uint64x2_t __ret_681; \
+  uint32x2_t __s0_681 = __p0_681; \
+  uint32x4_t __s1_681 = __p1_681; \
+  uint32x2_t __rev0_681;  __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 1, 0); \
+  uint32x4_t __rev1_681;  __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \
+  __ret_681 = __noswap_vmull_u32(__rev0_681, __noswap_splat_laneq_u32(__rev1_681, __p2_681)); \
+  __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \
+  __ret_681; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u16(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  uint32x4_t __ret_682; \
+  uint16x4_t __s0_682 = __p0_682; \
+  uint16x8_t __s1_682 = __p1_682; \
+  __ret_682 = vmull_u16(__s0_682, splat_laneq_u16(__s1_682, __p2_682)); \
+  __ret_682; \
+})
+#else
+#define vmull_laneq_u16(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  uint32x4_t __ret_683; \
+  uint16x4_t __s0_683 = __p0_683; \
+  uint16x8_t __s1_683 = __p1_683; \
+  uint16x4_t __rev0_683;  __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 3, 2, 1, 0); \
+  uint16x8_t __rev1_683;  __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_683 = __noswap_vmull_u16(__rev0_683, __noswap_splat_laneq_u16(__rev1_683, __p2_683)); \
+  __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \
+  __ret_683; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  int64x2_t __ret_684; \
+  int32x2_t __s0_684 = __p0_684; \
+  int32x4_t __s1_684 = __p1_684; \
+  __ret_684 = vmull_s32(__s0_684, splat_laneq_s32(__s1_684, __p2_684)); \
+  __ret_684; \
+})
+#else
+#define vmull_laneq_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  int64x2_t __ret_685; \
+  int32x2_t __s0_685 = __p0_685; \
+  int32x4_t __s1_685 = __p1_685; \
+  int32x2_t __rev0_685;  __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \
+  int32x4_t __rev1_685;  __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \
+  __ret_685 = __noswap_vmull_s32(__rev0_685, __noswap_splat_laneq_s32(__rev1_685, __p2_685)); \
+  __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \
+  __ret_685; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  int32x4_t __ret_686; \
+  int16x4_t __s0_686 = __p0_686; \
+  int16x8_t __s1_686 = __p1_686; \
+  __ret_686 = vmull_s16(__s0_686, splat_laneq_s16(__s1_686, __p2_686)); \
+  __ret_686; \
+})
+#else
+#define vmull_laneq_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  int32x4_t __ret_687; \
+  int16x4_t __s0_687 = __p0_687; \
+  int16x8_t __s1_687 = __p1_687; \
+  int16x4_t __rev0_687;  __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \
+  int16x8_t __rev1_687;  __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_687 = __noswap_vmull_s16(__rev0_687, __noswap_splat_laneq_s16(__rev1_687, __p2_687)); \
+  __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \
+  __ret_687; \
+})
+#endif
+
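+// vmulx*: floating-point multiply-extended (FMULX). Unlike a plain multiply,
+// an infinity times a zero yields +/-2.0 rather than NaN, which makes these
+// usable in reciprocal-estimate Newton-Raphson steps.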
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+  return __ret;
+}
+#define vmulxd_lane_f64(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  float64_t __ret_688; \
+  float64_t __s0_688 = __p0_688; \
+  float64x1_t __s1_688 = __p1_688; \
+  __ret_688 = vmulxd_f64(__s0_688, vget_lane_f64(__s1_688, __p2_688)); \
+  __ret_688; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_lane_f32(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  float32_t __ret_689; \
+  float32_t __s0_689 = __p0_689; \
+  float32x2_t __s1_689 = __p1_689; \
+  __ret_689 = vmulxs_f32(__s0_689, vget_lane_f32(__s1_689, __p2_689)); \
+  __ret_689; \
+})
+#else
+#define vmulxs_lane_f32(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  float32_t __ret_690; \
+  float32_t __s0_690 = __p0_690; \
+  float32x2_t __s1_690 = __p1_690; \
+  float32x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
+  __ret_690 = vmulxs_f32(__s0_690, __noswap_vget_lane_f32(__rev1_690, __p2_690)); \
+  __ret_690; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  float64x2_t __ret_691; \
+  float64x2_t __s0_691 = __p0_691; \
+  float64x1_t __s1_691 = __p1_691; \
+  __ret_691 = vmulxq_f64(__s0_691, splatq_lane_f64(__s1_691, __p2_691)); \
+  __ret_691; \
+})
+#else
+#define vmulxq_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  float64x2_t __ret_692; \
+  float64x2_t __s0_692 = __p0_692; \
+  float64x1_t __s1_692 = __p1_692; \
+  float64x2_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 1, 0); \
+  __ret_692 = __noswap_vmulxq_f64(__rev0_692, __noswap_splatq_lane_f64(__s1_692, __p2_692)); \
+  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 1, 0); \
+  __ret_692; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  float32x4_t __ret_693; \
+  float32x4_t __s0_693 = __p0_693; \
+  float32x2_t __s1_693 = __p1_693; \
+  __ret_693 = vmulxq_f32(__s0_693, splatq_lane_f32(__s1_693, __p2_693)); \
+  __ret_693; \
+})
+#else
+#define vmulxq_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  float32x4_t __ret_694; \
+  float32x4_t __s0_694 = __p0_694; \
+  float32x2_t __s1_694 = __p1_694; \
+  float32x4_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 3, 2, 1, 0); \
+  float32x2_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \
+  __ret_694 = __noswap_vmulxq_f32(__rev0_694, __noswap_splatq_lane_f32(__rev1_694, __p2_694)); \
+  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \
+  __ret_694; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f32(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  float32x2_t __ret_695; \
+  float32x2_t __s0_695 = __p0_695; \
+  float32x2_t __s1_695 = __p1_695; \
+  __ret_695 = vmulx_f32(__s0_695, splat_lane_f32(__s1_695, __p2_695)); \
+  __ret_695; \
+})
+#else
+#define vmulx_lane_f32(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  float32x2_t __ret_696; \
+  float32x2_t __s0_696 = __p0_696; \
+  float32x2_t __s1_696 = __p1_696; \
+  float32x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  float32x2_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 1, 0); \
+  __ret_696 = __noswap_vmulx_f32(__rev0_696, __noswap_splat_lane_f32(__rev1_696, __p2_696)); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
+  __ret_696; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_laneq_f64(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  float64_t __ret_697; \
+  float64_t __s0_697 = __p0_697; \
+  float64x2_t __s1_697 = __p1_697; \
+  __ret_697 = vmulxd_f64(__s0_697, vgetq_lane_f64(__s1_697, __p2_697)); \
+  __ret_697; \
+})
+#else
+#define vmulxd_laneq_f64(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  float64_t __ret_698; \
+  float64_t __s0_698 = __p0_698; \
+  float64x2_t __s1_698 = __p1_698; \
+  float64x2_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \
+  __ret_698 = vmulxd_f64(__s0_698, __noswap_vgetq_lane_f64(__rev1_698, __p2_698)); \
+  __ret_698; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_laneq_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  float32_t __ret_699; \
+  float32_t __s0_699 = __p0_699; \
+  float32x4_t __s1_699 = __p1_699; \
+  __ret_699 = vmulxs_f32(__s0_699, vgetq_lane_f32(__s1_699, __p2_699)); \
+  __ret_699; \
+})
+#else
+#define vmulxs_laneq_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  float32_t __ret_700; \
+  float32_t __s0_700 = __p0_700; \
+  float32x4_t __s1_700 = __p1_700; \
+  float32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
+  __ret_700 = vmulxs_f32(__s0_700, __noswap_vgetq_lane_f32(__rev1_700, __p2_700)); \
+  __ret_700; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  float64x2_t __ret_701; \
+  float64x2_t __s0_701 = __p0_701; \
+  float64x2_t __s1_701 = __p1_701; \
+  __ret_701 = vmulxq_f64(__s0_701, splatq_laneq_f64(__s1_701, __p2_701)); \
+  __ret_701; \
+})
+#else
+#define vmulxq_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  float64x2_t __ret_702; \
+  float64x2_t __s0_702 = __p0_702; \
+  float64x2_t __s1_702 = __p1_702; \
+  float64x2_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 1, 0); \
+  float64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  __ret_702 = __noswap_vmulxq_f64(__rev0_702, __noswap_splatq_laneq_f64(__rev1_702, __p2_702)); \
+  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 1, 0); \
+  __ret_702; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  float32x4_t __ret_703; \
+  float32x4_t __s0_703 = __p0_703; \
+  float32x4_t __s1_703 = __p1_703; \
+  __ret_703 = vmulxq_f32(__s0_703, splatq_laneq_f32(__s1_703, __p2_703)); \
+  __ret_703; \
+})
+#else
+#define vmulxq_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  float32x4_t __ret_704; \
+  float32x4_t __s0_704 = __p0_704; \
+  float32x4_t __s1_704 = __p1_704; \
+  float32x4_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \
+  float32x4_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \
+  __ret_704 = __noswap_vmulxq_f32(__rev0_704, __noswap_splatq_laneq_f32(__rev1_704, __p2_704)); \
+  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 3, 2, 1, 0); \
+  __ret_704; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  float32x2_t __ret_705; \
+  float32x2_t __s0_705 = __p0_705; \
+  float32x4_t __s1_705 = __p1_705; \
+  __ret_705 = vmulx_f32(__s0_705, splat_laneq_f32(__s1_705, __p2_705)); \
+  __ret_705; \
+})
+#else
+#define vmulx_laneq_f32(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  float32x2_t __ret_706; \
+  float32x2_t __s0_706 = __p0_706; \
+  float32x4_t __s1_706 = __p1_706; \
+  float32x2_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \
+  float32x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
+  __ret_706 = __noswap_vmulx_f32(__rev0_706, __noswap_splat_laneq_f32(__rev1_706, __p2_706)); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \
+  __ret_706; \
+})
+#endif
+
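+// vneg*: lane-wise negation, written as plain C negation on the vector type.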
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vneg_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+__ai int64x1_t vneg_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+__ai int64_t vnegd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
+  return __ret;
+}
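+// vpaddq_*: pairwise add. Sums of adjacent lane pairs of the first operand
+// fill the low half of the result; pairs of the second operand fill the high
+// half.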
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
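+// vpaddd_*/vpadds_f32: pairwise-add reductions of a two-lane vector to a
+// scalar (lane 0 + lane 1).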
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+  int64_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
+  return __ret;
+}
+#endif
+
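+// vpmaxq_*: pairwise maximum, with the same adjacent-pair layout as vpaddq_*.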
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
+  return __ret;
+}
+#endif
+
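+// vpmaxnm*: pairwise maximum with IEEE 754-2008 maxNum semantics (FMAXNMP):
+// when exactly one element of a pair is a quiet NaN, the other element is
+// returned instead of NaN.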
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
+  return __ret;
+}
+#endif
+
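+// The vpminq_* wrappers below compute a pairwise minimum: lanes 2i and 2i+1
+// of __p0 feed lane i of the low half of the result, and the pairs of __p1
+// fill the high half. The vpminnm* variants use IEEE-754 minNum semantics,
+// preferring a number over a NaN. A minimal sketch with illustrative values:
+//
+//   float32x4_t a = {1.0f, 4.0f, 2.0f, 8.0f};
+//   float32x4_t b = {3.0f, 0.5f, 7.0f, 6.0f};
+//   float32x4_t m = vpminq_f32(a, b);   // {1.0f, 2.0f, 0.5f, 6.0f}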
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
+  return __ret;
+}
+#endif
+
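+// vqabs* compute the saturating absolute value: the one input with no
+// positive counterpart saturates to the type maximum instead of wrapping,
+// e.g. vqabsb_s8(-128) == 127 and vqabsd_s64(INT64_MIN) == INT64_MAX.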
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vqabs_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int8_t vqabsb_s8(int8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
+  return __ret;
+}
+__ai int32_t vqabss_s32(int32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
+  return __ret;
+}
+__ai int64_t vqabsd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
+  return __ret;
+}
+__ai int16_t vqabsh_s16(int16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
+  return __ret;
+}
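+// The scalar vqadd{b,h,s,d} forms below add with saturation at the bounds of
+// the element type, e.g. vqaddb_u8(200, 100) == 255 and
+// vqaddb_s8(100, 100) == 127.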
+__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
+  return __ret;
+}
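+// vqdmlals_s32 / vqdmlalh_s16 are saturating doubling multiply-accumulate
+// long: the product is doubled, saturated, widened (32->64 or 16->32 bits)
+// and added to the accumulator with a second saturation, i.e. conceptually
+// sat(__p0 + sat(2 * __p1 * __p2)).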
+__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
+  return __ret;
+}
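+// The _high_ variants operate on the upper halves of their 128-bit inputs via
+// vget_high_*. In the big-endian branches the __noswap_ helpers are used so
+// that the explicit lane reversal above is applied exactly once rather than
+// being repeated inside each callee.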
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
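+// The lane forms are macros rather than functions because the lane index must
+// be an integer constant expression. The numbered temporaries (__s0_707,
+// __ret_707, ...) apparently give every expansion site a distinct set of
+// names, so nested or adjacent expansions cannot shadow one another.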
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
+  int64x2_t __ret_707; \
+  int64x2_t __s0_707 = __p0_707; \
+  int32x4_t __s1_707 = __p1_707; \
+  int32x2_t __s2_707 = __p2_707; \
+  __ret_707 = vqdmlal_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
+  __ret_707; \
+})
+#else
+#define vqdmlal_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
+  int64x2_t __ret_708; \
+  int64x2_t __s0_708 = __p0_708; \
+  int32x4_t __s1_708 = __p1_708; \
+  int32x2_t __s2_708 = __p2_708; \
+  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
+  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
+  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
+  __ret_708 = __noswap_vqdmlal_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
+  __ret_708; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
+  int32x4_t __ret_709; \
+  int32x4_t __s0_709 = __p0_709; \
+  int16x8_t __s1_709 = __p1_709; \
+  int16x4_t __s2_709 = __p2_709; \
+  __ret_709 = vqdmlal_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
+  __ret_709; \
+})
+#else
+#define vqdmlal_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
+  int32x4_t __ret_710; \
+  int32x4_t __s0_710 = __p0_710; \
+  int16x8_t __s1_710 = __p1_710; \
+  int16x4_t __s2_710 = __p2_710; \
+  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
+  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
+  __ret_710 = __noswap_vqdmlal_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
+  __ret_710; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
+  int64x2_t __ret_711; \
+  int64x2_t __s0_711 = __p0_711; \
+  int32x4_t __s1_711 = __p1_711; \
+  int32x4_t __s2_711 = __p2_711; \
+  __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
+  __ret_711; \
+})
+#else
+#define vqdmlal_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
+  int64x2_t __ret_712; \
+  int64x2_t __s0_712 = __p0_712; \
+  int32x4_t __s1_712 = __p1_712; \
+  int32x4_t __s2_712 = __p2_712; \
+  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
+  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
+  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
+  __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
+  __ret_712; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
+  int32x4_t __ret_713; \
+  int32x4_t __s0_713 = __p0_713; \
+  int16x8_t __s1_713 = __p1_713; \
+  int16x8_t __s2_713 = __p2_713; \
+  __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
+  __ret_713; \
+})
+#else
+#define vqdmlal_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
+  int32x4_t __ret_714; \
+  int32x4_t __s0_714 = __p0_714; \
+  int16x8_t __s1_714 = __p1_714; \
+  int16x8_t __s2_714 = __p2_714; \
+  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
+  __ret_714; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
+  int64x2_t __ret_715; \
+  int64x2_t __s0_715 = __p0_715; \
+  int32x2_t __s1_715 = __p1_715; \
+  int32x4_t __s2_715 = __p2_715; \
+  __ret_715 = vqdmlal_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
+  __ret_715; \
+})
+#else
+#define vqdmlal_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
+  int64x2_t __ret_716; \
+  int64x2_t __s0_716 = __p0_716; \
+  int32x2_t __s1_716 = __p1_716; \
+  int32x4_t __s2_716 = __p2_716; \
+  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
+  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
+  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
+  __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
+  __ret_716; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
+  int32x4_t __ret_717; \
+  int32x4_t __s0_717 = __p0_717; \
+  int16x4_t __s1_717 = __p1_717; \
+  int16x8_t __s2_717 = __p2_717; \
+  __ret_717 = vqdmlal_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
+  __ret_717; \
+})
+#else
+#define vqdmlal_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
+  int32x4_t __ret_718; \
+  int32x4_t __s0_718 = __p0_718; \
+  int16x4_t __s1_718 = __p1_718; \
+  int16x8_t __s2_718 = __p2_718; \
+  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
+  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
+  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
+  __ret_718; \
+})
+#endif
+
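+// vqdmlsl* mirror vqdmlal* with a subtraction: conceptually
+// sat(__p0 - sat(2 * __p1 * __p2)), again widening the doubled product.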
+__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s32(__p0_719, __p1_719, __p2_719, __p3_719) __extension__ ({ \
+  int64x2_t __ret_719; \
+  int64x2_t __s0_719 = __p0_719; \
+  int32x4_t __s1_719 = __p1_719; \
+  int32x2_t __s2_719 = __p2_719; \
+  __ret_719 = vqdmlsl_s32(__s0_719, vget_high_s32(__s1_719), splat_lane_s32(__s2_719, __p3_719)); \
+  __ret_719; \
+})
+#else
+#define vqdmlsl_high_lane_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \
+  int64x2_t __ret_720; \
+  int64x2_t __s0_720 = __p0_720; \
+  int32x4_t __s1_720 = __p1_720; \
+  int32x2_t __s2_720 = __p2_720; \
+  int64x2_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \
+  int32x4_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 3, 2, 1, 0); \
+  int32x2_t __rev2_720;  __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 1, 0); \
+  __ret_720 = __noswap_vqdmlsl_s32(__rev0_720, __noswap_vget_high_s32(__rev1_720), __noswap_splat_lane_s32(__rev2_720, __p3_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \
+  int32x4_t __ret_721; \
+  int32x4_t __s0_721 = __p0_721; \
+  int16x8_t __s1_721 = __p1_721; \
+  int16x4_t __s2_721 = __p2_721; \
+  __ret_721 = vqdmlsl_s16(__s0_721, vget_high_s16(__s1_721), splat_lane_s16(__s2_721, __p3_721)); \
+  __ret_721; \
+})
+#else
+#define vqdmlsl_high_lane_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \
+  int32x4_t __ret_722; \
+  int32x4_t __s0_722 = __p0_722; \
+  int16x8_t __s1_722 = __p1_722; \
+  int16x4_t __s2_722 = __p2_722; \
+  int32x4_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \
+  int16x8_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_722;  __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 3, 2, 1, 0); \
+  __ret_722 = __noswap_vqdmlsl_s16(__rev0_722, __noswap_vget_high_s16(__rev1_722), __noswap_splat_lane_s16(__rev2_722, __p3_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \
+  int64x2_t __ret_723; \
+  int64x2_t __s0_723 = __p0_723; \
+  int32x4_t __s1_723 = __p1_723; \
+  int32x4_t __s2_723 = __p2_723; \
+  __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_laneq_s32(__s2_723, __p3_723)); \
+  __ret_723; \
+})
+#else
+#define vqdmlsl_high_laneq_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \
+  int64x2_t __ret_724; \
+  int64x2_t __s0_724 = __p0_724; \
+  int32x4_t __s1_724 = __p1_724; \
+  int32x4_t __s2_724 = __p2_724; \
+  int64x2_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \
+  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  int32x4_t __rev2_724;  __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 3, 2, 1, 0); \
+  __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_laneq_s32(__rev2_724, __p3_724)); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \
+  __ret_724; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \
+  int32x4_t __ret_725; \
+  int32x4_t __s0_725 = __p0_725; \
+  int16x8_t __s1_725 = __p1_725; \
+  int16x8_t __s2_725 = __p2_725; \
+  __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_laneq_s16(__s2_725, __p3_725)); \
+  __ret_725; \
+})
+#else
+#define vqdmlsl_high_laneq_s16(__p0_726, __p1_726, __p2_726, __p3_726) __extension__ ({ \
+  int32x4_t __ret_726; \
+  int32x4_t __s0_726 = __p0_726; \
+  int16x8_t __s1_726 = __p1_726; \
+  int16x8_t __s2_726 = __p2_726; \
+  int32x4_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \
+  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_726;  __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_laneq_s16(__rev2_726, __p3_726)); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \
+  int64x2_t __ret_727; \
+  int64x2_t __s0_727 = __p0_727; \
+  int32x2_t __s1_727 = __p1_727; \
+  int32x4_t __s2_727 = __p2_727; \
+  __ret_727 = vqdmlsl_s32(__s0_727, __s1_727, splat_laneq_s32(__s2_727, __p3_727)); \
+  __ret_727; \
+})
+#else
+#define vqdmlsl_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \
+  int64x2_t __ret_728; \
+  int64x2_t __s0_728 = __p0_728; \
+  int32x2_t __s1_728 = __p1_728; \
+  int32x4_t __s2_728 = __p2_728; \
+  int64x2_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \
+  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
+  int32x4_t __rev2_728;  __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \
+  __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __rev1_728, __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
+  __ret_728; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \
+  int32x4_t __ret_729; \
+  int32x4_t __s0_729 = __p0_729; \
+  int16x4_t __s1_729 = __p1_729; \
+  int16x8_t __s2_729 = __p2_729; \
+  __ret_729 = vqdmlsl_s16(__s0_729, __s1_729, splat_laneq_s16(__s2_729, __p3_729)); \
+  __ret_729; \
+})
+#else
+#define vqdmlsl_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \
+  int32x4_t __ret_730; \
+  int32x4_t __s0_730 = __p0_730; \
+  int16x4_t __s1_730 = __p1_730; \
+  int16x8_t __s2_730 = __p2_730; \
+  int32x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
+  int16x8_t __rev2_730;  __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __rev1_730, __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
+  __ret_730; \
+})
+#endif
+
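+// vqdmulh* return the high half of the doubled product,
+// sat((2 * a * b) >> elt_bits); the only saturating case is when both inputs
+// are the most negative value, e.g.
+// vqdmulhh_s16(INT16_MIN, INT16_MIN) == INT16_MAX.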
+__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_lane_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
+  int32_t __ret_731; \
+  int32_t __s0_731 = __p0_731; \
+  int32x2_t __s1_731 = __p1_731; \
+  __ret_731 = vqdmulhs_s32(__s0_731, vget_lane_s32(__s1_731, __p2_731)); \
+  __ret_731; \
+})
+#else
+#define vqdmulhs_lane_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
+  int32_t __ret_732; \
+  int32_t __s0_732 = __p0_732; \
+  int32x2_t __s1_732 = __p1_732; \
+  int32x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  __ret_732 = vqdmulhs_s32(__s0_732, __noswap_vget_lane_s32(__rev1_732, __p2_732)); \
+  __ret_732; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_lane_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
+  int16_t __ret_733; \
+  int16_t __s0_733 = __p0_733; \
+  int16x4_t __s1_733 = __p1_733; \
+  __ret_733 = vqdmulhh_s16(__s0_733, vget_lane_s16(__s1_733, __p2_733)); \
+  __ret_733; \
+})
+#else
+#define vqdmulhh_lane_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
+  int16_t __ret_734; \
+  int16_t __s0_734 = __p0_734; \
+  int16x4_t __s1_734 = __p1_734; \
+  int16x4_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \
+  __ret_734 = vqdmulhh_s16(__s0_734, __noswap_vget_lane_s16(__rev1_734, __p2_734)); \
+  __ret_734; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_laneq_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
+  int32_t __ret_735; \
+  int32_t __s0_735 = __p0_735; \
+  int32x4_t __s1_735 = __p1_735; \
+  __ret_735 = vqdmulhs_s32(__s0_735, vgetq_lane_s32(__s1_735, __p2_735)); \
+  __ret_735; \
+})
+#else
+#define vqdmulhs_laneq_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
+  int32_t __ret_736; \
+  int32_t __s0_736 = __p0_736; \
+  int32x4_t __s1_736 = __p1_736; \
+  int32x4_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 3, 2, 1, 0); \
+  __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vgetq_lane_s32(__rev1_736, __p2_736)); \
+  __ret_736; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_laneq_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
+  int16_t __ret_737; \
+  int16_t __s0_737 = __p0_737; \
+  int16x8_t __s1_737 = __p1_737; \
+  __ret_737 = vqdmulhh_s16(__s0_737, vgetq_lane_s16(__s1_737, __p2_737)); \
+  __ret_737; \
+})
+#else
+#define vqdmulhh_laneq_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
+  int16_t __ret_738; \
+  int16_t __s0_738 = __p0_738; \
+  int16x8_t __s1_738 = __p1_738; \
+  int16x8_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vgetq_lane_s16(__rev1_738, __p2_738)); \
+  __ret_738; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
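+// vqdmull* are saturating doubling multiplies that widen the result
+// (32x32->64, 16x16->32); only INT_MIN * INT_MIN saturates, e.g.
+// vqdmulls_s32(INT32_MIN, INT32_MIN) == INT64_MAX.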
+__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
+  int64x2_t __ret_739; \
+  int32x4_t __s0_739 = __p0_739; \
+  int32x2_t __s1_739 = __p1_739; \
+  __ret_739 = vqdmull_s32(vget_high_s32(__s0_739), splat_lane_s32(__s1_739, __p2_739)); \
+  __ret_739; \
+})
+#else
+#define vqdmull_high_lane_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
+  int64x2_t __ret_740; \
+  int32x4_t __s0_740 = __p0_740; \
+  int32x2_t __s1_740 = __p1_740; \
+  int32x4_t __rev0_740;  __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 3, 2, 1, 0); \
+  int32x2_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 1, 0); \
+  __ret_740 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_740), __noswap_splat_lane_s32(__rev1_740, __p2_740)); \
+  __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 1, 0); \
+  __ret_740; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
+  int32x4_t __ret_741; \
+  int16x8_t __s0_741 = __p0_741; \
+  int16x4_t __s1_741 = __p1_741; \
+  __ret_741 = vqdmull_s16(vget_high_s16(__s0_741), splat_lane_s16(__s1_741, __p2_741)); \
+  __ret_741; \
+})
+#else
+#define vqdmull_high_lane_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
+  int32x4_t __ret_742; \
+  int16x8_t __s0_742 = __p0_742; \
+  int16x4_t __s1_742 = __p1_742; \
+  int16x8_t __rev0_742;  __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 3, 2, 1, 0); \
+  __ret_742 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_742), __noswap_splat_lane_s16(__rev1_742, __p2_742)); \
+  __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \
+  __ret_742; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
+  int64x2_t __ret_743; \
+  int32x4_t __s0_743 = __p0_743; \
+  int32x4_t __s1_743 = __p1_743; \
+  __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_laneq_s32(__s1_743, __p2_743)); \
+  __ret_743; \
+})
+#else
+#define vqdmull_high_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
+  int64x2_t __ret_744; \
+  int32x4_t __s0_744 = __p0_744; \
+  int32x4_t __s1_744 = __p1_744; \
+  int32x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
+  __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
+  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
+  __ret_744; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
+  int32x4_t __ret_745; \
+  int16x8_t __s0_745 = __p0_745; \
+  int16x8_t __s1_745 = __p1_745; \
+  __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_laneq_s16(__s1_745, __p2_745)); \
+  __ret_745; \
+})
+#else
+#define vqdmull_high_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
+  int32x4_t __ret_746; \
+  int16x8_t __s0_746 = __p0_746; \
+  int16x8_t __s1_746 = __p1_746; \
+  int16x8_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
+  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
+  __ret_746; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
+  int64_t __ret_747; \
+  int32_t __s0_747 = __p0_747; \
+  int32x2_t __s1_747 = __p1_747; \
+  __ret_747 = vqdmulls_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
+  __ret_747; \
+})
+#else
+#define vqdmulls_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
+  int64_t __ret_748; \
+  int32_t __s0_748 = __p0_748; \
+  int32x2_t __s1_748 = __p1_748; \
+  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
+  __ret_748 = vqdmulls_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
+  __ret_748; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
+  int32_t __ret_749; \
+  int16_t __s0_749 = __p0_749; \
+  int16x4_t __s1_749 = __p1_749; \
+  __ret_749 = vqdmullh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
+  __ret_749; \
+})
+#else
+#define vqdmullh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
+  int32_t __ret_750; \
+  int16_t __s0_750 = __p0_750; \
+  int16x4_t __s1_750 = __p1_750; \
+  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
+  __ret_750 = vqdmullh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
+  __ret_750; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
+  int64_t __ret_751; \
+  int32_t __s0_751 = __p0_751; \
+  int32x4_t __s1_751 = __p1_751; \
+  __ret_751 = vqdmulls_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
+  __ret_751; \
+})
+#else
+#define vqdmulls_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
+  int64_t __ret_752; \
+  int32_t __s0_752 = __p0_752; \
+  int32x4_t __s1_752 = __p1_752; \
+  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
+  __ret_752 = vqdmulls_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
+  __ret_752; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
+  int32_t __ret_753; \
+  int16_t __s0_753 = __p0_753; \
+  int16x8_t __s1_753 = __p1_753; \
+  __ret_753 = vqdmullh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
+  __ret_753; \
+})
+#else
+#define vqdmullh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
+  int32_t __ret_754; \
+  int16_t __s0_754 = __p0_754; \
+  int16x8_t __s1_754 = __p1_754; \
+  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_754 = vqdmullh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
+  __ret_754; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
+  int64x2_t __ret_755; \
+  int32x2_t __s0_755 = __p0_755; \
+  int32x4_t __s1_755 = __p1_755; \
+  __ret_755 = vqdmull_s32(__s0_755, splat_laneq_s32(__s1_755, __p2_755)); \
+  __ret_755; \
+})
+#else
+#define vqdmull_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
+  int64x2_t __ret_756; \
+  int32x2_t __s0_756 = __p0_756; \
+  int32x4_t __s1_756 = __p1_756; \
+  int32x2_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 1, 0); \
+  int32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  __ret_756 = __noswap_vqdmull_s32(__rev0_756, __noswap_splat_laneq_s32(__rev1_756, __p2_756)); \
+  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 1, 0); \
+  __ret_756; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \
+  int32x4_t __ret_757; \
+  int16x4_t __s0_757 = __p0_757; \
+  int16x8_t __s1_757 = __p1_757; \
+  __ret_757 = vqdmull_s16(__s0_757, splat_laneq_s16(__s1_757, __p2_757)); \
+  __ret_757; \
+})
+#else
+#define vqdmull_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \
+  int32x4_t __ret_758; \
+  int16x4_t __s0_758 = __p0_758; \
+  int16x8_t __s1_758 = __p1_758; \
+  int16x4_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 3, 2, 1, 0); \
+  int16x8_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_758 = __noswap_vqdmull_s16(__rev0_758, __noswap_splat_laneq_s16(__rev1_758, __p2_758)); \
+  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
+  __ret_758; \
+})
+#endif
+
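+// Scalar saturating-narrow forms (vqmovn*): each clamps one wider element
+// into the next smaller type instead of wrapping, e.g.
+// vqmovns_s32(100000) == 32767 (INT16_MAX).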
+__ai int16_t vqmovns_s32(int32_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
+  return __ret;
+}
+__ai int32_t vqmovnd_s64(int64_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
+  return __ret;
+}
+__ai int8_t vqmovnh_s16(int16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
+  return __ret;
+}
+__ai uint16_t vqmovns_u32(uint32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
+  return __ret;
+}
+__ai uint32_t vqmovnd_u64(uint64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
+  return __ret;
+}
+__ai uint8_t vqmovnh_u16(uint16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
+  return __ret;
+}
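+// The *_high variants narrow the 128-bit source into the upper half of the
+// result, keeping __p0 as the lower half. On the big-endian path every
+// operand is lane-reversed with __builtin_shufflevector before the call and
+// the result is reversed back; the __noswap_* helpers are the internal
+// variants that skip this extra reversal.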
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
+  return __ret;
+}
+#else
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
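+// vqmovun*: signed input, unsigned saturated output; negative values clamp
+// to 0 and overlarge values to the unsigned maximum, e.g.
+// vqmovuns_s32(-5) == 0 and vqmovuns_s32(100000) == 65535.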
+__ai uint16_t vqmovuns_s32(int32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
+  return __ret;
+}
+__ai uint32_t vqmovund_s64(int64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
+  return __ret;
+}
+__ai uint8_t vqmovunh_s16(int16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
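+// Saturating negate: unlike plain negation, the most negative value clamps
+// rather than wrapping, e.g. vqnegd_s64(INT64_MIN) == INT64_MAX.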
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vqneg_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int8_t vqnegb_s8(int8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
+  return __ret;
+}
+__ai int32_t vqnegs_s32(int32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
+  return __ret;
+}
+__ai int64_t vqnegd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
+  return __ret;
+}
+__ai int16_t vqnegh_s16(int16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
+  return __ret;
+}
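+// vqrdmulh* (saturating rounding doubling multiply, high half) is roughly
+//   sat((2*a*b + (1 << (bits-1))) >> bits)
+// e.g. in Q15 arithmetic, vqrdmulhh_s16(0x4000, 0x4000) == 0x2000
+// (0.5 * 0.5 == 0.25).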
+__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
+  return __ret;
+}
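+// The lane variants below pick the multiplier from a vector: _lane_ indexes
+// a 64-bit vector, _laneq_ a 128-bit one. The lane index __p2 must be a
+// compile-time constant, which is why these are macros rather than __ai
+// functions like the scalar forms above.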
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_lane_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \
+  int32_t __ret_759; \
+  int32_t __s0_759 = __p0_759; \
+  int32x2_t __s1_759 = __p1_759; \
+  __ret_759 = vqrdmulhs_s32(__s0_759, vget_lane_s32(__s1_759, __p2_759)); \
+  __ret_759; \
+})
+#else
+#define vqrdmulhs_lane_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \
+  int32_t __ret_760; \
+  int32_t __s0_760 = __p0_760; \
+  int32x2_t __s1_760 = __p1_760; \
+  int32x2_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 1, 0); \
+  __ret_760 = vqrdmulhs_s32(__s0_760, __noswap_vget_lane_s32(__rev1_760, __p2_760)); \
+  __ret_760; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_lane_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \
+  int16_t __ret_761; \
+  int16_t __s0_761 = __p0_761; \
+  int16x4_t __s1_761 = __p1_761; \
+  __ret_761 = vqrdmulhh_s16(__s0_761, vget_lane_s16(__s1_761, __p2_761)); \
+  __ret_761; \
+})
+#else
+#define vqrdmulhh_lane_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \
+  int16_t __ret_762; \
+  int16_t __s0_762 = __p0_762; \
+  int16x4_t __s1_762 = __p1_762; \
+  int16x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
+  __ret_762 = vqrdmulhh_s16(__s0_762, __noswap_vget_lane_s16(__rev1_762, __p2_762)); \
+  __ret_762; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_laneq_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \
+  int32_t __ret_763; \
+  int32_t __s0_763 = __p0_763; \
+  int32x4_t __s1_763 = __p1_763; \
+  __ret_763 = vqrdmulhs_s32(__s0_763, vgetq_lane_s32(__s1_763, __p2_763)); \
+  __ret_763; \
+})
+#else
+#define vqrdmulhs_laneq_s32(__p0_764, __p1_764, __p2_764) __extension__ ({ \
+  int32_t __ret_764; \
+  int32_t __s0_764 = __p0_764; \
+  int32x4_t __s1_764 = __p1_764; \
+  int32x4_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 3, 2, 1, 0); \
+  __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vgetq_lane_s32(__rev1_764, __p2_764)); \
+  __ret_764; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_laneq_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  int16_t __ret_765; \
+  int16_t __s0_765 = __p0_765; \
+  int16x8_t __s1_765 = __p1_765; \
+  __ret_765 = vqrdmulhh_s16(__s0_765, vgetq_lane_s16(__s1_765, __p2_765)); \
+  __ret_765; \
+})
+#else
+#define vqrdmulhh_laneq_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  int16_t __ret_766; \
+  int16_t __s0_766 = __p0_766; \
+  int16x8_t __s1_766 = __p1_766; \
+  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vgetq_lane_s16(__rev1_766, __p2_766)); \
+  __ret_766; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
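+// vqrshl*: saturating rounding shift by a signed, run-time amount; a
+// positive __p1 shifts left (saturating), a negative __p1 shifts right with
+// rounding.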
+__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
+  return __ret;
+}
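+// vqrshrn_high_n_*: rounding shift right by an immediate, saturating narrow,
+// then combine into the upper half; each narrowed element is roughly
+//   sat((x + (1 << (n-1))) >> n).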
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  uint16x8_t __ret_767; \
+  uint16x4_t __s0_767 = __p0_767; \
+  uint32x4_t __s1_767 = __p1_767; \
+  __ret_767 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_767), (uint16x4_t)(vqrshrn_n_u32(__s1_767, __p2_767)))); \
+  __ret_767; \
+})
+#else
+#define vqrshrn_high_n_u32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  uint16x8_t __ret_768; \
+  uint16x4_t __s0_768 = __p0_768; \
+  uint32x4_t __s1_768 = __p1_768; \
+  uint16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
+  uint32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
+  __ret_768 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_768), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_768, __p2_768)))); \
+  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_768; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
+  uint32x4_t __ret_769; \
+  uint32x2_t __s0_769 = __p0_769; \
+  uint64x2_t __s1_769 = __p1_769; \
+  __ret_769 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_769), (uint32x2_t)(vqrshrn_n_u64(__s1_769, __p2_769)))); \
+  __ret_769; \
+})
+#else
+#define vqrshrn_high_n_u64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
+  uint32x4_t __ret_770; \
+  uint32x2_t __s0_770 = __p0_770; \
+  uint64x2_t __s1_770 = __p1_770; \
+  uint32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
+  uint64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
+  __ret_770 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_770), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_770, __p2_770)))); \
+  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
+  __ret_770; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
+  uint8x16_t __ret_771; \
+  uint8x8_t __s0_771 = __p0_771; \
+  uint16x8_t __s1_771 = __p1_771; \
+  __ret_771 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_771), (uint8x8_t)(vqrshrn_n_u16(__s1_771, __p2_771)))); \
+  __ret_771; \
+})
+#else
+#define vqrshrn_high_n_u16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
+  uint8x16_t __ret_772; \
+  uint8x8_t __s0_772 = __p0_772; \
+  uint16x8_t __s1_772 = __p1_772; \
+  uint8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_772), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_772, __p2_772)))); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
+  int16x8_t __ret_773; \
+  int16x4_t __s0_773 = __p0_773; \
+  int32x4_t __s1_773 = __p1_773; \
+  __ret_773 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_773), (int16x4_t)(vqrshrn_n_s32(__s1_773, __p2_773)))); \
+  __ret_773; \
+})
+#else
+#define vqrshrn_high_n_s32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
+  int16x8_t __ret_774; \
+  int16x4_t __s0_774 = __p0_774; \
+  int32x4_t __s1_774 = __p1_774; \
+  int16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
+  int32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
+  __ret_774 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_774), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_774, __p2_774)))); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_774; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
+  int32x4_t __ret_775; \
+  int32x2_t __s0_775 = __p0_775; \
+  int64x2_t __s1_775 = __p1_775; \
+  __ret_775 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_775), (int32x2_t)(vqrshrn_n_s64(__s1_775, __p2_775)))); \
+  __ret_775; \
+})
+#else
+#define vqrshrn_high_n_s64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
+  int32x4_t __ret_776; \
+  int32x2_t __s0_776 = __p0_776; \
+  int64x2_t __s1_776 = __p1_776; \
+  int32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
+  int64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
+  __ret_776 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_776), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_776, __p2_776)))); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
+  __ret_776; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
+  int8x16_t __ret_777; \
+  int8x8_t __s0_777 = __p0_777; \
+  int16x8_t __s1_777 = __p1_777; \
+  __ret_777 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_777), (int8x8_t)(vqrshrn_n_s16(__s1_777, __p2_777)))); \
+  __ret_777; \
+})
+#else
+#define vqrshrn_high_n_s16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
+  int8x16_t __ret_778; \
+  int8x8_t __s0_778 = __p0_778; \
+  int16x8_t __s1_778 = __p1_778; \
+  int8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_778), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_778, __p2_778)))); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778; \
+})
+#endif
+
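+// Scalar _n forms: the shift amount is encoded as an immediate in the
+// instruction, so these are macros to keep __p1 a constant expression.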
+#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
+  int16x8_t __ret_779; \
+  int16x4_t __s0_779 = __p0_779; \
+  int32x4_t __s1_779 = __p1_779; \
+  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqrshrun_n_s32(__s1_779, __p2_779)))); \
+  __ret_779; \
+})
+#else
+#define vqrshrun_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
+  int16x8_t __ret_780; \
+  int16x4_t __s0_780 = __p0_780; \
+  int32x4_t __s1_780 = __p1_780; \
+  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
+  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
+  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_780, __p2_780)))); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_780; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
+  int32x4_t __ret_781; \
+  int32x2_t __s0_781 = __p0_781; \
+  int64x2_t __s1_781 = __p1_781; \
+  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqrshrun_n_s64(__s1_781, __p2_781)))); \
+  __ret_781; \
+})
+#else
+#define vqrshrun_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
+  int32x4_t __ret_782; \
+  int32x2_t __s0_782 = __p0_782; \
+  int64x2_t __s1_782 = __p1_782; \
+  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
+  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
+  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_782, __p2_782)))); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
+  __ret_782; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
+  int8x16_t __ret_783; \
+  int8x8_t __s0_783 = __p0_783; \
+  int16x8_t __s1_783 = __p1_783; \
+  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqrshrun_n_s16(__s1_783, __p2_783)))); \
+  __ret_783; \
+})
+#else
+#define vqrshrun_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
+  int8x16_t __ret_784; \
+  int8x8_t __s0_784 = __p0_784; \
+  int16x8_t __s1_784 = __p1_784; \
+  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_784, __p2_784)))); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784; \
+})
+#endif
+
+#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
+  __ret; \
+})
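+// vqshl*: like vqrshl* above, but the shift right taken for a negative
+// run-time shift amount truncates instead of rounding.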
+__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
+  return __ret;
+}
+#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
+  __ret; \
+})
+#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
+  __ret; \
+})
+#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
+  __ret; \
+})
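+// vqshlu*_n: signed input shifted left into an unsigned result, saturating
+// at 0 from below and at the unsigned maximum from above.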
+#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
+  __ret; \
+})
+#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
+  __ret; \
+})
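+// vqshrn_high_n_*: the truncating counterpart of vqrshrn_high_n_* above;
+// each narrowed element is roughly sat(x >> n) before being combined into
+// the upper half.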
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
+  uint16x8_t __ret_785; \
+  uint16x4_t __s0_785 = __p0_785; \
+  uint32x4_t __s1_785 = __p1_785; \
+  __ret_785 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_785), (uint16x4_t)(vqshrn_n_u32(__s1_785, __p2_785)))); \
+  __ret_785; \
+})
+#else
+#define vqshrn_high_n_u32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
+  uint16x8_t __ret_786; \
+  uint16x4_t __s0_786 = __p0_786; \
+  uint32x4_t __s1_786 = __p1_786; \
+  uint16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
+  uint32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
+  __ret_786 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_786), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_786, __p2_786)))); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_786; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
+  uint32x4_t __ret_787; \
+  uint32x2_t __s0_787 = __p0_787; \
+  uint64x2_t __s1_787 = __p1_787; \
+  __ret_787 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_787), (uint32x2_t)(vqshrn_n_u64(__s1_787, __p2_787)))); \
+  __ret_787; \
+})
+#else
+#define vqshrn_high_n_u64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
+  uint32x4_t __ret_788; \
+  uint32x2_t __s0_788 = __p0_788; \
+  uint64x2_t __s1_788 = __p1_788; \
+  uint32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
+  uint64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
+  __ret_788 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_788), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_788, __p2_788)))); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
+  __ret_788; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
+  uint8x16_t __ret_789; \
+  uint8x8_t __s0_789 = __p0_789; \
+  uint16x8_t __s1_789 = __p1_789; \
+  __ret_789 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_789), (uint8x8_t)(vqshrn_n_u16(__s1_789, __p2_789)))); \
+  __ret_789; \
+})
+#else
+#define vqshrn_high_n_u16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
+  uint8x16_t __ret_790; \
+  uint8x8_t __s0_790 = __p0_790; \
+  uint16x8_t __s1_790 = __p1_790; \
+  uint8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_790), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_790, __p2_790)))); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
+  int16x8_t __ret_791; \
+  int16x4_t __s0_791 = __p0_791; \
+  int32x4_t __s1_791 = __p1_791; \
+  __ret_791 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_791), (int16x4_t)(vqshrn_n_s32(__s1_791, __p2_791)))); \
+  __ret_791; \
+})
+#else
+#define vqshrn_high_n_s32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
+  int16x8_t __ret_792; \
+  int16x4_t __s0_792 = __p0_792; \
+  int32x4_t __s1_792 = __p1_792; \
+  int16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
+  int32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
+  __ret_792 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_792), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_792, __p2_792)))); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_792; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
+  int32x4_t __ret_793; \
+  int32x2_t __s0_793 = __p0_793; \
+  int64x2_t __s1_793 = __p1_793; \
+  __ret_793 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_793), (int32x2_t)(vqshrn_n_s64(__s1_793, __p2_793)))); \
+  __ret_793; \
+})
+#else
+#define vqshrn_high_n_s64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
+  int32x4_t __ret_794; \
+  int32x2_t __s0_794 = __p0_794; \
+  int64x2_t __s1_794 = __p1_794; \
+  int32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
+  int64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
+  __ret_794 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_794), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_794, __p2_794)))); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
+  __ret_794; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
+  int8x16_t __ret_795; \
+  int8x8_t __s0_795 = __p0_795; \
+  int16x8_t __s1_795 = __p1_795; \
+  __ret_795 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_795), (int8x8_t)(vqshrn_n_s16(__s1_795, __p2_795)))); \
+  __ret_795; \
+})
+#else
+#define vqshrn_high_n_s16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
+  int8x16_t __ret_796; \
+  int8x8_t __s0_796 = __p0_796; \
+  int16x8_t __s1_796 = __p1_796; \
+  int8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_796), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_796, __p2_796)))); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796; \
+})
+#endif
+
+#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
+  int16x8_t __ret_797; \
+  int16x4_t __s0_797 = __p0_797; \
+  int32x4_t __s1_797 = __p1_797; \
+  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vqshrun_n_s32(__s1_797, __p2_797)))); \
+  __ret_797; \
+})
+#else
+#define vqshrun_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
+  int16x8_t __ret_798; \
+  int16x4_t __s0_798 = __p0_798; \
+  int32x4_t __s1_798 = __p1_798; \
+  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
+  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
+  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_798, __p2_798)))); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_798; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
+  int32x4_t __ret_799; \
+  int32x2_t __s0_799 = __p0_799; \
+  int64x2_t __s1_799 = __p1_799; \
+  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vqshrun_n_s64(__s1_799, __p2_799)))); \
+  __ret_799; \
+})
+#else
+#define vqshrun_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
+  int32x4_t __ret_800; \
+  int32x2_t __s0_800 = __p0_800; \
+  int64x2_t __s1_800 = __p1_800; \
+  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
+  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
+  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_800, __p2_800)))); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
+  __ret_800; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
+  int8x16_t __ret_801; \
+  int8x8_t __s0_801 = __p0_801; \
+  int16x8_t __s1_801 = __p1_801; \
+  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vqshrun_n_s16(__s1_801, __p2_801)))); \
+  __ret_801; \
+})
+#else
+#define vqshrun_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
+  int8x16_t __ret_802; \
+  int8x8_t __s0_802 = __p0_802; \
+  int16x8_t __s1_802 = __p1_802; \
+  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_802, __p2_802)))); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802; \
+})
+#endif
+
+#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
+  __ret; \
+})
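+// Scalar saturating subtract: the difference clamps to the type's range,
+// e.g. vqsubb_u8(0, 1) == 0 and vqsubb_s8(-128, 1) == -128.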
+__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
+  return __ret;
+}
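+// vqtbl1*: table lookup into one 128-bit table; output byte i is
+// __p0[__p1[i]] when the index is below 16, and 0 otherwise. Illustrative
+// use (every index out of range, so all lanes come back 0):
+//   uint8x16_t t = vdupq_n_u8(7);
+//   uint8x8_t idx = vdup_n_u8(20);
+//   uint8x8_t r = vqtbl1_u8(t, idx);  // r == {0,...,0}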
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
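+// vqtbl2*/vqtbl3*: the same lookup over two- and three-register tables
+// (valid indices 0-31 and 0-47 respectively); on the big-endian path each
+// val[] member of the table struct is reversed individually.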
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
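+// vqtbx1..vqtbx4: AArch64 TBX, the "extension" form of table lookup. Unlike
+// vqtbl*, an out-of-range index byte leaves the corresponding byte of the
+// first operand (the fallback vector) unchanged instead of writing 0.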
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
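+// vraddhn_high_*: rounding add-and-narrow, returning the high half. The result
+// keeps __p0 as its low half and appends vraddhn(__p1, __p2), i.e. the rounded
+// most-significant half of each element-wise sum. The __noswap_* helpers used
+// on the big-endian path are variants that skip the usual lane reversal, since
+// their inputs have already been reversed here.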
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
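+// vrbit/vrbitq: RBIT on each vector element: reverses the bit order within
+// every byte (e.g. 0b00000001 becomes 0b10000000).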
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
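+// vrecpe*/vrecps*: FRECPE produces a low-precision reciprocal estimate;
+// FRECPS computes the Newton-Raphson step (2 - a*b). A sketch of refining the
+// estimate, assuming a well-conditioned input d:
+//   float64x2_t x = vrecpeq_f64(d);        // initial estimate
+//   x = vmulq_f64(x, vrecpsq_f64(d, x));   // one Newton-Raphson iteration
+//   x = vmulq_f64(x, vrecpsq_f64(d, x));   // second iteration for accuracy
+// The scalar vrecpxd_f64/vrecpxs_f32 map to FRECPX, the reciprocal-exponent
+// instruction.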
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+__ai float64_t vrecped_f64(float64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
+  return __ret;
+}
+__ai float32_t vrecpes_f32(float32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
+  return __ret;
+}
+__ai float64_t vrecpxd_f64(float64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
+  return __ret;
+}
+__ai float32_t vrecpxs_f32(float32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
+  return __ret;
+}
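+// The vreinterpret_* family below is a pure bit-pattern cast between vector
+// types of the same overall size; it is implemented as a plain C cast, and
+// the generated header emits no endian-specific variant for it.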
 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
   poly8x8_t __ret;
   __ret = (poly8x8_t)(__p0);
@@ -38590,23450 +61791,6 @@
   __ret = (int16x4_t)(__p0);
   return __ret;
 }
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
-  float32x4_t __s0_142 = __p0_142; \
-  bfloat16x8_t __s1_142 = __p1_142; \
-  bfloat16x4_t __s2_142 = __p2_142; \
-  float32x4_t __ret_142; \
-bfloat16x4_t __reint_142 = __s2_142; \
-float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
-  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
-  __ret_142; \
-})
-#else
-#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
-  float32x4_t __s0_143 = __p0_143; \
-  bfloat16x8_t __s1_143 = __p1_143; \
-  bfloat16x4_t __s2_143 = __p2_143; \
-  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
-  float32x4_t __ret_143; \
-bfloat16x4_t __reint_143 = __rev2_143; \
-float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
-  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
-  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
-  __ret_143; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
-  float32x2_t __s0_144 = __p0_144; \
-  bfloat16x4_t __s1_144 = __p1_144; \
-  bfloat16x4_t __s2_144 = __p2_144; \
-  float32x2_t __ret_144; \
-bfloat16x4_t __reint_144 = __s2_144; \
-float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
-  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
-  __ret_144; \
-})
-#else
-#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  float32x2_t __s0_145 = __p0_145; \
-  bfloat16x4_t __s1_145 = __p1_145; \
-  bfloat16x4_t __s2_145 = __p2_145; \
-  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
-  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
-  float32x2_t __ret_145; \
-bfloat16x4_t __reint_145 = __rev2_145; \
-float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
-  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
-  __ret_145; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  float32x4_t __s0_146 = __p0_146; \
-  bfloat16x8_t __s1_146 = __p1_146; \
-  bfloat16x8_t __s2_146 = __p2_146; \
-  float32x4_t __ret_146; \
-bfloat16x8_t __reint_146 = __s2_146; \
-float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
-  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
-  __ret_146; \
-})
-#else
-#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  float32x4_t __s0_147 = __p0_147; \
-  bfloat16x8_t __s1_147 = __p1_147; \
-  bfloat16x8_t __s2_147 = __p2_147; \
-  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_147; \
-bfloat16x8_t __reint_147 = __rev2_147; \
-float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
-  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
-  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  float32x2_t __s0_148 = __p0_148; \
-  bfloat16x4_t __s1_148 = __p1_148; \
-  bfloat16x8_t __s2_148 = __p2_148; \
-  float32x2_t __ret_148; \
-bfloat16x8_t __reint_148 = __s2_148; \
-float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
-  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
-  __ret_148; \
-})
-#else
-#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  float32x2_t __s0_149 = __p0_149; \
-  bfloat16x4_t __s1_149 = __p1_149; \
-  bfloat16x8_t __s2_149 = __p2_149; \
-  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
-  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_149; \
-bfloat16x8_t __reint_149 = __rev2_149; \
-float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
-  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
-  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (bfloat16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
-  float32x4_t __ret_150;
-bfloat16x4_t __reint_150 = __p0_150;
-int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
-  __ret_150 = *(float32x4_t *) &__reint1_150;
-  return __ret_150;
-}
-#else
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
-  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
-  float32x4_t __ret_151;
-bfloat16x4_t __reint_151 = __rev0_151;
-int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
-  __ret_151 = *(float32x4_t *) &__reint1_151;
-  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
-  return __ret_151;
-}
-__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
-  float32x4_t __ret_152;
-bfloat16x4_t __reint_152 = __p0_152;
-int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
-  __ret_152 = *(float32x4_t *) &__reint1_152;
-  return __ret_152;
-}
-#endif
-
-__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
-  float32_t __ret;
-bfloat16_t __reint = __p0;
-int32_t __reint1 = *(int32_t *) &__reint << 16;
-  __ret = *(float32_t *) &__reint1;
-  return __ret;
-}
-__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
-  bfloat16_t __ret;
-  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
-  bfloat16x4_t __s0_153 = __p0_153; \
-  bfloat16x8_t __ret_153; \
-  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
-  __ret_153; \
-})
-#else
-#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
-  bfloat16x4_t __s0_154 = __p0_154; \
-  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_154; \
-  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
-  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_154; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
-  bfloat16x4_t __s0_155 = __p0_155; \
-  bfloat16x4_t __ret_155; \
-  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
-  __ret_155; \
-})
-#else
-#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
-  bfloat16x4_t __s0_156 = __p0_156; \
-  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_156; \
-  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
-  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
-  __ret_156; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
-  bfloat16x8_t __s0_157 = __p0_157; \
-  bfloat16x8_t __ret_157; \
-  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
-  __ret_157; \
-})
-#else
-#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
-  bfloat16x8_t __s0_158 = __p0_158; \
-  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_158; \
-  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
-  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_158; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
-  bfloat16x8_t __s0_159 = __p0_159; \
-  bfloat16x4_t __ret_159; \
-  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
-  __ret_159; \
-})
-#else
-#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
-  bfloat16x8_t __s0_160 = __p0_160; \
-  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_160; \
-  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
-  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
-  __ret_160; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-  __ret; \
-})
-#else
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-  __ret; \
-})
-#else
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __a32_vcvt_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
-  bfloat16x8_t __s0_161 = __p0_161; \
-  bfloat16x4_t __s2_161 = __p2_161; \
-  bfloat16x8_t __ret_161; \
-  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
-  __ret_161; \
-})
-#else
-#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
-  bfloat16x8_t __s0_162 = __p0_162; \
-  bfloat16x4_t __s2_162 = __p2_162; \
-  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_162; \
-  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
-  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_162; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
-  bfloat16x4_t __s0_163 = __p0_163; \
-  bfloat16x4_t __s2_163 = __p2_163; \
-  bfloat16x4_t __ret_163; \
-  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
-  __ret_163; \
-})
-#else
-#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
-  bfloat16x4_t __s0_164 = __p0_164; \
-  bfloat16x4_t __s2_164 = __p2_164; \
-  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_164; \
-  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
-  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
-  __ret_164; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
-  bfloat16x8_t __s0_165 = __p0_165; \
-  bfloat16x8_t __s2_165 = __p2_165; \
-  bfloat16x8_t __ret_165; \
-  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
-  __ret_165; \
-})
-#else
-#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
-  bfloat16x8_t __s0_166 = __p0_166; \
-  bfloat16x8_t __s2_166 = __p2_166; \
-  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_166; \
-  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
-  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_166; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
-  bfloat16x4_t __s0_167 = __p0_167; \
-  bfloat16x8_t __s2_167 = __p2_167; \
-  bfloat16x4_t __ret_167; \
-  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
-  __ret_167; \
-})
-#else
-#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
-  bfloat16x4_t __s0_168 = __p0_168; \
-  bfloat16x8_t __s2_168 = __p2_168; \
-  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_168; \
-  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
-  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
-  __ret_168; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = __a64_vcvtq_low_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
-  float32x2_t __s0_169 = __p0_169; \
-  float32x2_t __s1_169 = __p1_169; \
-  float32x2_t __s2_169 = __p2_169; \
-  float32x2_t __ret_169; \
-float32x2_t __reint_169 = __s2_169; \
-uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
-  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
-  __ret_169; \
-})
-#else
-#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
-  float32x2_t __s0_170 = __p0_170; \
-  float32x2_t __s1_170 = __p1_170; \
-  float32x2_t __s2_170 = __p2_170; \
-  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
-  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
-  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
-  float32x2_t __ret_170; \
-float32x2_t __reint_170 = __rev2_170; \
-uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
-  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
-  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
-  __ret_170; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
-  float32x4_t __s0_171 = __p0_171; \
-  float32x4_t __s1_171 = __p1_171; \
-  float32x2_t __s2_171 = __p2_171; \
-  float32x4_t __ret_171; \
-float32x2_t __reint_171 = __s2_171; \
-uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
-  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
-  __ret_171; \
-})
-#else
-#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
-  float32x4_t __s0_172 = __p0_172; \
-  float32x4_t __s1_172 = __p1_172; \
-  float32x2_t __s2_172 = __p2_172; \
-  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
-  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
-  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
-  float32x4_t __ret_172; \
-float32x2_t __reint_172 = __rev2_172; \
-uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
-  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
-  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
-  __ret_172; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
-  float32x2_t __s0_173 = __p0_173; \
-  float32x2_t __s1_173 = __p1_173; \
-  float32x4_t __s2_173 = __p2_173; \
-  float32x2_t __ret_173; \
-float32x4_t __reint_173 = __s2_173; \
-uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
-  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
-  __ret_173; \
-})
-#else
-#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
-  float32x2_t __s0_174 = __p0_174; \
-  float32x2_t __s1_174 = __p1_174; \
-  float32x4_t __s2_174 = __p2_174; \
-  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
-  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
-  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
-  float32x2_t __ret_174; \
-float32x4_t __reint_174 = __rev2_174; \
-uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
-  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
-  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
-  __ret_174; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
-  float32x4_t __s0_175 = __p0_175; \
-  float32x4_t __s1_175 = __p1_175; \
-  float32x4_t __s2_175 = __p2_175; \
-  float32x4_t __ret_175; \
-float32x4_t __reint_175 = __s2_175; \
-uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
-  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
-  __ret_175; \
-})
-#else
-#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
-  float32x4_t __s0_176 = __p0_176; \
-  float32x4_t __s1_176 = __p1_176; \
-  float32x4_t __s2_176 = __p2_176; \
-  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
-  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
-  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
-  float32x4_t __ret_176; \
-float32x4_t __reint_176 = __rev2_176; \
-uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
-  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
-  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
-  __ret_176; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
-  float32x2_t __s0_177 = __p0_177; \
-  float32x2_t __s1_177 = __p1_177; \
-  float32x2_t __s2_177 = __p2_177; \
-  float32x2_t __ret_177; \
-float32x2_t __reint_177 = __s2_177; \
-uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
-  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
-  __ret_177; \
-})
-#else
-#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
-  float32x2_t __s0_178 = __p0_178; \
-  float32x2_t __s1_178 = __p1_178; \
-  float32x2_t __s2_178 = __p2_178; \
-  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
-  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
-  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
-  float32x2_t __ret_178; \
-float32x2_t __reint_178 = __rev2_178; \
-uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
-  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
-  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
-  __ret_178; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
-  float32x4_t __s0_179 = __p0_179; \
-  float32x4_t __s1_179 = __p1_179; \
-  float32x2_t __s2_179 = __p2_179; \
-  float32x4_t __ret_179; \
-float32x2_t __reint_179 = __s2_179; \
-uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
-  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
-  __ret_179; \
-})
-#else
-#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
-  float32x4_t __s0_180 = __p0_180; \
-  float32x4_t __s1_180 = __p1_180; \
-  float32x2_t __s2_180 = __p2_180; \
-  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
-  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
-  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
-  float32x4_t __ret_180; \
-float32x2_t __reint_180 = __rev2_180; \
-uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
-  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
-  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
-  __ret_180; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
-  float32x2_t __s0_181 = __p0_181; \
-  float32x2_t __s1_181 = __p1_181; \
-  float32x4_t __s2_181 = __p2_181; \
-  float32x2_t __ret_181; \
-float32x4_t __reint_181 = __s2_181; \
-uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
-  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
-  __ret_181; \
-})
-#else
-#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
-  float32x2_t __s0_182 = __p0_182; \
-  float32x2_t __s1_182 = __p1_182; \
-  float32x4_t __s2_182 = __p2_182; \
-  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
-  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
-  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
-  float32x2_t __ret_182; \
-float32x4_t __reint_182 = __rev2_182; \
-uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
-  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
-  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
-  __ret_182; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
-  float32x4_t __s0_183 = __p0_183; \
-  float32x4_t __s1_183 = __p1_183; \
-  float32x4_t __s2_183 = __p2_183; \
-  float32x4_t __ret_183; \
-float32x4_t __reint_183 = __s2_183; \
-uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
-  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
-  __ret_183; \
-})
-#else
-#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
-  float32x4_t __s0_184 = __p0_184; \
-  float32x4_t __s1_184 = __p1_184; \
-  float32x4_t __s2_184 = __p2_184; \
-  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
-  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
-  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
-  float32x4_t __ret_184; \
-float32x4_t __reint_184 = __rev2_184; \
-uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
-  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
-  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
-  __ret_184; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
-  float32x2_t __s0_185 = __p0_185; \
-  float32x2_t __s1_185 = __p1_185; \
-  float32x2_t __s2_185 = __p2_185; \
-  float32x2_t __ret_185; \
-float32x2_t __reint_185 = __s2_185; \
-uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
-  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
-  __ret_185; \
-})
-#else
-#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
-  float32x2_t __s0_186 = __p0_186; \
-  float32x2_t __s1_186 = __p1_186; \
-  float32x2_t __s2_186 = __p2_186; \
-  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
-  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
-  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
-  float32x2_t __ret_186; \
-float32x2_t __reint_186 = __rev2_186; \
-uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
-  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
-  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
-  __ret_186; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
-  float32x4_t __s0_187 = __p0_187; \
-  float32x4_t __s1_187 = __p1_187; \
-  float32x2_t __s2_187 = __p2_187; \
-  float32x4_t __ret_187; \
-float32x2_t __reint_187 = __s2_187; \
-uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
-  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
-  __ret_187; \
-})
-#else
-#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
-  float32x4_t __s0_188 = __p0_188; \
-  float32x4_t __s1_188 = __p1_188; \
-  float32x2_t __s2_188 = __p2_188; \
-  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
-  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
-  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
-  float32x4_t __ret_188; \
-float32x2_t __reint_188 = __rev2_188; \
-uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
-  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
-  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
-  __ret_188; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
-  float32x2_t __s0_189 = __p0_189; \
-  float32x2_t __s1_189 = __p1_189; \
-  float32x4_t __s2_189 = __p2_189; \
-  float32x2_t __ret_189; \
-float32x4_t __reint_189 = __s2_189; \
-uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
-  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
-  __ret_189; \
-})
-#else
-#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
-  float32x2_t __s0_190 = __p0_190; \
-  float32x2_t __s1_190 = __p1_190; \
-  float32x4_t __s2_190 = __p2_190; \
-  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
-  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
-  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
-  float32x2_t __ret_190; \
-float32x4_t __reint_190 = __rev2_190; \
-uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
-  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
-  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
-  __ret_190; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
-  float32x4_t __s0_191 = __p0_191; \
-  float32x4_t __s1_191 = __p1_191; \
-  float32x4_t __s2_191 = __p2_191; \
-  float32x4_t __ret_191; \
-float32x4_t __reint_191 = __s2_191; \
-uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
-  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
-  __ret_191; \
-})
-#else
-#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
-  float32x4_t __s0_192 = __p0_192; \
-  float32x4_t __s1_192 = __p1_192; \
-  float32x4_t __s2_192 = __p2_192; \
-  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
-  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
-  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
-  float32x4_t __ret_192; \
-float32x4_t __reint_192 = __rev2_192; \
-uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
-  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
-  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
-  __ret_192; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
-  float32x2_t __s0_193 = __p0_193; \
-  float32x2_t __s1_193 = __p1_193; \
-  float32x2_t __s2_193 = __p2_193; \
-  float32x2_t __ret_193; \
-float32x2_t __reint_193 = __s2_193; \
-uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
-  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
-  __ret_193; \
-})
-#else
-#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
-  float32x2_t __s0_194 = __p0_194; \
-  float32x2_t __s1_194 = __p1_194; \
-  float32x2_t __s2_194 = __p2_194; \
-  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
-  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
-  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
-  float32x2_t __ret_194; \
-float32x2_t __reint_194 = __rev2_194; \
-uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
-  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
-  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
-  __ret_194; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
-  float32x4_t __s0_195 = __p0_195; \
-  float32x4_t __s1_195 = __p1_195; \
-  float32x2_t __s2_195 = __p2_195; \
-  float32x4_t __ret_195; \
-float32x2_t __reint_195 = __s2_195; \
-uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
-  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
-  __ret_195; \
-})
-#else
-#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
-  float32x4_t __s0_196 = __p0_196; \
-  float32x4_t __s1_196 = __p1_196; \
-  float32x2_t __s2_196 = __p2_196; \
-  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
-  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
-  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
-  float32x4_t __ret_196; \
-float32x2_t __reint_196 = __rev2_196; \
-uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
-  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
-  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
-  __ret_196; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
-  float32x2_t __s0_197 = __p0_197; \
-  float32x2_t __s1_197 = __p1_197; \
-  float32x4_t __s2_197 = __p2_197; \
-  float32x2_t __ret_197; \
-float32x4_t __reint_197 = __s2_197; \
-uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
-  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
-  __ret_197; \
-})
-#else
-#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
-  float32x2_t __s0_198 = __p0_198; \
-  float32x2_t __s1_198 = __p1_198; \
-  float32x4_t __s2_198 = __p2_198; \
-  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
-  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
-  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
-  float32x2_t __ret_198; \
-float32x4_t __reint_198 = __rev2_198; \
-uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
-  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
-  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
-  __ret_198; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
-  float32x4_t __s0_199 = __p0_199; \
-  float32x4_t __s1_199 = __p1_199; \
-  float32x4_t __s2_199 = __p2_199; \
-  float32x4_t __ret_199; \
-float32x4_t __reint_199 = __s2_199; \
-uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
-  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
-  __ret_199; \
-})
-#else
-#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
-  float32x4_t __s0_200 = __p0_200; \
-  float32x4_t __s1_200 = __p1_200; \
-  float32x4_t __s2_200 = __p2_200; \
-  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
-  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
-  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
-  float32x4_t __ret_200; \
-float32x4_t __reint_200 = __rev2_200; \
-uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
-  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
-  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
-  __ret_200; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
-  float16x4_t __s0_201 = __p0_201; \
-  float16x4_t __s1_201 = __p1_201; \
-  float16x4_t __s2_201 = __p2_201; \
-  float16x4_t __ret_201; \
-float16x4_t __reint_201 = __s2_201; \
-uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
-  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
-  __ret_201; \
-})
-#else
-#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
-  float16x4_t __s0_202 = __p0_202; \
-  float16x4_t __s1_202 = __p1_202; \
-  float16x4_t __s2_202 = __p2_202; \
-  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
-  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
-  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
-  float16x4_t __ret_202; \
-float16x4_t __reint_202 = __rev2_202; \
-uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
-  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
-  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
-  __ret_202; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
-  float16x8_t __s0_203 = __p0_203; \
-  float16x8_t __s1_203 = __p1_203; \
-  float16x4_t __s2_203 = __p2_203; \
-  float16x8_t __ret_203; \
-float16x4_t __reint_203 = __s2_203; \
-uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
-  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
-  __ret_203; \
-})
-#else
-#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
-  float16x8_t __s0_204 = __p0_204; \
-  float16x8_t __s1_204 = __p1_204; \
-  float16x4_t __s2_204 = __p2_204; \
-  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
-  float16x8_t __ret_204; \
-float16x4_t __reint_204 = __rev2_204; \
-uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
-  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
-  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_204; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
-  float16x4_t __s0_205 = __p0_205; \
-  float16x4_t __s1_205 = __p1_205; \
-  float16x8_t __s2_205 = __p2_205; \
-  float16x4_t __ret_205; \
-float16x8_t __reint_205 = __s2_205; \
-uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
-  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
-  __ret_205; \
-})
-#else
-#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
-  float16x4_t __s0_206 = __p0_206; \
-  float16x4_t __s1_206 = __p1_206; \
-  float16x8_t __s2_206 = __p2_206; \
-  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
-  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
-  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_206; \
-float16x8_t __reint_206 = __rev2_206; \
-uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
-  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
-  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
-  __ret_206; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
-  float16x8_t __s0_207 = __p0_207; \
-  float16x8_t __s1_207 = __p1_207; \
-  float16x8_t __s2_207 = __p2_207; \
-  float16x8_t __ret_207; \
-float16x8_t __reint_207 = __s2_207; \
-uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
-  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
-  __ret_207; \
-})
-#else
-#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
-  float16x8_t __s0_208 = __p0_208; \
-  float16x8_t __s1_208 = __p1_208; \
-  float16x8_t __s2_208 = __p2_208; \
-  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_208; \
-float16x8_t __reint_208 = __rev2_208; \
-uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
-  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
-  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_208; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
-  float16x4_t __s0_209 = __p0_209; \
-  float16x4_t __s1_209 = __p1_209; \
-  float16x4_t __s2_209 = __p2_209; \
-  float16x4_t __ret_209; \
-float16x4_t __reint_209 = __s2_209; \
-uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
-  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
-  __ret_209; \
-})
-#else
-#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
-  float16x4_t __s0_210 = __p0_210; \
-  float16x4_t __s1_210 = __p1_210; \
-  float16x4_t __s2_210 = __p2_210; \
-  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
-  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
-  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
-  float16x4_t __ret_210; \
-float16x4_t __reint_210 = __rev2_210; \
-uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
-  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
-  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
-  __ret_210; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
-  float16x8_t __s0_211 = __p0_211; \
-  float16x8_t __s1_211 = __p1_211; \
-  float16x4_t __s2_211 = __p2_211; \
-  float16x8_t __ret_211; \
-float16x4_t __reint_211 = __s2_211; \
-uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
-  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
-  __ret_211; \
-})
-#else
-#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
-  float16x8_t __s0_212 = __p0_212; \
-  float16x8_t __s1_212 = __p1_212; \
-  float16x4_t __s2_212 = __p2_212; \
-  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
-  float16x8_t __ret_212; \
-float16x4_t __reint_212 = __rev2_212; \
-uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
-  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
-  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_212; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
-  float16x4_t __s0_213 = __p0_213; \
-  float16x4_t __s1_213 = __p1_213; \
-  float16x8_t __s2_213 = __p2_213; \
-  float16x4_t __ret_213; \
-float16x8_t __reint_213 = __s2_213; \
-uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
-  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
-  __ret_213; \
-})
-#else
-#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
-  float16x4_t __s0_214 = __p0_214; \
-  float16x4_t __s1_214 = __p1_214; \
-  float16x8_t __s2_214 = __p2_214; \
-  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
-  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
-  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_214; \
-float16x8_t __reint_214 = __rev2_214; \
-uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
-  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
-  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
-  __ret_214; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
-  float16x8_t __s0_215 = __p0_215; \
-  float16x8_t __s1_215 = __p1_215; \
-  float16x8_t __s2_215 = __p2_215; \
-  float16x8_t __ret_215; \
-float16x8_t __reint_215 = __s2_215; \
-uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
-  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
-  __ret_215; \
-})
-#else
-#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
-  float16x8_t __s0_216 = __p0_216; \
-  float16x8_t __s1_216 = __p1_216; \
-  float16x8_t __s2_216 = __p2_216; \
-  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_216; \
-float16x8_t __reint_216 = __rev2_216; \
-uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
-  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
-  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_216; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
-  float16x4_t __s0_217 = __p0_217; \
-  float16x4_t __s1_217 = __p1_217; \
-  float16x4_t __s2_217 = __p2_217; \
-  float16x4_t __ret_217; \
-float16x4_t __reint_217 = __s2_217; \
-uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
-  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
-  __ret_217; \
-})
-#else
-#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
-  float16x4_t __s0_218 = __p0_218; \
-  float16x4_t __s1_218 = __p1_218; \
-  float16x4_t __s2_218 = __p2_218; \
-  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
-  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
-  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
-  float16x4_t __ret_218; \
-float16x4_t __reint_218 = __rev2_218; \
-uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
-  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
-  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
-  __ret_218; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
-  float16x8_t __s0_219 = __p0_219; \
-  float16x8_t __s1_219 = __p1_219; \
-  float16x4_t __s2_219 = __p2_219; \
-  float16x8_t __ret_219; \
-float16x4_t __reint_219 = __s2_219; \
-uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
-  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
-  __ret_219; \
-})
-#else
-#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
-  float16x8_t __s0_220 = __p0_220; \
-  float16x8_t __s1_220 = __p1_220; \
-  float16x4_t __s2_220 = __p2_220; \
-  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
-  float16x8_t __ret_220; \
-float16x4_t __reint_220 = __rev2_220; \
-uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
-  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
-  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_220; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
-  float16x4_t __s0_221 = __p0_221; \
-  float16x4_t __s1_221 = __p1_221; \
-  float16x8_t __s2_221 = __p2_221; \
-  float16x4_t __ret_221; \
-float16x8_t __reint_221 = __s2_221; \
-uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
-  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
-  __ret_221; \
-})
-#else
-#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
-  float16x4_t __s0_222 = __p0_222; \
-  float16x4_t __s1_222 = __p1_222; \
-  float16x8_t __s2_222 = __p2_222; \
-  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
-  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
-  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_222; \
-float16x8_t __reint_222 = __rev2_222; \
-uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
-  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
-  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
-  __ret_222; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
-  float16x8_t __s0_223 = __p0_223; \
-  float16x8_t __s1_223 = __p1_223; \
-  float16x8_t __s2_223 = __p2_223; \
-  float16x8_t __ret_223; \
-float16x8_t __reint_223 = __s2_223; \
-uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
-  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
-  __ret_223; \
-})
-#else
-#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
-  float16x8_t __s0_224 = __p0_224; \
-  float16x8_t __s1_224 = __p1_224; \
-  float16x8_t __s2_224 = __p2_224; \
-  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_224; \
-float16x8_t __reint_224 = __rev2_224; \
-uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
-  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
-  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_224; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
-  float16x4_t __s0_225 = __p0_225; \
-  float16x4_t __s1_225 = __p1_225; \
-  float16x4_t __s2_225 = __p2_225; \
-  float16x4_t __ret_225; \
-float16x4_t __reint_225 = __s2_225; \
-uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
-  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
-  __ret_225; \
-})
-#else
-#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
-  float16x4_t __s0_226 = __p0_226; \
-  float16x4_t __s1_226 = __p1_226; \
-  float16x4_t __s2_226 = __p2_226; \
-  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
-  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
-  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
-  float16x4_t __ret_226; \
-float16x4_t __reint_226 = __rev2_226; \
-uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
-  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
-  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
-  __ret_226; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
-  float16x8_t __s0_227 = __p0_227; \
-  float16x8_t __s1_227 = __p1_227; \
-  float16x4_t __s2_227 = __p2_227; \
-  float16x8_t __ret_227; \
-float16x4_t __reint_227 = __s2_227; \
-uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
-  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
-  __ret_227; \
-})
-#else
-#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
-  float16x8_t __s0_228 = __p0_228; \
-  float16x8_t __s1_228 = __p1_228; \
-  float16x4_t __s2_228 = __p2_228; \
-  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
-  float16x8_t __ret_228; \
-float16x4_t __reint_228 = __rev2_228; \
-uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
-  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
-  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_228; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
-  float16x4_t __s0_229 = __p0_229; \
-  float16x4_t __s1_229 = __p1_229; \
-  float16x8_t __s2_229 = __p2_229; \
-  float16x4_t __ret_229; \
-float16x8_t __reint_229 = __s2_229; \
-uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
-  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
-  __ret_229; \
-})
-#else
-#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
-  float16x4_t __s0_230 = __p0_230; \
-  float16x4_t __s1_230 = __p1_230; \
-  float16x8_t __s2_230 = __p2_230; \
-  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
-  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
-  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_230; \
-float16x8_t __reint_230 = __rev2_230; \
-uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
-  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
-  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
-  __ret_230; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
-  float16x8_t __s0_231 = __p0_231; \
-  float16x8_t __s1_231 = __p1_231; \
-  float16x8_t __s2_231 = __p2_231; \
-  float16x8_t __ret_231; \
-float16x8_t __reint_231 = __s2_231; \
-uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
-  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
-  __ret_231; \
-})
-#else
-#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
-  float16x8_t __s0_232 = __p0_232; \
-  float16x8_t __s1_232 = __p1_232; \
-  float16x8_t __s2_232 = __p2_232; \
-  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_232; \
-float16x8_t __reint_232 = __rev2_232; \
-uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
-  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
-  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_232; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
-  float64x1_t __s0_233 = __p0_233; \
-  float64x1_t __s1_233 = __p1_233; \
-  float64x1_t __s2_233 = __p2_233; \
-  float64x1_t __ret_233; \
-float64x1_t __reint_233 = __s2_233; \
-uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
-  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
-  __ret_233; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
-  float64x2_t __s0_234 = __p0_234; \
-  float64x2_t __s1_234 = __p1_234; \
-  float64x1_t __s2_234 = __p2_234; \
-  float64x2_t __ret_234; \
-float64x1_t __reint_234 = __s2_234; \
-uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
-  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
-  __ret_234; \
-})
-#else
-#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
-  float64x2_t __s0_235 = __p0_235; \
-  float64x2_t __s1_235 = __p1_235; \
-  float64x1_t __s2_235 = __p2_235; \
-  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
-  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
-  float64x2_t __ret_235; \
-float64x1_t __reint_235 = __s2_235; \
-uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
-  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
-  __ret_235; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
-  float64x1_t __s0_236 = __p0_236; \
-  float64x1_t __s1_236 = __p1_236; \
-  float64x2_t __s2_236 = __p2_236; \
-  float64x1_t __ret_236; \
-float64x2_t __reint_236 = __s2_236; \
-uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
-  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
-  __ret_236; \
-})
-#else
-#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
-  float64x1_t __s0_237 = __p0_237; \
-  float64x1_t __s1_237 = __p1_237; \
-  float64x2_t __s2_237 = __p2_237; \
-  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
-  float64x1_t __ret_237; \
-float64x2_t __reint_237 = __rev2_237; \
-uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
-  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
-  float64x2_t __s0_238 = __p0_238; \
-  float64x2_t __s1_238 = __p1_238; \
-  float64x2_t __s2_238 = __p2_238; \
-  float64x2_t __ret_238; \
-float64x2_t __reint_238 = __s2_238; \
-uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
-  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
-  __ret_238; \
-})
-#else
-#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
-  float64x2_t __s0_239 = __p0_239; \
-  float64x2_t __s1_239 = __p1_239; \
-  float64x2_t __s2_239 = __p2_239; \
-  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
-  float64x2_t __ret_239; \
-float64x2_t __reint_239 = __rev2_239; \
-uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
-  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
-  float64x1_t __s0_240 = __p0_240; \
-  float64x1_t __s1_240 = __p1_240; \
-  float64x1_t __s2_240 = __p2_240; \
-  float64x1_t __ret_240; \
-float64x1_t __reint_240 = __s2_240; \
-uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
-  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
-  __ret_240; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
-  float64x2_t __s0_241 = __p0_241; \
-  float64x2_t __s1_241 = __p1_241; \
-  float64x1_t __s2_241 = __p2_241; \
-  float64x2_t __ret_241; \
-float64x1_t __reint_241 = __s2_241; \
-uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
-  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
-  __ret_241; \
-})
-#else
-#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
-  float64x2_t __s0_242 = __p0_242; \
-  float64x2_t __s1_242 = __p1_242; \
-  float64x1_t __s2_242 = __p2_242; \
-  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
-  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
-  float64x2_t __ret_242; \
-float64x1_t __reint_242 = __s2_242; \
-uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
-  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
-  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
-  __ret_242; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
-  float64x1_t __s0_243 = __p0_243; \
-  float64x1_t __s1_243 = __p1_243; \
-  float64x2_t __s2_243 = __p2_243; \
-  float64x1_t __ret_243; \
-float64x2_t __reint_243 = __s2_243; \
-uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
-  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
-  __ret_243; \
-})
-#else
-#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
-  float64x1_t __s0_244 = __p0_244; \
-  float64x1_t __s1_244 = __p1_244; \
-  float64x2_t __s2_244 = __p2_244; \
-  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
-  float64x1_t __ret_244; \
-float64x2_t __reint_244 = __rev2_244; \
-uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
-  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
-  __ret_244; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
-  float64x2_t __s0_245 = __p0_245; \
-  float64x2_t __s1_245 = __p1_245; \
-  float64x2_t __s2_245 = __p2_245; \
-  float64x2_t __ret_245; \
-float64x2_t __reint_245 = __s2_245; \
-uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
-  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
-  __ret_245; \
-})
-#else
-#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
-  float64x2_t __s0_246 = __p0_246; \
-  float64x2_t __s1_246 = __p1_246; \
-  float64x2_t __s2_246 = __p2_246; \
-  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
-  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
-  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
-  float64x2_t __ret_246; \
-float64x2_t __reint_246 = __rev2_246; \
-uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
-  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
-  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
-  __ret_246; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
-  float64x1_t __s0_247 = __p0_247; \
-  float64x1_t __s1_247 = __p1_247; \
-  float64x1_t __s2_247 = __p2_247; \
-  float64x1_t __ret_247; \
-float64x1_t __reint_247 = __s2_247; \
-uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
-  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
-  __ret_247; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
-  float64x2_t __s0_248 = __p0_248; \
-  float64x2_t __s1_248 = __p1_248; \
-  float64x1_t __s2_248 = __p2_248; \
-  float64x2_t __ret_248; \
-float64x1_t __reint_248 = __s2_248; \
-uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
-  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
-  __ret_248; \
-})
-#else
-#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
-  float64x2_t __s0_249 = __p0_249; \
-  float64x2_t __s1_249 = __p1_249; \
-  float64x1_t __s2_249 = __p2_249; \
-  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
-  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
-  float64x2_t __ret_249; \
-float64x1_t __reint_249 = __s2_249; \
-uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
-  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
-  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
-  __ret_249; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
-  float64x1_t __s0_250 = __p0_250; \
-  float64x1_t __s1_250 = __p1_250; \
-  float64x2_t __s2_250 = __p2_250; \
-  float64x1_t __ret_250; \
-float64x2_t __reint_250 = __s2_250; \
-uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
-  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
-  __ret_250; \
-})
-#else
-#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
-  float64x1_t __s0_251 = __p0_251; \
-  float64x1_t __s1_251 = __p1_251; \
-  float64x2_t __s2_251 = __p2_251; \
-  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
-  float64x1_t __ret_251; \
-float64x2_t __reint_251 = __rev2_251; \
-uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
-  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
-  __ret_251; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
-  float64x2_t __s0_252 = __p0_252; \
-  float64x2_t __s1_252 = __p1_252; \
-  float64x2_t __s2_252 = __p2_252; \
-  float64x2_t __ret_252; \
-float64x2_t __reint_252 = __s2_252; \
-uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
-  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
-  __ret_252; \
-})
-#else
-#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
-  float64x2_t __s0_253 = __p0_253; \
-  float64x2_t __s1_253 = __p1_253; \
-  float64x2_t __s2_253 = __p2_253; \
-  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
-  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
-  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
-  float64x2_t __ret_253; \
-float64x2_t __reint_253 = __rev2_253; \
-uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
-  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
-  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
-  __ret_253; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
-  float64x1_t __s0_254 = __p0_254; \
-  float64x1_t __s1_254 = __p1_254; \
-  float64x1_t __s2_254 = __p2_254; \
-  float64x1_t __ret_254; \
-float64x1_t __reint_254 = __s2_254; \
-uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
-  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
-  __ret_254; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
-  float64x2_t __s0_255 = __p0_255; \
-  float64x2_t __s1_255 = __p1_255; \
-  float64x1_t __s2_255 = __p2_255; \
-  float64x2_t __ret_255; \
-float64x1_t __reint_255 = __s2_255; \
-uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
-  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
-  __ret_255; \
-})
-#else
-#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
-  float64x2_t __s0_256 = __p0_256; \
-  float64x2_t __s1_256 = __p1_256; \
-  float64x1_t __s2_256 = __p2_256; \
-  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
-  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
-  float64x2_t __ret_256; \
-float64x1_t __reint_256 = __s2_256; \
-uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
-  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
-  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
-  __ret_256; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
-  float64x1_t __s0_257 = __p0_257; \
-  float64x1_t __s1_257 = __p1_257; \
-  float64x2_t __s2_257 = __p2_257; \
-  float64x1_t __ret_257; \
-float64x2_t __reint_257 = __s2_257; \
-uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
-  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
-  __ret_257; \
-})
-#else
-#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float64x1_t __s0_258 = __p0_258; \
-  float64x1_t __s1_258 = __p1_258; \
-  float64x2_t __s2_258 = __p2_258; \
-  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
-  float64x1_t __ret_258; \
-float64x2_t __reint_258 = __rev2_258; \
-uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
-  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
-  __ret_258; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float64x2_t __s0_259 = __p0_259; \
-  float64x2_t __s1_259 = __p1_259; \
-  float64x2_t __s2_259 = __p2_259; \
-  float64x2_t __ret_259; \
-float64x2_t __reint_259 = __s2_259; \
-uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
-  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
-  __ret_259; \
-})
-#else
-#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float64x2_t __s0_260 = __p0_260; \
-  float64x2_t __s1_260 = __p1_260; \
-  float64x2_t __s2_260 = __p2_260; \
-  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
-  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
-  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
-  float64x2_t __ret_260; \
-float64x2_t __reint_260 = __rev2_260; \
-uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
-  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
-  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
-  __ret_260; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  uint32x4_t __s0_261 = __p0_261; \
-  uint8x16_t __s1_261 = __p1_261; \
-  uint8x8_t __s2_261 = __p2_261; \
-  uint32x4_t __ret_261; \
-uint8x8_t __reint_261 = __s2_261; \
-uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
-  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
-  __ret_261; \
-})
-#else
-#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  uint32x4_t __s0_262 = __p0_262; \
-  uint8x16_t __s1_262 = __p1_262; \
-  uint8x8_t __s2_262 = __p2_262; \
-  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
-  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_262; \
-uint8x8_t __reint_262 = __rev2_262; \
-uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
-  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
-  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
-  __ret_262; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  int32x4_t __s0_263 = __p0_263; \
-  int8x16_t __s1_263 = __p1_263; \
-  int8x8_t __s2_263 = __p2_263; \
-  int32x4_t __ret_263; \
-int8x8_t __reint_263 = __s2_263; \
-int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
-  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
-  __ret_263; \
-})
-#else
-#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  int32x4_t __s0_264 = __p0_264; \
-  int8x16_t __s1_264 = __p1_264; \
-  int8x8_t __s2_264 = __p2_264; \
-  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
-  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_264; \
-int8x8_t __reint_264 = __rev2_264; \
-int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
-  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
-  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
-  __ret_264; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  uint32x2_t __s0_265 = __p0_265; \
-  uint8x8_t __s1_265 = __p1_265; \
-  uint8x8_t __s2_265 = __p2_265; \
-  uint32x2_t __ret_265; \
-uint8x8_t __reint_265 = __s2_265; \
-uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
-  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
-  __ret_265; \
-})
-#else
-#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  uint32x2_t __s0_266 = __p0_266; \
-  uint8x8_t __s1_266 = __p1_266; \
-  uint8x8_t __s2_266 = __p2_266; \
-  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
-  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_266; \
-uint8x8_t __reint_266 = __rev2_266; \
-uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
-  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
-  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
-  __ret_266; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  int32x2_t __s0_267 = __p0_267; \
-  int8x8_t __s1_267 = __p1_267; \
-  int8x8_t __s2_267 = __p2_267; \
-  int32x2_t __ret_267; \
-int8x8_t __reint_267 = __s2_267; \
-int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
-  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
-  __ret_267; \
-})
-#else
-#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  int32x2_t __s0_268 = __p0_268; \
-  int8x8_t __s1_268 = __p1_268; \
-  int8x8_t __s2_268 = __p2_268; \
-  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
-  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_268; \
-int8x8_t __reint_268 = __rev2_268; \
-int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
-  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
-  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
-  __ret_268; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  uint32x4_t __s0_269 = __p0_269; \
-  uint8x16_t __s1_269 = __p1_269; \
-  uint8x16_t __s2_269 = __p2_269; \
-  uint32x4_t __ret_269; \
-uint8x16_t __reint_269 = __s2_269; \
-uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
-  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
-  __ret_269; \
-})
-#else
-#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  uint32x4_t __s0_270 = __p0_270; \
-  uint8x16_t __s1_270 = __p1_270; \
-  uint8x16_t __s2_270 = __p2_270; \
-  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
-  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_270; \
-uint8x16_t __reint_270 = __rev2_270; \
-uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
-  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
-  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
-  __ret_270; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  int32x4_t __s0_271 = __p0_271; \
-  int8x16_t __s1_271 = __p1_271; \
-  int8x16_t __s2_271 = __p2_271; \
-  int32x4_t __ret_271; \
-int8x16_t __reint_271 = __s2_271; \
-int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
-  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
-  __ret_271; \
-})
-#else
-#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  int32x4_t __s0_272 = __p0_272; \
-  int8x16_t __s1_272 = __p1_272; \
-  int8x16_t __s2_272 = __p2_272; \
-  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
-  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_272; \
-int8x16_t __reint_272 = __rev2_272; \
-int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
-  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
-  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
-  __ret_272; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  uint32x2_t __s0_273 = __p0_273; \
-  uint8x8_t __s1_273 = __p1_273; \
-  uint8x16_t __s2_273 = __p2_273; \
-  uint32x2_t __ret_273; \
-uint8x16_t __reint_273 = __s2_273; \
-uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
-  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
-  __ret_273; \
-})
-#else
-#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  uint32x2_t __s0_274 = __p0_274; \
-  uint8x8_t __s1_274 = __p1_274; \
-  uint8x16_t __s2_274 = __p2_274; \
-  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
-  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_274; \
-uint8x16_t __reint_274 = __rev2_274; \
-uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
-  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
-  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
-  __ret_274; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  int32x2_t __s0_275 = __p0_275; \
-  int8x8_t __s1_275 = __p1_275; \
-  int8x16_t __s2_275 = __p2_275; \
-  int32x2_t __ret_275; \
-int8x16_t __reint_275 = __s2_275; \
-int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
-  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
-  __ret_275; \
-})
-#else
-#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  int32x2_t __s0_276 = __p0_276; \
-  int8x8_t __s1_276 = __p1_276; \
-  int8x16_t __s2_276 = __p2_276; \
-  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
-  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_276; \
-int8x16_t __reint_276 = __rev2_276; \
-int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
-  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
-  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
-  __ret_276; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
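-
-/* vcage/vcagt/vcale/vcalt compare absolute values lane by lane, returning
- * 0xFFFF in each uint16 lane where |__p0| OP |__p1| holds and 0x0000 where
- * it does not. As throughout this header, the big-endian variant reverses
- * the lanes of each operand, calls the same builtin, and reverses the result
- * so that architectural lane numbering is preserved. Sketch (illustrative
- * only):
- *
- *   float16x4_t a = vdup_n_f16(-2.0);
- *   float16x4_t b = vdup_n_f16(1.5);
- *   uint16x4_t m = vcagt_f16(a, b);  // |-2.0| > |1.5|: every lane 0xFFFF
- */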
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
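-
-/* vceq lowers to the C == operator on vector types, so each lane becomes
- * all-ones or all-zeros, while the zero-comparison forms (vceqz here and
- * vcgez/vcgtz/vclez/vcltz below) go through dedicated builtins that map to
- * the compare-against-zero instructions. */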
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
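-
-/* Every comparison above returns a uint16 mask, which composes directly with
- * the bitwise-select intrinsic. A branch-free lane-wise select, sketched
- * under the assumption that vbsl_f16 is available (illustrative only):
- *
- *   // given float16x4_t a, b:
- *   uint16x4_t m = vcgt_f16(a, b);      // 0xFFFF where a > b, else 0x0000
- *   float16x4_t r = vbsl_f16(m, a, b);  // a where the mask is set, else b
- */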
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
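-
-/* vcvt_f16_s16 / vcvt_f16_u16 convert each 16-bit integer lane to half
- * precision; the reverse forms further below truncate toward zero. The final
- * builtin argument is an internal element-type discriminator (element kind
- * plus unsigned and quad flag bits), not a rounding mode. */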
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
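-
-/* The _n_ conversions treat the integer side as fixed point: __p1 gives the
- * number of fractional bits and must be a constant expression in [1, 16].
- * Sketch (illustrative only), converting Q8.8 samples to half precision:
- *
- *   int16x4_t q = {256, 512, -128, 0};     // 1.0, 2.0, -0.5, 0.0 in Q8.8
- *   float16x4_t h = vcvt_n_f16_s16(q, 8);  // scales by 2^-8 while converting
- */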
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
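-
-/* The vcvta/vcvtm/vcvtn/vcvtp families select the rounding mode of the
- * float-to-integer conversion: 'a' rounds to nearest with ties away from
- * zero, 'm' rounds toward minus infinity (floor), 'n' rounds to nearest
- * even, and 'p' rounds toward plus infinity (ceiling); plain vcvt above
- * truncates toward zero. */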
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
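-
-/* vext concatenates its operands and extracts a full vector starting at lane
- * __p2 of the first; __p2 must be constant. Sketch (illustrative only):
- *
- *   float16x4_t a = {0, 1, 2, 3};
- *   float16x4_t b = {4, 5, 6, 7};
- *   float16x4_t r = vext_f16(a, b, 1);  // r = {1, 2, 3, 4}
- */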
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
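-
-/* vfma computes __p0 + __p1 * __p2 in one fused operation with a single
- * rounding step; vfms just negates the second operand, yielding
- * __p0 - __p1 * __p2. The __noswap_ helpers let the big-endian vfms body
- * call the fma builtin on already-reversed vectors without reversing them a
- * second time. */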
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
-  float16x8_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x8_t __ret_277; \
-  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
-  __ret_277; \
-})
-#else
-#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
-  float16x8_t __s0_278 = __p0_278; \
-  float16x4_t __s1_278 = __p1_278; \
-  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
-  float16x8_t __ret_278; \
-  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
-  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_278; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
-  float16x4_t __s0_279 = __p0_279; \
-  float16x4_t __s1_279 = __p1_279; \
-  float16x4_t __ret_279; \
-  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
-  __ret_279; \
-})
-#else
-#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
-  float16x4_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
-  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
-  float16x4_t __ret_280; \
-  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
-  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
-  __ret_280; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
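-
-/* Three multiply flavors: vmul takes two vectors, vmulq_lane/vmul_lane
- * broadcast one constant-indexed lane of the second operand through the
- * splat helpers, and vmul_n splats a scalar with a compound literal.
- * Sketch (illustrative only):
- *
- *   // given float16x4_t v:
- *   float16x4_t halved = vmul_n_f16(v, 0.5);  // scale every lane by 0.5
- */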
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
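-
-/* The pairwise ops appear only in 64-bit form here: vpadd_f16 sums adjacent
- * lane pairs, drawing the low half of the result from __p0 and the high half
- * from __p1, while vpmax/vpmin keep the larger/smaller element of each pair.
- * For example, vpadd_f16({a0,a1,a2,a3}, {b0,b1,b2,b3}) yields
- * {a0+a1, a2+a3, b0+b1, b2+b3}. */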
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
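-
-/* vrecpe/vrsqrte produce low-precision (roughly 8-bit) estimates of 1/x and
- * 1/sqrt(x); vrecps/vrsqrts are the matching Newton-Raphson step operations.
- * One refinement of a reciprocal estimate, sketched (illustrative only):
- *
- *   // given float16x4_t x:
- *   float16x4_t e = vrecpe_f16(x);      // coarse estimate of 1/x
- *   e = vmul_f16(e, vrecps_f16(x, e));  // vrecps returns 2 - x*e
- */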
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
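-
-/* vtrn/vuzp/vzip return both permutation results as an x2 struct filled in
- * through the builtin's out-pointer: vzip interleaves its inputs, vuzp
- * de-interleaves even and odd lanes, and vtrn transposes 2x2 blocks. For
- * example, vzip_f16({a0,a1,a2,a3}, {b0,b1,b2,b3}).val[0] is {a0,b0,a1,b1}. */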
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
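
/*
 * Semantics of the vfma*_lane/_laneq/_n forms above: one lane of the last
 * vector argument (or the scalar, for the _n forms) is broadcast and then
 * fused multiply-added per lane, i.e. ret[i] = acc[i] + x[i] * y[lane],
 * with a single rounding. An equivalent hand-written form of the _n case,
 * as a sketch (hypothetical helper; assumes <arm_neon.h> and FP16
 * arithmetic support):
 */
static inline float16x8_t fma_by_scalar(float16x8_t acc, float16x8_t x,
                                        float16_t y) {
  /* Same result as vfmaq_n_f16(acc, x, y). */
  return vfmaq_f16(acc, x, vdupq_n_f16(y));
}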
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float16_t __s0_281 = __p0_281; \
-  float16_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float16_t __ret_281; \
-  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
-  __ret_281; \
-})
-#else
-#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float16_t __s0_282 = __p0_282; \
-  float16_t __s1_282 = __p1_282; \
-  float16x4_t __s2_282 = __p2_282; \
-  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
-  float16_t __ret_282; \
-  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
-  __ret_282; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float16x8_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x4_t __s2_283 = __p2_283; \
-  float16x8_t __ret_283; \
-  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
-  __ret_283; \
-})
-#else
-#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float16x8_t __s0_284 = __p0_284; \
-  float16x8_t __s1_284 = __p1_284; \
-  float16x4_t __s2_284 = __p2_284; \
-  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
-  float16x8_t __ret_284; \
-  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
-  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_284; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float16x4_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x4_t __s2_285 = __p2_285; \
-  float16x4_t __ret_285; \
-  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
-  __ret_285; \
-})
-#else
-#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float16x4_t __s0_286 = __p0_286; \
-  float16x4_t __s1_286 = __p1_286; \
-  float16x4_t __s2_286 = __p2_286; \
-  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
-  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
-  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
-  float16x4_t __ret_286; \
-  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
-  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
-  __ret_286; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float16_t __s0_287 = __p0_287; \
-  float16_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float16_t __ret_287; \
-  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
-  __ret_287; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float16_t __s0_288 = __p0_288; \
-  float16_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_288; \
-  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
-  __ret_288; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float16x8_t __s0_289 = __p0_289; \
-  float16x8_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float16x8_t __ret_289; \
-  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
-  __ret_289; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
-  float16x8_t __s0_290 = __p0_290; \
-  float16x8_t __s1_290 = __p1_290; \
-  float16x8_t __s2_290 = __p2_290; \
-  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_290; \
-  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
-  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_290; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
-  float16x4_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x8_t __s2_291 = __p2_291; \
-  float16x4_t __ret_291; \
-  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
-  __ret_291; \
-})
-#else
-#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
-  float16x4_t __s0_292 = __p0_292; \
-  float16x4_t __s1_292 = __p1_292; \
-  float16x8_t __s2_292 = __p2_292; \
-  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
-  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
-  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_292; \
-  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
-  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
-  __ret_292; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
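
/*
 * The vfms* family above defines no new builtins: it negates the first
 * multiplicand and reuses vfma*, relying on a - b*c == a + (-b)*c. The
 * rewrite is exact because negating a float16 only flips the sign bit, so
 * fusion and rounding are unchanged. Equivalent sketch of vfms_n_f16
 * (hypothetical helper; assumes <arm_neon.h> and FP16 arithmetic):
 */
static inline float16x4_t fms_n_ref(float16x4_t acc, float16x4_t x,
                                    float16_t y) {
  /* Matches vfms_n_f16(acc, x, y): acc[i] - x[i] * y, fused. */
  return vfma_f16(acc, vneg_f16(x), vdup_n_f16(y));
}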
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
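
/*
 * The across-vector reductions above differ only in NaN handling:
 * vmaxv/vminv propagate NaN operands, while the *nm* forms follow IEEE-754
 * maxNum/minNum and prefer a number over a quiet NaN. A scalar reference
 * model for vmaxnmv_f16, offered as an assumption-labelled sketch rather
 * than the architected definition (assumes <arm_neon.h>):
 */
static inline float16_t maxnmv_ref(float16x4_t v) {
  float16_t buf[4];
  vst1_f16(buf, v);              /* spill the four lanes to memory */
  float16_t m = buf[0];
  for (int i = 1; i < 4; ++i)
    /* maxNum: take buf[i] if it is larger, or if m is NaN (m != m). */
    m = (buf[i] > m || m != m) ? buf[i] : m;
  return m;
}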
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16x8_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __ret_293; \
-  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
-  __ret_293; \
-})
-#else
-#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
-  float16x8_t __s0_294 = __p0_294; \
-  float16x8_t __s1_294 = __p1_294; \
-  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_294; \
-  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
-  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_294; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
-  float16x4_t __s0_295 = __p0_295; \
-  float16x8_t __s1_295 = __p1_295; \
-  float16x4_t __ret_295; \
-  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
-  __ret_295; \
-})
-#else
-#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
-  float16x4_t __s0_296 = __p0_296; \
-  float16x8_t __s1_296 = __p1_296; \
-  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
-  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_296; \
-  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
-  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
-  __ret_296; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
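
/*
 * vmulx* above lowers to FMULX ("multiply extended"): the same as an
 * ordinary multiply except that 0 * +/-infinity yields +/-2.0 instead of a
 * NaN, which keeps Newton-style reciprocal-step sequences finite. A scalar
 * sketch of that rule, written in float for portability (an illustration,
 * not the architected pseudocode):
 */
static inline float mulx_ref(float a, float b) {
  if ((a == 0.0f && __builtin_isinf(b)) || (__builtin_isinf(a) && b == 0.0f))
    /* 2.0 carrying the XOR of the two operand signs. */
    return __builtin_copysignf(2.0f, a) * __builtin_copysignf(1.0f, b);
  return a * b;
}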
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
-  float16x8_t __s0_297 = __p0_297; \
-  float16x4_t __s1_297 = __p1_297; \
-  float16x8_t __ret_297; \
-  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
-  __ret_297; \
-})
-#else
-#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
-  float16x8_t __s0_298 = __p0_298; \
-  float16x4_t __s1_298 = __p1_298; \
-  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
-  float16x8_t __ret_298; \
-  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
-  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_298; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
-  float16x4_t __s0_299 = __p0_299; \
-  float16x4_t __s1_299 = __p1_299; \
-  float16x4_t __ret_299; \
-  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
-  __ret_299; \
-})
-#else
-#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
-  float16x4_t __s0_300 = __p0_300; \
-  float16x4_t __s1_300 = __p1_300; \
-  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
-  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
-  float16x4_t __ret_300; \
-  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
-  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
-  __ret_300; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
-  float16x8_t __s0_301 = __p0_301; \
-  float16x8_t __s1_301 = __p1_301; \
-  float16x8_t __ret_301; \
-  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
-  __ret_301; \
-})
-#else
-#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
-  float16x8_t __s0_302 = __p0_302; \
-  float16x8_t __s1_302 = __p1_302; \
-  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_302; \
-  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
-  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_302; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
-  float16x4_t __s0_303 = __p0_303; \
-  float16x8_t __s1_303 = __p1_303; \
-  float16x4_t __ret_303; \
-  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
-  __ret_303; \
-})
-#else
-#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
-  float16x4_t __s0_304 = __p0_304; \
-  float16x8_t __s1_304 = __p1_304; \
-  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
-  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_304; \
-  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
-  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
-  __ret_304; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
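
/*
 * Unlike the builtin-backed intrinsics, vtrn1/vtrn2, vuzp1/vuzp2 and
 * vzip1/vzip2 above are pure __builtin_shufflevector expansions, so the
 * compiler is free to fold them into neighbouring shuffles. The indices can
 * be read straight off the definitions; for instance zip1 re-interleaves
 * exactly what uzp1/uzp2 split (hypothetical helper, assumes <arm_neon.h>):
 */
static inline float16x8_t uzp_zip_roundtrip(float16x8_t a, float16x8_t b) {
  float16x8_t even = vuzp1q_f16(a, b); /* {a0,a2,a4,a6,b0,b2,b4,b6} */
  float16x8_t odd  = vuzp2q_f16(a, b); /* {a1,a3,a5,a7,b1,b3,b5,b7} */
  return vzip1q_f16(even, odd);        /* == a */
}
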
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int32x4_t __s0_305 = __p0_305; \
-  uint8x16_t __s1_305 = __p1_305; \
-  int8x8_t __s2_305 = __p2_305; \
-  int32x4_t __ret_305; \
-  int8x8_t __reint_305 = __s2_305; \
-  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
-  __ret_305; \
-})
-#else
-#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32x4_t __s0_306 = __p0_306; \
-  uint8x16_t __s1_306 = __p1_306; \
-  int8x8_t __s2_306 = __p2_306; \
-  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
-  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_306; \
-  int8x8_t __reint_306 = __rev2_306; \
-  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
-  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
-  __ret_306; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32x2_t __s0_307 = __p0_307; \
-  uint8x8_t __s1_307 = __p1_307; \
-  int8x8_t __s2_307 = __p2_307; \
-  int32x2_t __ret_307; \
-  int8x8_t __reint_307 = __s2_307; \
-  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
-  __ret_307; \
-})
-#else
-#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int32x2_t __s0_308 = __p0_308; \
-  uint8x8_t __s1_308 = __p1_308; \
-  int8x8_t __s2_308 = __p2_308; \
-  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
-  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_308; \
-  int8x8_t __reint_308 = __rev2_308; \
-  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
-  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
-  __ret_308; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
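
/*
 * The __ARM_FEATURE_MATMUL_INT8 intrinsics above accumulate dot products of
 * 4-byte groups; vusdot_s32(acc, u, s) adds, into each 32-bit lane i, the
 * sum over k = 0..3 of (unsigned)u[4*i+k] * (signed)s[4*i+k]. Scalar
 * reference sketch (illustrative only; assumes <arm_neon.h> on an i8mm
 * target):
 */
static inline int32x2_t usdot_ref(int32x2_t acc, uint8x8_t u, int8x8_t s) {
  uint8_t ub[8]; int8_t sb[8]; int32_t ab[2];
  vst1_u8(ub, u); vst1_s8(sb, s); vst1_s32(ab, acc);
  for (int i = 0; i < 2; ++i)
    for (int k = 0; k < 4; ++k)
      ab[i] += (int32_t)ub[4 * i + k] * (int32_t)sb[4 * i + k];
  return vld1_s32(ab);
}
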
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
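-// Lane forms: the lane index must be an integer constant expression, so
-// these are macros rather than inline functions; splat[q]_lane_* broadcasts
-// lane __p3 of the 64-bit vector operand before the multiply.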
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int32x4_t __s0_309 = __p0_309; \
-  int32x4_t __s1_309 = __p1_309; \
-  int32x2_t __s2_309 = __p2_309; \
-  int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
-  __ret_309; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  int32x4_t __s0_310 = __p0_310; \
-  int32x4_t __s1_310 = __p1_310; \
-  int32x2_t __s2_310 = __p2_310; \
-  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
-  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
-  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
-  int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
-  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
-  __ret_310; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  int16x8_t __s0_311 = __p0_311; \
-  int16x8_t __s1_311 = __p1_311; \
-  int16x4_t __s2_311 = __p2_311; \
-  int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
-  __ret_311; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  int16x8_t __s0_312 = __p0_312; \
-  int16x8_t __s1_312 = __p1_312; \
-  int16x4_t __s2_312 = __p2_312; \
-  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
-  int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
-  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_312; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  int32x2_t __s0_313 = __p0_313; \
-  int32x2_t __s1_313 = __p1_313; \
-  int32x2_t __s2_313 = __p2_313; \
-  int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
-  __ret_313; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  int32x2_t __s0_314 = __p0_314; \
-  int32x2_t __s1_314 = __p1_314; \
-  int32x2_t __s2_314 = __p2_314; \
-  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
-  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
-  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
-  int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
-  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
-  __ret_314; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  int16x4_t __s0_315 = __p0_315; \
-  int16x4_t __s1_315 = __p1_315; \
-  int16x4_t __s2_315 = __p2_315; \
-  int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
-  __ret_315; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  int16x4_t __s0_316 = __p0_316; \
-  int16x4_t __s1_316 = __p1_316; \
-  int16x4_t __s2_316 = __p2_316; \
-  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
-  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
-  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
-  int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
-  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
-  __ret_316; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  int32x4_t __s0_317 = __p0_317; \
-  int32x4_t __s1_317 = __p1_317; \
-  int32x2_t __s2_317 = __p2_317; \
-  int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
-  __ret_317; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  int32x4_t __s0_318 = __p0_318; \
-  int32x4_t __s1_318 = __p1_318; \
-  int32x2_t __s2_318 = __p2_318; \
-  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
-  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
-  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
-  int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
-  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
-  __ret_318; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  int16x8_t __s0_319 = __p0_319; \
-  int16x8_t __s1_319 = __p1_319; \
-  int16x4_t __s2_319 = __p2_319; \
-  int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
-  __ret_319; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  int16x8_t __s0_320 = __p0_320; \
-  int16x8_t __s1_320 = __p1_320; \
-  int16x4_t __s2_320 = __p2_320; \
-  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
-  int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
-  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_320; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  int32x2_t __s0_321 = __p0_321; \
-  int32x2_t __s1_321 = __p1_321; \
-  int32x2_t __s2_321 = __p2_321; \
-  int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
-  __ret_321; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  int32x2_t __s0_322 = __p0_322; \
-  int32x2_t __s1_322 = __p1_322; \
-  int32x2_t __s2_322 = __p2_322; \
-  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
-  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
-  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
-  int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
-  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
-  __ret_322; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  int16x4_t __s0_323 = __p0_323; \
-  int16x4_t __s1_323 = __p1_323; \
-  int16x4_t __s2_323 = __p2_323; \
-  int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
-  __ret_323; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
-  int16x4_t __s0_324 = __p0_324; \
-  int16x4_t __s1_324 = __p1_324; \
-  int16x4_t __s2_324 = __p2_324; \
-  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
-  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
-  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
-  int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
-  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
-  __ret_324; \
-})
-#endif
-
-#endif
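-// On AArch64 the QRDMX extension also provides _laneq_ forms, which select
-// the lane from a 128-bit vector instead of a 64-bit one.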
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
-  __ret_325; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
-  __ret_326; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
-  __ret_327; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_328; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
-  int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
-  __ret_329; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
-  int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
-  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
-  __ret_330; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
-  int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
-  __ret_331; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
-  int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
-  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
-  __ret_332; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
-  int32x4_t __s0_333 = __p0_333; \
-  int32x4_t __s1_333 = __p1_333; \
-  int32x4_t __s2_333 = __p2_333; \
-  int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
-  __ret_333; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
-  int32x4_t __s0_334 = __p0_334; \
-  int32x4_t __s1_334 = __p1_334; \
-  int32x4_t __s2_334 = __p2_334; \
-  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
-  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
-  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
-  int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
-  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
-  __ret_334; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
-  int16x8_t __s0_335 = __p0_335; \
-  int16x8_t __s1_335 = __p1_335; \
-  int16x8_t __s2_335 = __p2_335; \
-  int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
-  __ret_335; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
-  int16x8_t __s0_336 = __p0_336; \
-  int16x8_t __s1_336 = __p1_336; \
-  int16x8_t __s2_336 = __p2_336; \
-  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
-  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_336; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
-  int32x2_t __s0_337 = __p0_337; \
-  int32x2_t __s1_337 = __p1_337; \
-  int32x4_t __s2_337 = __p2_337; \
-  int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
-  __ret_337; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
-  int32x2_t __s0_338 = __p0_338; \
-  int32x2_t __s1_338 = __p1_338; \
-  int32x4_t __s2_338 = __p2_338; \
-  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
-  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
-  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
-  int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
-  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
-  __ret_338; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
-  int16x4_t __s0_339 = __p0_339; \
-  int16x4_t __s1_339 = __p1_339; \
-  int16x8_t __s2_339 = __p2_339; \
-  int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
-  __ret_339; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
-  int16x4_t __s0_340 = __p0_340; \
-  int16x4_t __s1_340 = __p1_340; \
-  int16x8_t __s2_340 = __p2_340; \
-  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
-  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
-  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
-  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
-  __ret_340; \
-})
-#endif
-
-#endif
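-// AArch64-only intrinsics follow: float64 vector arithmetic, across-vector
-// reductions, and per-element scalar forms (e.g. vabdd_f64, vabds_f32).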
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
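-// Single-lane (x1) vectors and scalars have no lane order to fix up, which
-// is presumably why these definitions sit outside the __LITTLE_ENDIAN__
-// guards.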
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
-  return __ret;
-}
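-// vaddhn_high_*: add the two wide operands, keep the high half of each
-// narrowed sum, and pack it above the existing narrow vector __p0 via
-// vcombine.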
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
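-// vaddlv[q]_*: across-vector long reduction; every lane is summed into one
-// scalar of twice the element width.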
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
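-// vaddv[q]_*: across-vector reduction; every lane is summed into one scalar
-// of the same element width. The result needs no shuffle in the big-endian
-// variants because it is a scalar.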
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
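-// vbsl*: bitwise select. Each result bit comes from the second operand where
-// the corresponding bit of the mask __p0 is 1, and from the third operand
-// where it is 0.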
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
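-// vca{ge,gt,le,lt}*: absolute compares, testing |__p0| op |__p1| per lane
-// and yielding all-ones where the comparison holds, all-zeros otherwise.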
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
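-// vceq*/vceqz*: per-lane equality and equality-with-zero compares, again
-// yielding all-ones/all-zeros masks; the 64-bit element forms are AArch64
-// additions.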
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgtzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vclezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcltzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
-  __ret_357; \
-})
-#else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
-  __ret_358; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
-  __ret_359; \
-})
-#else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
-  __ret_360; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
-  __ret_361; \
-})
-#else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_362; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
-  __ret_363; \
-})
-#else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_364; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
-  __ret_365; \
-})
-#else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
-  __ret_366; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
-  __ret_367; \
-})
-#else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
-  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_368; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
-  __ret_369; \
-})
-#else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
-  __ret_370; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
-  __ret_371; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
-  __ret_372; \
-})
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
-  __ret_373; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
-  __ret_374; \
-})
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_375; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
-  __ret_376; \
-})
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
-  __ret_377; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
-  __ret_378; \
-})
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
-  __ret_379; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
-  __ret_381; \
-})
-#else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
-  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
-  __ret_382; \
-})
-#endif
-
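-// The vcopy_lane_* macros above insert lane __p3 of vector __p2 into lane
-// __p1 of vector __p0. On big-endian targets the operands are lane-reversed
-// first, the copy goes through the __noswap_ helpers, and the result is
-// reversed back. Illustrative use with placeholder names:
-//   uint32x2_t r = vcopy_lane_u32(dst, 0, src, 1); /* r[0] = src[1] */
-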
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
-  __ret_383; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_384; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
-  __ret_385; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_386; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
-  __ret_387; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_388; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
-  __ret_389; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
-  __ret_390; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
-  __ret_391; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
-  __ret_392; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
-  __ret_393; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_394; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
-  __ret_395; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
-  __ret_397; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
-  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
-  __ret_398; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
-  __ret_399; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
-  __ret_400; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
-  __ret_401; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
-  __ret_402; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
-  __ret_403; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_404; \
-})
-#endif
-
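-// vcopyq_laneq_* is the all-128-bit variant: destination and source are both
-// "q" vectors, so the big-endian paths reverse all 16/8/4/2 lanes of each.
-// Illustrative use with placeholder names:
-//   int32x4_t r = vcopyq_laneq_s32(dst, 3, src, 0); /* r[3] = src[0] */
-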
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
-  __ret_405; \
-})
-#else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_406; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
-  __ret_407; \
-})
-#else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
-  __ret_408; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
-  __ret_409; \
-})
-#else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
-  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_410; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
-  __ret_411; \
-})
-#else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
-  __ret_412; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
-  __ret_413; \
-})
-#else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
-  __ret_414; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
-  __ret_415; \
-})
-#else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
-  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
-  __ret_416; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
-  __ret_417; \
-})
-#else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_418; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
-  __ret_419; \
-})
-#else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
-  __ret_420; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
-  __ret_421; \
-})
-#else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
-  __ret_422; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
-  __ret_423; \
-})
-#else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
-  __ret_424; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
-  __ret_425; \
-})
-#else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
-  __ret_426; \
-})
-#endif
-
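-// vcopy_laneq_* mixes widths: the destination is a 64-bit "d" vector while
-// the source lane comes from a 128-bit "q" vector (vset_lane_* paired with
-// vgetq_lane_*). The single-lane u64/s64 destinations need no lane reversal,
-// so their big-endian variants only reverse the source vector.
-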
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
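-// The vcreate_* macros above reinterpret a 64-bit integer's bit pattern as a
-// one-lane vector; no value conversion occurs. Illustrative:
-//   float64x1_t one = vcreate_f64(0x3ff0000000000000ULL); /* bits of 1.0 */
-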
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
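-// The vcvt_high_* helpers above work on vector halves: vcvt_high_f32_f64
-// narrows two doubles and packs them above the float32x2_t low half __p0,
-// while vcvt_high_f64_f32 widens the upper two floats of a float32x4_t.
-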
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
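-// The _n_ conversions above treat __p1 as a count of fractional bits, i.e. a
-// fixed-point scale of 2^__p1. Illustrative:
-//   float32_t f = vcvts_n_f32_s32(256, 8); /* 256 / 2^8 == 1.0f */
-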
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
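-// The unsuffixed vcvt* float-to-integer forms above round toward zero;
-// vcvta* rounds to nearest with ties away from zero, vcvtm* toward minus
-// infinity (floor), vcvtn* to nearest with ties to even, and vcvtp* toward
-// plus infinity (ceiling).
-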
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
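-// vcvtx* narrows f64 to f32 with the round-to-odd mode (FCVTXN), which
-// avoids double-rounding error if the result is later rounded again to a
-// narrower type.
-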
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
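-// The vdiv* intrinsics above lower to the C '/' operator applied lane-wise.
-// Their big-endian variants keep the usual reverse/operate/reverse pattern,
-// although a purely lane-wise operation would yield the same result without
-// it.
-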
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
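-// vdupb/vduph/vdups/vdupd_lane_* above extract one lane as a scalar; the
-// suffix letter encodes the element size (b=8, h=16, s=32, d=64 bits).
-// Illustrative use with a placeholder name:
-//   int16_t x = vduph_lane_s16(v, 2); /* x = v[2] */
-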
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
-})
-#else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
-})
-#else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
-})
-#else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
-})
-#endif
-
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
-})
-#else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
-})
-#endif
-
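-// vdup_lane_* / vdupq_lane_* above broadcast lane __p1 of a 64-bit input
-// vector across the result, delegating to the splat(q)_lane_* helpers used
-// throughout this header.
-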
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
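-// The scalar *_laneq forms above read the lane from a 128-bit source vector;
-// otherwise they mirror the *_lane forms earlier.
-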
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
-  __ret_453; \
-})
-#else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
-  __ret_454; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
-  __ret_455; \
-})
-#else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_456; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
-  __ret_457; \
-})
-#else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_458; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
-  __ret_459; \
-})
-#else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
-  __ret_460; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
-  __ret_461; \
-})
-#else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
-  __ret_462; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
-  __ret_463; \
-})
-#else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
-  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_464; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
-  __ret_465; \
-})
-#else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
-  __ret_466; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
-  __ret_467; \
-})
-#else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
-  __ret_468; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
-  __ret_469; \
-})
-#else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_470; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
-  __ret_471; \
-})
-#else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
-  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
-  __ret_473; \
-})
-#else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
-  __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
-  __ret_475; \
-})
-#else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
-  __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
-  __ret_477; \
-})
-#else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
-  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
-  __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
-  __ret_479; \
-})
-#else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
-  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
-  __ret_481; \
-})
-#else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
-  __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
-  __ret_483; \
-})
-#else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
-  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
-  __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
-  __ret_485; \
-})
-#else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
-  __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
-  __ret_487; \
-})
-#else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
-  __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
-  __ret_489; \
-})
-#else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
-  __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
-  __ret_491; \
-})
-#else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
-  __ret_492; \
-})
-#endif
-
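-/* Illustrative usage sketch (assumes AArch64 NEON; not one of the
- * generated definitions): each vdup*_laneq_* macro above broadcasts one
- * lane of a 128-bit source into every lane of the result, and each
- * big-endian variant reverses its operand with __builtin_shufflevector
- * before and after the builtin so the lane index keeps its
- * architectural meaning.
- *
- *   float32x4_t broadcast_lane2(float32x4_t v) {
- *     return vdupq_laneq_f32(v, 2);   // {v[2], v[2], v[2], v[2]}
- *   }
- */
-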
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
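-/* Illustrative usage sketch (not one of the generated definitions):
- * vext treats {__p0, __p1} as one concatenated vector and returns the
- * window of lanes starting at index __p2.
- *
- *   float64x2_t shift_in(float64x2_t a, float64x2_t b) {
- *     return vextq_f64(a, b, 1);   // {a[1], b[0]}
- *   }
- */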
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
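-/* Illustrative usage sketch (not one of the generated definitions):
- * vfmaq_f64(a, b, c) computes a + b * c per lane with a single fused
- * rounding; the _lane/_laneq forms below pin the multiplier to one
- * lane of a vector.
- *
- *   float64x2_t fma2(float64x2_t acc, float64x2_t x, float64x2_t y) {
- *     return vfmaq_f64(acc, x, y);   // acc + x * y, fused
- *   }
- */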
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
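-/* Illustrative usage sketch (not one of the generated definitions):
- * as the bodies above show, every vfms* form is vfma* with the first
- * multiplicand negated, so vfmsq_f64(a, b, c) yields a - b * c with
- * the same single rounding.
- *
- *   float64x2_t fms2(float64x2_t acc, float64x2_t x, float64x2_t y) {
- *     return vfmsq_f64(acc, x, y);   // acc - x * y, fused
- *   }
- */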
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
-  __ret_509; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
-  __ret_510; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
-  __ret_511; \
-})
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
-  __ret_512; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
-})
-#else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
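-/* Illustrative usage sketch (not one of the generated definitions):
- * vget_low and vget_high return the lower and upper 64-bit half of a
- * 128-bit vector, and vget_lane extracts a single scalar lane.
- *
- *   float64_t hsum(float64x2_t v) {
- *     return vget_lane_f64(vget_low_f64(v), 0)
- *          + vget_lane_f64(vget_high_f64(v), 0);   // v[0] + v[1]
- *   }
- */
-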
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
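-/* Illustrative usage sketch (not one of the generated definitions):
- * vld1 loads a whole vector, vld1_dup loads one element and broadcasts
- * it, and vld1_lane below loads one element into a chosen lane of an
- * existing vector.
- *
- *   float64x2_t splat_from(const float64_t *p) {
- *     return vld1q_dup_f64(p);   // {p[0], p[0]}
- *   }
- */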
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
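-
-/* Editor's note: the paired #ifdef __LITTLE_ENDIAN__ definitions above follow
- * this header's generated pattern: the underlying __builtin_neon_* calls
- * assume little-endian lane numbering, so each big-endian variant reverses
- * its inputs with __builtin_shufflevector, invokes the same builtin, and
- * reverses any vector result back.  The helper below is an editor's sketch,
- * not part of the original header: vminvq_f32 reduces all four lanes to
- * their minimum, so the answer is independent of lane order. */
-__ai float32_t example_min_across_f32(float32x4_t __v) {
-  return vminvq_f32(__v); /* e.g. {4.0f, 1.0f, 3.0f, 2.0f} -> 1.0f */
-}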
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
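-
-/* Editor's sketch (hypothetical helper, not in the original header):
- * vmlaq_f64 expands to a plain __p0 + __p1 * __p2, i.e. a separate multiply
- * and add rather than a fused operation; vfmaq_f64 is the fused form. */
-__ai float64x2_t example_mla_f64(float64x2_t __acc, float64x2_t __a, float64x2_t __x) {
-  return vmlaq_f64(__acc, __a, __x); /* per-lane __acc + __a * __x */
-}
-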
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
-  __ret_531; \
-})
-#else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
-  __ret_532; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
-  __ret_533; \
-})
-#else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
-  __ret_534; \
-})
-#endif
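-
-/* Editor's sketch, not part of the original header: the _laneq forms
- * broadcast a single compile-time-constant lane of a 128-bit vector through
- * splat_laneq/splatq_laneq before the multiply-accumulate; the lane index
- * must be a constant expression, which is why these are macros rather than
- * functions. */
-__ai float32x4_t example_mla_by_lane_f32(float32x4_t __acc, float32x4_t __v, float32x4_t __c) {
-  return vmlaq_laneq_f32(__acc, __v, __c, 0); /* __acc + __v * __c[0] */
-}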
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
-  __ret_535; \
-})
-#else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
-  __ret_536; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
-  __ret_537; \
-})
-#else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
-  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
-  __ret_538; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
-  __ret_539; \
-})
-#else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
-  __ret_540; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
-  __ret_541; \
-})
-#else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
-  __ret_542; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
-  __ret_543; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
-  __ret_544; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
-  __ret_545; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
-  __ret_546; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
-  int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
-  __ret_547; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
-  int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
-  int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
-  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
-  __ret_548; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
-  int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
-  __ret_549; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
-  int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
-  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
-  __ret_550; \
-})
-#endif
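-
-/* Editor's sketch, not part of the original header: the vmlal_high_* forms
- * take the upper half of the narrow source via vget_high_*, widen-multiply
- * it by the selected lane, and accumulate into the twice-as-wide vector. */
-__ai int64x2_t example_widening_mla_high_s32(int64x2_t __acc, int32x4_t __v, int32x2_t __c) {
-  return vmlal_high_lane_s32(__acc, __v, __c, 1); /* __acc[i] += (int64_t)__v[2+i] * __c[1] */
-}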
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
-  uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
-  uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
-  __ret_551; \
-})
-#else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
-  uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
-  uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
-  uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
-  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
-  __ret_552; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
-  uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
-  uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
-  __ret_553; \
-})
-#else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
-  uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
-  uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
-  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
-  __ret_554; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
-  int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
-  int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
-  __ret_555; \
-})
-#else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
-  int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
-  int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
-  int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
-  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
-  __ret_556; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
-  int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
-  int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
-  __ret_557; \
-})
-#else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
-  int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
-  int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
-  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
-  __ret_558; \
-})
-#endif
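-
-/* Editor's sketch, not part of the original header: without the _high
- * infix, vmlal_laneq_* widens the whole 64-bit source vector, while the
- * trailing q in _laneq means only the coefficient vector is 128 bits wide. */
-__ai uint64x2_t example_widening_mla_u32(uint64x2_t __acc, uint32x2_t __v, uint32x4_t __c) {
-  return vmlal_laneq_u32(__acc, __v, __c, 3); /* __acc[i] += (uint64_t)__v[i] * __c[3] */
-}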
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
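-
-/* Editor's sketch, not part of the original header: the vmls* family mirrors
- * vmla* with the accumulator updated by subtraction instead of addition. */
-__ai float64x2_t example_mls_f64(float64x2_t __acc, float64x2_t __a, float64x2_t __x) {
-  return vmlsq_f64(__acc, __a, __x); /* per-lane __acc - __a * __x */
-}
-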
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
-  __ret_575; \
-})
-#else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
-  __ret_576; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
-  __ret_577; \
-})
-#else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
-  __ret_578; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
-  __ret_579; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
-  __ret_580; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
-  __ret_581; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
-  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
-  __ret_582; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
-  __ret_583; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
-  __ret_584; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
-  __ret_585; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
-  __ret_586; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
-  __ret_587; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
-  __ret_588; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
-  __ret_589; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
-  __ret_590; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
-  int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
-  __ret_591; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
-  int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
-  int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
-  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
-  __ret_592; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
-  int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
-  __ret_593; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
-  int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
-  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
-  __ret_594; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
-  uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
-  uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
-  __ret_595; \
-})
-#else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
-  uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
-  uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
-  uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
-  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
-  __ret_596; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
-  uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
-  uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
-  __ret_597; \
-})
-#else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
-  uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
-  uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
-  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
-  __ret_598; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
-  int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
-  int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
-  __ret_599; \
-})
-#else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
-  int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
-  int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
-  int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
-  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
-  __ret_600; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
-  int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
-  int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
-  __ret_601; \
-})
-#else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
-  int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
-  int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
-  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
-  __ret_602; \
-})
-#endif
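-
-/* Editor's sketch, not part of the original header: in these suffixes,
- * _lane selects from a 64-bit vector and _laneq from a 128-bit one, so the
- * widening multiply-subtract below may index any of the eight coefficient
- * lanes. */
-__ai int32x4_t example_widening_mls_s16(int32x4_t __acc, int16x4_t __v, int16x8_t __c) {
-  return vmlsl_laneq_s16(__acc, __v, __c, 7); /* __acc[i] -= (int32_t)__v[i] * __c[7] */
-}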
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
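-
-/* Editor's sketch, not part of the original header: vmov(q)_n_* broadcasts a
- * scalar to every lane; the big-endian variant still swaps the result, but
- * with all lanes equal the swap is semantically a no-op. */
-__ai float64x2_t example_dup_f64(float64_t __s) {
-  return vmovq_n_f64(__s); /* {__s, __s} */
-}
-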
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
-  return __ret_619;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
-  return __ret_620;
-}
-#endif
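// Usage sketch (illustrative annotation): the vmovl_high_* family above
// widens the top half of a 128-bit vector by reusing vshll_n_* with a shift
// count of 0, and each intrinsic is defined twice so that big-endian targets
// lane-reverse their operands with __builtin_shufflevector before and after
// the call, keeping the underlying builtin little-endian. The helper name
// below is hypothetical.
static inline uint64x2_t widen_high_u32_example(uint32x4_t v) {
  return vmovl_high_u32(v);  // same result as vshll_n_u32(vget_high_u32(v), 0)
}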
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
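// Usage sketch (illustrative annotation): the vmovn_high_* intrinsics narrow
// the second operand and place it in the upper half of the first, i.e.
// vcombine_*(p0, vmovn_*(p1)). A hypothetical helper that narrows two
// uint32x4_t vectors into a single uint16x8_t:
static inline uint16x8_t narrow_pair_example(uint32x4_t lo, uint32x4_t hi) {
  return vmovn_high_u32(vmovn_u32(lo), hi);
}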
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
-})
-#else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
-})
-#else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
-})
-#else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
-})
-#else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
-  __ret_646; \
-})
-#else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
-  __ret_647; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
-  __ret_648; \
-})
-#else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
-  __ret_649; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
-  __ret_650; \
-})
-#else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
-  __ret_651; \
-})
-#endif
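// Usage sketch (illustrative annotation): the vmul_laneq_* and vmulq_laneq_*
// macros multiply a vector by one lane of a 128-bit vector, broadcast via
// splat_laneq_*; the lane index must be a compile-time constant. The helper
// name below is hypothetical.
static inline int32x2_t mul_by_lane1_example(int32x2_t a, int32x4_t b) {
  return vmul_laneq_s32(a, b, 1);  // multiplies every lane of a by b[1]
}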
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
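// Usage sketch (illustrative annotation): each vmull_high_* intrinsic is a
// widening multiply of the upper halves of its operands, equivalent to
// vmull_*(vget_high_*(a), vget_high_*(b)). A hypothetical helper producing a
// 64-bit-per-lane product:
static inline uint64x2_t mull_high_u32_example(uint32x4_t a, uint32x4_t b) {
  return vmull_high_u32(a, b);
}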
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
-})
-#else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
-})
-#else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
-})
-#else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
-})
-#else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
-})
-#else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
-})
-#else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
-})
-#else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
-})
-#else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
-})
-#else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
-})
-#else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
-})
-#else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
-})
-#else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
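// Usage note (illustrative annotation): vmulx maps to the AArch64 FMULX
// instruction, which behaves like an ordinary multiply except that, per the
// FMULX definition, (+/-0) * (+/-infinity) yields +/-2.0 rather than NaN,
// which is useful in reciprocal-estimate iterations. A hypothetical scalar
// example:
static inline float32_t mulx_scalar_example(float32_t a, float32_t b) {
  return vmulxs_f32(a, b);
}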
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
-  __ret_692; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
-  __ret_693; \
-})
-#else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
-  __ret_694; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
-})
-#else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
-})
-#else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint16_t vqmovuns_s32(int32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovund_s64(int64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovunh_s16(int16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
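vrbit reverses the order of the bits within each byte lane (the RBIT operation applied per 8-bit element). A hypothetical one-liner, assuming nothing beyond the intrinsics defined here:

#include <arm_neon.h>

/* Every lane 0x01 (0b00000001) becomes 0x80 (0b10000000). */
uint8x8_t rbit_demo(void) {
  return vrbit_u8(vdup_n_u8(0x01));
}
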
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
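
vrecpe produces only a coarse estimate of the reciprocal; vrecps(a, b) computes 2.0 - a*b, the Newton-Raphson correction factor, so each x *= vrecps(a, x) step roughly doubles the number of accurate bits. A hedged scalar sketch using the helpers above (the function name is illustrative):

#include <arm_neon.h>

float recip_f32(float a) {
  float x = vrecpes_f32(a);    /* coarse initial estimate of 1/a */
  x = x * vrecpss_f32(a, x);   /* x *= (2 - a*x): first refinement */
  x = x * vrecpss_f32(a, x);   /* second step for near-full precision */
  return x;
}
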
 __ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
@@ -62045,140 +61802,140 @@
   return __ret;
 }
 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
   __ret; \
 })
 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   int64_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
+#define vrshrn_high_n_u32(__p0_803, __p1_803, __p2_803) __extension__ ({ \
+  uint16x8_t __ret_803; \
+  uint16x4_t __s0_803 = __p0_803; \
+  uint32x4_t __s1_803 = __p1_803; \
+  __ret_803 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_803), (uint16x4_t)(vrshrn_n_u32(__s1_803, __p2_803)))); \
+  __ret_803; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
+#define vrshrn_high_n_u32(__p0_804, __p1_804, __p2_804) __extension__ ({ \
+  uint16x8_t __ret_804; \
+  uint16x4_t __s0_804 = __p0_804; \
+  uint32x4_t __s1_804 = __p1_804; \
+  uint16x4_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 3, 2, 1, 0); \
+  uint32x4_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 3, 2, 1, 0); \
+  __ret_804 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_804), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_804, __p2_804)))); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
+#define vrshrn_high_n_u64(__p0_805, __p1_805, __p2_805) __extension__ ({ \
+  uint32x4_t __ret_805; \
+  uint32x2_t __s0_805 = __p0_805; \
+  uint64x2_t __s1_805 = __p1_805; \
+  __ret_805 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_805), (uint32x2_t)(vrshrn_n_u64(__s1_805, __p2_805)))); \
+  __ret_805; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
+#define vrshrn_high_n_u64(__p0_806, __p1_806, __p2_806) __extension__ ({ \
+  uint32x4_t __ret_806; \
+  uint32x2_t __s0_806 = __p0_806; \
+  uint64x2_t __s1_806 = __p1_806; \
+  uint32x2_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 1, 0); \
+  uint64x2_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 1, 0); \
+  __ret_806 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_806), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_806, __p2_806)))); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
+#define vrshrn_high_n_u16(__p0_807, __p1_807, __p2_807) __extension__ ({ \
+  uint8x16_t __ret_807; \
+  uint8x8_t __s0_807 = __p0_807; \
+  uint16x8_t __s1_807 = __p1_807; \
+  __ret_807 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_807), (uint8x8_t)(vrshrn_n_u16(__s1_807, __p2_807)))); \
+  __ret_807; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
+#define vrshrn_high_n_u16(__p0_808, __p1_808, __p2_808) __extension__ ({ \
+  uint8x16_t __ret_808; \
+  uint8x8_t __s0_808 = __p0_808; \
+  uint16x8_t __s1_808 = __p1_808; \
+  uint8x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_808), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_808, __p2_808)))); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
+#define vrshrn_high_n_s32(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  int16x8_t __ret_809; \
+  int16x4_t __s0_809 = __p0_809; \
+  int32x4_t __s1_809 = __p1_809; \
+  __ret_809 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_809), (int16x4_t)(vrshrn_n_s32(__s1_809, __p2_809)))); \
+  __ret_809; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
+#define vrshrn_high_n_s32(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  int16x8_t __ret_810; \
+  int16x4_t __s0_810 = __p0_810; \
+  int32x4_t __s1_810 = __p1_810; \
+  int16x4_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 3, 2, 1, 0); \
+  int32x4_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 3, 2, 1, 0); \
+  __ret_810 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_810), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_810, __p2_810)))); \
+  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
+#define vrshrn_high_n_s64(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  int32x4_t __ret_811; \
+  int32x2_t __s0_811 = __p0_811; \
+  int64x2_t __s1_811 = __p1_811; \
+  __ret_811 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_811), (int32x2_t)(vrshrn_n_s64(__s1_811, __p2_811)))); \
+  __ret_811; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
+#define vrshrn_high_n_s64(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  int32x4_t __ret_812; \
+  int32x2_t __s0_812 = __p0_812; \
+  int64x2_t __s1_812 = __p1_812; \
+  int32x2_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 1, 0); \
+  int64x2_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 1, 0); \
+  __ret_812 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_812), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_812, __p2_812)))); \
+  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \
+  __ret_812; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
+#define vrshrn_high_n_s16(__p0_813, __p1_813, __p2_813) __extension__ ({ \
+  int8x16_t __ret_813; \
+  int8x8_t __s0_813 = __p0_813; \
+  int16x8_t __s1_813 = __p1_813; \
+  __ret_813 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_813), (int8x8_t)(vrshrn_n_s16(__s1_813, __p2_813)))); \
+  __ret_813; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
+#define vrshrn_high_n_s16(__p0_814, __p1_814, __p2_814) __extension__ ({ \
+  int8x16_t __ret_814; \
+  int8x8_t __s0_814 = __p0_814; \
+  int16x8_t __s1_814 = __p1_814; \
+  int8x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_814), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_814, __p2_814)))); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814; \
 })
 #endif
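
The vrshrn_high_n family performs a rounding right shift by a compile-time immediate, narrows to half the element width, and packs the result into the upper half of the destination while keeping the first operand as the lower half. A minimal sketch (names illustrative):

#include <arm_neon.h>

uint16x8_t narrow_pair(uint32x4_t lo, uint32x4_t hi) {
  uint16x4_t low_half = vrshrn_n_u32(lo, 16);   /* per lane: (x + (1 << 15)) >> 16 */
  return vrshrn_high_n_u32(low_half, hi, 16);   /* hi's result fills lanes 4..7 */
}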
 
@@ -62190,8 +61947,8 @@
 }
 #else
 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62221,9 +61978,9 @@
 }
 #else
 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62246,16 +62003,16 @@
   return __ret;
 }
 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
@@ -62267,10 +62024,10 @@
 }
 #else
 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62285,10 +62042,10 @@
 }
 #else
 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62303,10 +62060,10 @@
 }
 #else
 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62321,10 +62078,10 @@
 }
 #else
 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62339,10 +62096,10 @@
 }
 #else
 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62357,10 +62114,10 @@
 }
 #else
 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62368,34 +62125,34 @@
 #endif
 
 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -62403,35 +62160,35 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
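
vsetq_lane_f64 replaces a single lane; the lane index must be a constant expression (0 or 1 for float64x2_t), which is why these are defined as macros rather than functions. A hypothetical one-liner:

#include <arm_neon.h>

float64x2_t set_hi(float64x2_t v, float64_t x) {
  return vsetq_lane_f64(x, v, 1);
}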
 
 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
   float64_t __s0 = __p0; \
   float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
@@ -62446,299 +62203,299 @@
   return __ret;
 }
 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
   __ret; \
 })
 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   int64_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
-})
-#else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
-})
-#else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
-})
-#else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
-})
-#else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
-})
-#else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
-})
-#else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
+#define vshll_high_n_u8(__p0_815, __p1_815) __extension__ ({ \
   uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
+  uint8x16_t __s0_815 = __p0_815; \
+  __ret_815 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_815), __p1_815)); \
   __ret_815; \
 })
 #else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
+#define vshll_high_n_u8(__p0_816, __p1_816) __extension__ ({ \
   uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
+  uint8x16_t __s0_816 = __p0_816; \
+  uint8x16_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_816 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_816), __p1_816)); \
   __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_816; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
+#define vshll_high_n_u32(__p0_817, __p1_817) __extension__ ({ \
+  uint64x2_t __ret_817; \
+  uint32x4_t __s0_817 = __p0_817; \
+  __ret_817 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_817), __p1_817)); \
   __ret_817; \
 })
 #else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
+#define vshll_high_n_u32(__p0_818, __p1_818) __extension__ ({ \
+  uint64x2_t __ret_818; \
+  uint32x4_t __s0_818 = __p0_818; \
+  uint32x4_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \
+  __ret_818 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_818), __p1_818)); \
+  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 1, 0); \
   __ret_818; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
+#define vshll_high_n_u16(__p0_819, __p1_819) __extension__ ({ \
+  uint32x4_t __ret_819; \
+  uint16x8_t __s0_819 = __p0_819; \
+  __ret_819 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_819), __p1_819)); \
   __ret_819; \
 })
 #else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vshll_high_n_u16(__p0_820, __p1_820) __extension__ ({ \
+  uint32x4_t __ret_820; \
+  uint16x8_t __s0_820 = __p0_820; \
+  uint16x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_820 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_820), __p1_820)); \
+  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 3, 2, 1, 0); \
   __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
+#define vshll_high_n_s8(__p0_821, __p1_821) __extension__ ({ \
   int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
+  int8x16_t __s0_821 = __p0_821; \
+  __ret_821 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_821), __p1_821)); \
   __ret_821; \
 })
 #else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
+#define vshll_high_n_s8(__p0_822, __p1_822) __extension__ ({ \
   int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
+  int8x16_t __s0_822 = __p0_822; \
+  int8x16_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_822 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_822), __p1_822)); \
   __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
+#define vshll_high_n_s32(__p0_823, __p1_823) __extension__ ({ \
+  int64x2_t __ret_823; \
+  int32x4_t __s0_823 = __p0_823; \
+  __ret_823 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_823), __p1_823)); \
   __ret_823; \
 })
 #else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
+#define vshll_high_n_s32(__p0_824, __p1_824) __extension__ ({ \
+  int64x2_t __ret_824; \
+  int32x4_t __s0_824 = __p0_824; \
+  int32x4_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 3, 2, 1, 0); \
+  __ret_824 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_824), __p1_824)); \
+  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 1, 0); \
   __ret_824; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
+#define vshll_high_n_s16(__p0_825, __p1_825) __extension__ ({ \
+  int32x4_t __ret_825; \
+  int16x8_t __s0_825 = __p0_825; \
+  __ret_825 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_825), __p1_825)); \
   __ret_825; \
 })
 #else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vshll_high_n_s16(__p0_826, __p1_826) __extension__ ({ \
+  int32x4_t __ret_826; \
+  int16x8_t __s0_826 = __p0_826; \
+  int16x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_826 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_826), __p1_826)); \
+  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 3, 2, 1, 0); \
   __ret_826; \
 })
 #endif
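
As the little-endian bodies show, vshll_high_n is vshll_n applied to vget_high of the source: the top half of the lanes is widened to twice the element width and shifted left by the immediate. A minimal sketch (name illustrative):

#include <arm_neon.h>

/* Widen the top eight u8 lanes to u16 and shift each left by 4. */
uint16x8_t widen_high(uint8x16_t v) {
  return vshll_high_n_u8(v, 4);
}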
 
+#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u32(__p0_827, __p1_827, __p2_827) __extension__ ({ \
+  uint16x8_t __ret_827; \
+  uint16x4_t __s0_827 = __p0_827; \
+  uint32x4_t __s1_827 = __p1_827; \
+  __ret_827 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_827), (uint16x4_t)(vshrn_n_u32(__s1_827, __p2_827)))); \
+  __ret_827; \
+})
+#else
+#define vshrn_high_n_u32(__p0_828, __p1_828, __p2_828) __extension__ ({ \
+  uint16x8_t __ret_828; \
+  uint16x4_t __s0_828 = __p0_828; \
+  uint32x4_t __s1_828 = __p1_828; \
+  uint16x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
+  uint32x4_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 3, 2, 1, 0); \
+  __ret_828 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_828), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_828, __p2_828)))); \
+  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_828; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u64(__p0_829, __p1_829, __p2_829) __extension__ ({ \
+  uint32x4_t __ret_829; \
+  uint32x2_t __s0_829 = __p0_829; \
+  uint64x2_t __s1_829 = __p1_829; \
+  __ret_829 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_829), (uint32x2_t)(vshrn_n_u64(__s1_829, __p2_829)))); \
+  __ret_829; \
+})
+#else
+#define vshrn_high_n_u64(__p0_830, __p1_830, __p2_830) __extension__ ({ \
+  uint32x4_t __ret_830; \
+  uint32x2_t __s0_830 = __p0_830; \
+  uint64x2_t __s1_830 = __p1_830; \
+  uint32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
+  uint64x2_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 1, 0); \
+  __ret_830 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_830), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_830, __p2_830)))); \
+  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \
+  __ret_830; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u16(__p0_831, __p1_831, __p2_831) __extension__ ({ \
+  uint8x16_t __ret_831; \
+  uint8x8_t __s0_831 = __p0_831; \
+  uint16x8_t __s1_831 = __p1_831; \
+  __ret_831 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_831), (uint8x8_t)(vshrn_n_u16(__s1_831, __p2_831)))); \
+  __ret_831; \
+})
+#else
+#define vshrn_high_n_u16(__p0_832, __p1_832, __p2_832) __extension__ ({ \
+  uint8x16_t __ret_832; \
+  uint8x8_t __s0_832 = __p0_832; \
+  uint16x8_t __s1_832 = __p1_832; \
+  uint8x8_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_832), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_832, __p2_832)))); \
+  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s32(__p0_833, __p1_833, __p2_833) __extension__ ({ \
+  int16x8_t __ret_833; \
+  int16x4_t __s0_833 = __p0_833; \
+  int32x4_t __s1_833 = __p1_833; \
+  __ret_833 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_833), (int16x4_t)(vshrn_n_s32(__s1_833, __p2_833)))); \
+  __ret_833; \
+})
+#else
+#define vshrn_high_n_s32(__p0_834, __p1_834, __p2_834) __extension__ ({ \
+  int16x8_t __ret_834; \
+  int16x4_t __s0_834 = __p0_834; \
+  int32x4_t __s1_834 = __p1_834; \
+  int16x4_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 3, 2, 1, 0); \
+  int32x4_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 3, 2, 1, 0); \
+  __ret_834 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_834), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_834, __p2_834)))); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_834; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s64(__p0_835, __p1_835, __p2_835) __extension__ ({ \
+  int32x4_t __ret_835; \
+  int32x2_t __s0_835 = __p0_835; \
+  int64x2_t __s1_835 = __p1_835; \
+  __ret_835 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_835), (int32x2_t)(vshrn_n_s64(__s1_835, __p2_835)))); \
+  __ret_835; \
+})
+#else
+#define vshrn_high_n_s64(__p0_836, __p1_836, __p2_836) __extension__ ({ \
+  int32x4_t __ret_836; \
+  int32x2_t __s0_836 = __p0_836; \
+  int64x2_t __s1_836 = __p1_836; \
+  int32x2_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 1, 0); \
+  int64x2_t __rev1_836;  __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 1, 0); \
+  __ret_836 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_836), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_836, __p2_836)))); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 3, 2, 1, 0); \
+  __ret_836; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s16(__p0_837, __p1_837, __p2_837) __extension__ ({ \
+  int8x16_t __ret_837; \
+  int8x8_t __s0_837 = __p0_837; \
+  int16x8_t __s1_837 = __p1_837; \
+  __ret_837 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_837), (int8x8_t)(vshrn_n_s16(__s1_837, __p2_837)))); \
+  __ret_837; \
+})
+#else
+#define vshrn_high_n_s16(__p0_838, __p1_838, __p2_838) __extension__ ({ \
+  int8x16_t __ret_838; \
+  int8x8_t __s0_838 = __p0_838; \
+  int16x8_t __s1_838 = __p1_838; \
+  int8x8_t __rev0_838;  __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_838;  __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_838), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_838, __p2_838)))); \
+  __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838; \
+})
+#endif
+
 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x1_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
   __ret; \
 })
 #else
 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -62773,9 +62530,9 @@
 }
 #else
 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62790,9 +62547,9 @@
 }
 #else
 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62807,9 +62564,9 @@
 }
 #else
 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62824,9 +62581,9 @@
 }
 #else
 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62841,9 +62598,9 @@
 }
 #else
 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62858,9 +62615,9 @@
 }
 #else
 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62880,9 +62637,9 @@
 }
 #else
 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62897,8 +62654,8 @@
 }
 #else
 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62913,8 +62670,8 @@
 }
 #else
 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62934,8 +62691,8 @@
 }
 #else
 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62943,55 +62700,55 @@
 #endif
 
 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x1_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
   __ret; \
 })
 #else
 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -63808,9 +63565,9 @@
 }
 #else
 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -63830,10 +63587,10 @@
 }
 #else
 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63848,10 +63605,10 @@
 }
 #else
 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63866,10 +63623,10 @@
 }
 #else
 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63884,10 +63641,10 @@
 }
 #else
 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63902,10 +63659,10 @@
 }
 #else
 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63920,10 +63677,10 @@
 }
 #else
 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63938,9 +63695,9 @@
 }
 #else
 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63955,9 +63712,9 @@
 }
 #else
 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -63972,9 +63729,9 @@
 }
 #else
 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63989,9 +63746,9 @@
 }
 #else
 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64006,9 +63763,9 @@
 }
 #else
 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64023,9 +63780,9 @@
 }
 #else
 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64040,9 +63797,9 @@
 }
 #else
 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64057,9 +63814,9 @@
 }
 #else
 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64074,9 +63831,9 @@
 }
 #else
 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64091,9 +63848,9 @@
 }
 #else
 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64108,9 +63865,9 @@
 }
 #else
 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64125,9 +63882,9 @@
 }
 #else
 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64135,54 +63892,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
+#define vsudotq_laneq_s32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
+  int32x4_t __ret_839; \
+  int32x4_t __s0_839 = __p0_839; \
+  int8x16_t __s1_839 = __p1_839; \
+  uint8x16_t __s2_839 = __p2_839; \
+uint8x16_t __reint_839 = __s2_839; \
+  __ret_839 = vusdotq_s32(__s0_839, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_839, __p3_839)), __s1_839); \
+  __ret_839; \
 })
 #else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
+#define vsudotq_laneq_s32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
+  int32x4_t __ret_840; \
+  int32x4_t __s0_840 = __p0_840; \
+  int8x16_t __s1_840 = __p1_840; \
+  uint8x16_t __s2_840 = __p2_840; \
+  int32x4_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 3, 2, 1, 0); \
+  int8x16_t __rev1_840;  __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_840;  __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_840 = __rev2_840; \
+  __ret_840 = __noswap_vusdotq_s32(__rev0_840, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_840, __p3_840)), __rev1_840); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
+#define vsudot_laneq_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
+  int32x2_t __ret_841; \
+  int32x2_t __s0_841 = __p0_841; \
+  int8x8_t __s1_841 = __p1_841; \
+  uint8x16_t __s2_841 = __p2_841; \
+uint8x16_t __reint_841 = __s2_841; \
+  __ret_841 = vusdot_s32(__s0_841, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_841, __p3_841)), __s1_841); \
+  __ret_841; \
 })
 #else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
+#define vsudot_laneq_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
+  int32x2_t __ret_842; \
+  int32x2_t __s0_842 = __p0_842; \
+  int8x8_t __s1_842 = __p1_842; \
+  uint8x16_t __s2_842 = __p2_842; \
+  int32x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
+  int8x8_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_842 = __rev2_842; \
+  __ret_842 = __noswap_vusdot_s32(__rev0_842, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_842, __p3_842)), __rev1_842); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
+  __ret_842; \
 })
 #endif
 
@@ -64194,9 +63951,9 @@
 }
 #else
 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64211,9 +63968,9 @@
 }
 #else
 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64228,9 +63985,9 @@
 }
 #else
 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64245,9 +64002,9 @@
 }
 #else
 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64262,9 +64019,9 @@
 }
 #else
 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64279,9 +64036,9 @@
 }
 #else
 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64296,9 +64053,9 @@
 }
 #else
 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64313,9 +64070,9 @@
 }
 #else
 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64330,9 +64087,9 @@
 }
 #else
 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64347,9 +64104,9 @@
 }
 #else
 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64364,9 +64121,9 @@
 }
 #else
 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64381,9 +64138,9 @@
 }
 #else
 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64398,9 +64155,9 @@
 }
 #else
 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64415,9 +64172,9 @@
 }
 #else
 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64432,9 +64189,9 @@
 }
 #else
 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64449,9 +64206,9 @@
 }
 #else
 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64466,9 +64223,9 @@
 }
 #else
 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64483,9 +64240,9 @@
 }
 #else
 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64500,9 +64257,9 @@
 }
 #else
 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64517,9 +64274,9 @@
 }
 #else
 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64534,9 +64291,9 @@
 }
 #else
 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64551,9 +64308,9 @@
 }
 #else
 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64568,9 +64325,9 @@
 }
 #else
 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64585,9 +64342,9 @@
 }
 #else
 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64602,9 +64359,9 @@
 }
 #else
 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64619,9 +64376,9 @@
 }
 #else
 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64636,9 +64393,9 @@
 }
 #else
 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64653,9 +64410,9 @@
 }
 #else
 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64670,9 +64427,9 @@
 }
 #else
 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64687,9 +64444,9 @@
 }
 #else
 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64704,9 +64461,9 @@
 }
 #else
 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64721,9 +64478,9 @@
 }
 #else
 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64738,9 +64495,9 @@
 }
 #else
 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64755,9 +64512,9 @@
 }
 #else
 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64772,9 +64529,9 @@
 }
 #else
 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64789,9 +64546,9 @@
 }
 #else
 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64806,9 +64563,9 @@
 }
 #else
 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64823,9 +64580,9 @@
 }
 #else
 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64840,9 +64597,9 @@
 }
 #else
 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64857,9 +64614,9 @@
 }
 #else
 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64874,9 +64631,9 @@
 }
 #else
 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64891,9 +64648,9 @@
 }
 #else
 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64908,9 +64665,9 @@
 }
 #else
 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64925,9 +64682,9 @@
 }
 #else
 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64947,9 +64704,9 @@
 }
 #else
 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64964,9 +64721,9 @@
 }
 #else
 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64981,9 +64738,9 @@
 }
 #else
 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65038,9 +64795,9 @@
 }
 #else
 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65055,9 +64812,9 @@
 }
 #else
 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65072,9 +64829,9 @@
 }
 #else
 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65089,9 +64846,9 @@
 }
 #else
 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65106,9 +64863,9 @@
 }
 #else
 __ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65123,9 +64880,9 @@
 }
 #else
 __ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65145,9 +64902,9 @@
 }
 #else
 __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65155,54 +64912,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
+#define vusdotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  int32x4_t __ret_843; \
+  int32x4_t __s0_843 = __p0_843; \
+  uint8x16_t __s1_843 = __p1_843; \
+  int8x16_t __s2_843 = __p2_843; \
+int8x16_t __reint_843 = __s2_843; \
+  __ret_843 = vusdotq_s32(__s0_843, __s1_843, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843))); \
+  __ret_843; \
 })
 #else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
+#define vusdotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  int32x4_t __ret_844; \
+  int32x4_t __s0_844 = __p0_844; \
+  uint8x16_t __s1_844 = __p1_844; \
+  int8x16_t __s2_844 = __p2_844; \
+  int32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
+  uint8x16_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_844 = __rev2_844; \
+  __ret_844 = __noswap_vusdotq_s32(__rev0_844, __rev1_844, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844))); \
+  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
+#define vusdot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  int32x2_t __ret_845; \
+  int32x2_t __s0_845 = __p0_845; \
+  uint8x8_t __s1_845 = __p1_845; \
+  int8x16_t __s2_845 = __p2_845; \
+int8x16_t __reint_845 = __s2_845; \
+  __ret_845 = vusdot_s32(__s0_845, __s1_845, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845))); \
+  __ret_845; \
 })
 #else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
+#define vusdot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  int32x2_t __ret_846; \
+  int32x2_t __s0_846 = __p0_846; \
+  uint8x8_t __s1_846 = __p1_846; \
+  int8x16_t __s2_846 = __p2_846; \
+  int32x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
+  uint8x8_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_846 = __rev2_846; \
+  __ret_846 = __noswap_vusdot_s32(__rev0_846, __rev1_846, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846))); \
+  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
+  __ret_846; \
 })
 #endif
 
@@ -65214,9 +64971,9 @@
 }
 #else
 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65231,9 +64988,9 @@
 }
 #else
 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65248,9 +65005,9 @@
 }
 #else
 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65265,9 +65022,9 @@
 }
 #else
 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65282,9 +65039,9 @@
 }
 #else
 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65299,9 +65056,9 @@
 }
 #else
 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65316,9 +65073,9 @@
 }
 #else
 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65333,9 +65090,9 @@
 }
 #else
 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65350,9 +65107,9 @@
 }
 #else
 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65367,9 +65124,9 @@
 }
 #else
 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65384,9 +65141,9 @@
 }
 #else
 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65401,9 +65158,9 @@
 }
 #else
 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65418,9 +65175,9 @@
 }
 #else
 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65435,9 +65192,9 @@
 }
 #else
 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65452,9 +65209,9 @@
 }
 #else
 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65469,9 +65226,9 @@
 }
 #else
 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65486,9 +65243,9 @@
 }
 #else
 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65503,9 +65260,9 @@
 }
 #else
 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65520,9 +65277,9 @@
 }
 #else
 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65537,9 +65294,9 @@
 }
 #else
 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65554,9 +65311,9 @@
 }
 #else
 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65571,9 +65328,9 @@
 }
 #else
 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65588,9 +65345,9 @@
 }
 #else
 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65605,9 +65362,9 @@
 }
 #else
 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65622,9 +65379,9 @@
 }
 #else
 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65639,9 +65396,9 @@
 }
 #else
 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65656,9 +65413,9 @@
 }
 #else
 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65673,9 +65430,9 @@
 }
 #else
 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65690,9 +65447,9 @@
 }
 #else
 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65707,9 +65464,9 @@
 }
 #else
 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65724,9 +65481,9 @@
 }
 #else
 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65741,9 +65498,9 @@
 }
 #else
 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65758,9 +65515,9 @@
 }
 #else
 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65775,9 +65532,9 @@
 }
 #else
 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65792,9 +65549,9 @@
 }
 #else
 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65809,9 +65566,9 @@
 }
 #else
 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65826,9 +65583,9 @@
 }
 #else
 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65843,9 +65600,9 @@
 }
 #else
 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65860,9 +65617,9 @@
 }
 #else
 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65877,9 +65634,9 @@
 }
 #else
 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65894,9 +65651,9 @@
 }
 #else
 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65911,9 +65668,9 @@
 }
 #else
 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65928,9 +65685,9 @@
 }
 #else
 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65945,9 +65702,9 @@
 }
 #else
 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65962,9 +65719,9 @@
 }
 #else
 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65979,9 +65736,9 @@
 }
 #else
 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65996,9 +65753,9 @@
 }
 #else
 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66013,9 +65770,9 @@
 }
 #else
 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66030,9 +65787,9 @@
 }
 #else
 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66047,9 +65804,9 @@
 }
 #else
 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66064,9 +65821,9 @@
 }
 #else
 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66081,9 +65838,9 @@
 }
 #else
 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66098,9 +65855,9 @@
 }
 #else
 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66115,9 +65872,9 @@
 }
 #else
 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66132,9 +65889,9 @@
 }
 #else
 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66149,9 +65906,9 @@
 }
 #else
 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66166,9 +65923,9 @@
 }
 #else
 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66183,9 +65940,9 @@
 }
 #else
 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66200,9 +65957,9 @@
 }
 #else
 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66217,9 +65974,9 @@
 }
 #else
 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66234,9 +65991,9 @@
 }
 #else
 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66251,9 +66008,9 @@
 }
 #else
 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66268,9 +66025,9 @@
 }
 #else
 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66285,9 +66042,9 @@
 }
 #else
 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66302,9 +66059,9 @@
 }
 #else
 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66319,9 +66076,9 @@
 }
 #else
 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66336,9 +66093,9 @@
 }
 #else
 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66353,9 +66110,9 @@
 }
 #else
 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66370,9 +66127,9 @@
 }
 #else
 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66387,9 +66144,9 @@
 }
 #else
 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66404,9 +66161,9 @@
 }
 #else
 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66421,9 +66178,9 @@
 }
 #else
 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66438,9 +66195,9 @@
 }
 #else
 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66455,9 +66212,9 @@
 }
 #else
 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66472,9 +66229,9 @@
 }
 #else
 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66489,9 +66246,9 @@
 }
 #else
 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66506,9 +66263,9 @@
 }
 #else
 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66523,9 +66280,9 @@
 }
 #else
 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66540,9 +66297,9 @@
 }
 #else
 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66557,9 +66314,9 @@
 }
 #else
 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66574,9 +66331,9 @@
 }
 #else
 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66591,9 +66348,9 @@
 }
 #else
 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66608,9 +66365,9 @@
 }
 #else
 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66625,9 +66382,9 @@
 }
 #else
 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66642,9 +66399,9 @@
 }
 #else
 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66659,9 +66416,9 @@
 }
 #else
 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66676,9 +66433,9 @@
 }
 #else
 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66693,9 +66450,9 @@
 }
 #else
 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66703,6 +66460,331 @@
 #endif
 
 #endif
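The vuzp2/vzip1/vzip2 bodies above all follow one pattern: on big-endian targets the operands' lanes are reversed, the little-endian-numbered shuffle is applied, and the result is reversed back. A minimal usage sketch (hypothetical values and function, assuming a little-endian AArch64 target):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  uint32x2_t a = {0, 1};             /* lanes: a[0]=0, a[1]=1 */
  uint32x2_t b = {2, 3};
  uint32x2_t uz = vuzp2_u32(a, b);   /* odd lanes of a:b   -> {1, 3} */
  uint32x2_t zp = vzip1_u32(a, b);   /* low halves zipped  -> {0, 2} */
  printf("%u %u | %u %u\n",
         vget_lane_u32(uz, 0), vget_lane_u32(uz, 1),
         vget_lane_u32(zp, 0), vget_lane_u32(zp, 1));
  return 0;
}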
+#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrnd_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrnda_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndi_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndm_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndn_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndp_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndx_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#endif
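A sketch (hypothetical function name) of the float64 directed-rounding intrinsics declared above; each maps to one AArch64 FRINT instruction with a fixed rounding mode:

#include <arm_neon.h>

float64x2_t round_demo(float64x2_t v) {
  float64x2_t z = vrndq_f64(v);    /* FRINTZ: toward zero           */
  float64x2_t m = vrndmq_f64(v);   /* FRINTM: toward minus infinity */
  float64x2_t p = vrndpq_f64(v);   /* FRINTP: toward plus infinity  */
  float64x2_t n = vrndnq_f64(v);   /* FRINTN: nearest, ties to even */
  return vaddq_f64(vaddq_f64(z, m), vaddq_f64(p, n));
}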
+#if defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
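The vrnd32/vrnd64 intrinsics above correspond to the Armv8.5-A FRINT32X, FRINT32Z, FRINT64X and FRINT64Z instructions: they round to an integral value that is also representable in the signed 32- or 64-bit range, with out-of-range inputs and NaNs mapped (on my reading of the ISA; worth checking against the Arm ARM) to the most negative representable value. A minimal sketch with a hypothetical wrapper name:

#include <arm_neon.h>

float32x2_t round_to_i32_range(float32x2_t v) {
  /* Round toward zero; the result is guaranteed to fit int32_t. */
  return vrnd32z_f32(v);
}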
+#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#endif
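vmaxnm/vminnm above implement the IEEE 754-2008 maxNum/minNum rules, so a quiet NaN in one operand loses to a number in the other instead of propagating. A sketch with hypothetical names:

#include <arm_neon.h>
#include <math.h>

float64x1_t nan_safe_max(float64x1_t a) {
  float64x1_t qnan = vdup_n_f64(NAN);
  return vmaxnm_f64(a, qnan);   /* yields a's value, not NaN */
}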
 #ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint8x16_t __ret;
@@ -66711,10 +66793,10 @@
 }
 #else
 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66729,10 +66811,10 @@
 }
 #else
 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66747,10 +66829,10 @@
 }
 #else
 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66765,10 +66847,10 @@
 }
 #else
 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66783,10 +66865,10 @@
 }
 #else
 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66801,10 +66883,10 @@
 }
 #else
 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66819,10 +66901,10 @@
 }
 #else
 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66837,10 +66919,10 @@
 }
 #else
 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66855,10 +66937,10 @@
 }
 #else
 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66873,10 +66955,10 @@
 }
 #else
 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66891,10 +66973,10 @@
 }
 #else
 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66909,10 +66991,10 @@
 }
 #else
 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66927,9 +67009,9 @@
 }
 #else
 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66949,9 +67031,9 @@
 }
 #else
 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66971,9 +67053,9 @@
 }
 #else
 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66993,9 +67075,9 @@
 }
 #else
 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67015,9 +67097,9 @@
 }
 #else
 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67037,9 +67119,9 @@
 }
 #else
 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67059,9 +67141,9 @@
 }
 #else
 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67076,9 +67158,9 @@
 }
 #else
 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67093,9 +67175,9 @@
 }
 #else
 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67110,9 +67192,9 @@
 }
 #else
 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67127,9 +67209,9 @@
 }
 #else
 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67144,9 +67226,9 @@
 }
 #else
 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67161,9 +67243,9 @@
 }
 #else
 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67178,9 +67260,9 @@
 }
 #else
 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67195,9 +67277,9 @@
 }
 #else
 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67212,9 +67294,9 @@
 }
 #else
 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67229,9 +67311,9 @@
 }
 #else
 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67246,9 +67328,9 @@
 }
 #else
 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67256,60 +67338,60 @@
 #endif
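The vaba/vabdl/vaddl/vaddw definitions above chain absolute-difference and widening adds; a typical use is a sum-of-absolute-differences loop. A minimal sketch (hypothetical name):

#include <arm_neon.h>

int16x8_t sad_step(int16x8_t acc, int8x8_t a, int8x8_t b) {
  /* vabdl_s8 widens |a[i] - b[i]| to 16 bits before accumulating. */
  return vaddq_s16(acc, vabdl_s8(a, b));
}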
 
 #ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
+#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \
+  float16_t __ret_847; \
+  float16x4_t __s0_847 = __p0_847; \
+float16x4_t __reint_847 = __s0_847; \
+int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \
+  __ret_847 = *(float16_t *) &__reint1_847; \
+  __ret_847; \
 })
 #else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
+#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \
+  float16_t __ret_848; \
+  float16x4_t __s0_848 = __p0_848; \
+  float16x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
+float16x4_t __reint_848 = __rev0_848; \
+int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \
+  __ret_848 = *(float16_t *) &__reint1_848; \
+  __ret_848; \
 })
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
+#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \
+  float16_t __ret_849; \
+  float16x4_t __s0_849 = __p0_849; \
+float16x4_t __reint_849 = __s0_849; \
+int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \
+  __ret_849 = *(float16_t *) &__reint1_849; \
+  __ret_849; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
+#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \
+  float16_t __ret_850; \
+  float16x8_t __s0_850 = __p0_850; \
+float16x8_t __reint_850 = __s0_850; \
+int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \
+  __ret_850 = *(float16_t *) &__reint1_850; \
+  __ret_850; \
 })
 #else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
+#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \
+  float16_t __ret_851; \
+  float16x8_t __s0_851 = __p0_851; \
+  float16x8_t __rev0_851;  __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_851 = __rev0_851; \
+int16_t __reint1_851 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \
+  __ret_851 = *(float16_t *) &__reint1_851; \
+  __ret_851; \
 })
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
+#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \
+  float16_t __ret_852; \
+  float16x8_t __s0_852 = __p0_852; \
+float16x8_t __reint_852 = __s0_852; \
+int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \
+  __ret_852 = *(float16_t *) &__reint1_852; \
+  __ret_852; \
 })
 #endif
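vget_lane_f16/vgetq_lane_f16 above extract a half-precision lane by reinterpreting the vector as int16 lanes and punning the extracted bits back to float16_t; the _NNN suffixes appear to be uniquing counters from the header generator. A sketch (hypothetical name; the lane index must be a compile-time constant):

#include <arm_neon.h>

float16_t first_lane(float16x4_t v) {
  return vget_lane_f16(v, 0);
}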
 
@@ -67321,10 +67403,10 @@
 }
 #else
 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67344,10 +67426,10 @@
 }
 #else
 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67367,10 +67449,10 @@
 }
 #else
 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67390,10 +67472,10 @@
 }
 #else
 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67413,10 +67495,10 @@
 }
 #else
 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67436,10 +67518,10 @@
 }
 #else
 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67452,98 +67534,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
+#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
+  uint64x2_t __ret_853; \
+  uint64x2_t __s0_853 = __p0_853; \
+  uint32x2_t __s1_853 = __p1_853; \
+  uint32x2_t __s2_853 = __p2_853; \
+  __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \
+  __ret_853; \
 })
 #else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
+#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
+  uint64x2_t __ret_854; \
+  uint64x2_t __s0_854 = __p0_854; \
+  uint32x2_t __s1_854 = __p1_854; \
+  uint32x2_t __s2_854 = __p2_854; \
+  uint64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
+  uint32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
+  uint32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
+  __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \
+  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
+  __ret_854; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
+#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
+  uint32x4_t __ret_855; \
+  uint32x4_t __s0_855 = __p0_855; \
+  uint16x4_t __s1_855 = __p1_855; \
+  uint16x4_t __s2_855 = __p2_855; \
+  __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \
+  __ret_855; \
 })
 #else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
+#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
+  uint32x4_t __ret_856; \
+  uint32x4_t __s0_856 = __p0_856; \
+  uint16x4_t __s1_856 = __p1_856; \
+  uint16x4_t __s2_856 = __p2_856; \
+  uint32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
+  uint16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
+  uint16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
+  __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \
+  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
+  __ret_856; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
+#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \
+  int64x2_t __ret_857; \
+  int64x2_t __s0_857 = __p0_857; \
+  int32x2_t __s1_857 = __p1_857; \
+  int32x2_t __s2_857 = __p2_857; \
+  __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \
+  __ret_857; \
 })
 #else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
+#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \
+  int64x2_t __ret_858; \
+  int64x2_t __s0_858 = __p0_858; \
+  int32x2_t __s1_858 = __p1_858; \
+  int32x2_t __s2_858 = __p2_858; \
+  int64x2_t __rev0_858;  __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \
+  int32x2_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \
+  int32x2_t __rev2_858;  __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \
+  __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \
+  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \
+  __ret_858; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
+#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \
+  int32x4_t __ret_859; \
+  int32x4_t __s0_859 = __p0_859; \
+  int16x4_t __s1_859 = __p1_859; \
+  int16x4_t __s2_859 = __p2_859; \
+  __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \
+  __ret_859; \
 })
 #else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
+#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \
+  int32x4_t __ret_860; \
+  int32x4_t __s0_860 = __p0_860; \
+  int16x4_t __s1_860 = __p1_860; \
+  int16x4_t __s2_860 = __p2_860; \
+  int32x4_t __rev0_860;  __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \
+  int16x4_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \
+  int16x4_t __rev2_860;  __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \
+  __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \
+  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \
+  __ret_860; \
 })
 #endif
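/* Editor's sketch (illustration, not part of the diff): the vmlal_lane_* forms
 * above compute acc + vmull(a, splat_lane(b, lane)), i.e. a widening multiply
 * against one broadcast lane followed by an accumulate; the big-endian
 * branches reverse each operand, use the __noswap_* helpers, and reverse the
 * result back. */
#if 0 /* requires <arm_neon.h> */
static inline int32x4_t mac_coeff0(int32x4_t acc, int16x4_t a, int16x4_t c) {
  return vmlal_lane_s16(acc, a, c, 0); /* acc[i] += (int32_t)a[i] * c[0] */
}
#endif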
 
@@ -67555,9 +67637,9 @@
 }
 #else
 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67577,9 +67659,9 @@
 }
 #else
 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67599,9 +67681,9 @@
 }
 #else
 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67621,9 +67703,9 @@
 }
 #else
 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67643,10 +67725,10 @@
 }
 #else
 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67666,10 +67748,10 @@
 }
 #else
 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67689,10 +67771,10 @@
 }
 #else
 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67712,10 +67794,10 @@
 }
 #else
 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67735,10 +67817,10 @@
 }
 #else
 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67758,10 +67840,10 @@
 }
 #else
 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67774,98 +67856,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
+#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
+  uint64x2_t __ret_861; \
+  uint64x2_t __s0_861 = __p0_861; \
+  uint32x2_t __s1_861 = __p1_861; \
+  uint32x2_t __s2_861 = __p2_861; \
+  __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \
+  __ret_861; \
 })
 #else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
+#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
+  uint64x2_t __ret_862; \
+  uint64x2_t __s0_862 = __p0_862; \
+  uint32x2_t __s1_862 = __p1_862; \
+  uint32x2_t __s2_862 = __p2_862; \
+  uint64x2_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \
+  uint32x2_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \
+  uint32x2_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \
+  __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \
+  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \
+  __ret_862; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
+#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
+  uint32x4_t __ret_863; \
+  uint32x4_t __s0_863 = __p0_863; \
+  uint16x4_t __s1_863 = __p1_863; \
+  uint16x4_t __s2_863 = __p2_863; \
+  __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \
+  __ret_863; \
 })
 #else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
+#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
+  uint32x4_t __ret_864; \
+  uint32x4_t __s0_864 = __p0_864; \
+  uint16x4_t __s1_864 = __p1_864; \
+  uint16x4_t __s2_864 = __p2_864; \
+  uint32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
+  uint16x4_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \
+  uint16x4_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \
+  __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \
+  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
+  __ret_864; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
+#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
+  int64x2_t __ret_865; \
+  int64x2_t __s0_865 = __p0_865; \
+  int32x2_t __s1_865 = __p1_865; \
+  int32x2_t __s2_865 = __p2_865; \
+  __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \
+  __ret_865; \
 })
 #else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
+#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
+  int64x2_t __ret_866; \
+  int64x2_t __s0_866 = __p0_866; \
+  int32x2_t __s1_866 = __p1_866; \
+  int32x2_t __s2_866 = __p2_866; \
+  int64x2_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \
+  int32x2_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \
+  int32x2_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \
+  __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \
+  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \
+  __ret_866; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
+#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
+  int32x4_t __ret_867; \
+  int32x4_t __s0_867 = __p0_867; \
+  int16x4_t __s1_867 = __p1_867; \
+  int16x4_t __s2_867 = __p2_867; \
+  __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \
+  __ret_867; \
 })
 #else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
+#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
+  int32x4_t __ret_868; \
+  int32x4_t __s0_868 = __p0_868; \
+  int16x4_t __s1_868 = __p1_868; \
+  int16x4_t __s2_868 = __p2_868; \
+  int32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
+  int16x4_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \
+  int16x4_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \
+  __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \
+  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
+  __ret_868; \
 })
 #endif
 
@@ -67877,9 +67959,9 @@
 }
 #else
 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67899,9 +67981,9 @@
 }
 #else
 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67921,9 +68003,9 @@
 }
 #else
 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67943,9 +68025,9 @@
 }
 #else
 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67958,151 +68040,151 @@
 #endif
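/* Editor's note: the _n_ variants in the surrounding hunks broadcast their
 * scalar operand with a compound literal such as
 * (int16x4_t) {__p2, __p2, __p2, __p2} rather than a splat_lane helper, and
 * vmlsl_* mirrors vmlal_* with subtraction. A sketch, illustration only: */
#if 0
static inline int32x4_t msub_scalar(int32x4_t acc, int16x4_t a, int16_t k) {
  return vmlsl_n_s16(acc, a, k); /* acc[i] -= (int32_t)a[i] * k */
}
#endif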
 
 #ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
+#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \
+  float16x4_t __ret_869; \
+  float16_t __s0_869 = __p0_869; \
+  float16x4_t __s1_869 = __p1_869; \
+float16_t __reint_869 = __s0_869; \
+float16x4_t __reint1_869 = __s1_869; \
+int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \
+  __ret_869 = *(float16x4_t *) &__reint2_869; \
+  __ret_869; \
 })
 #else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
+#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \
+  float16x4_t __ret_870; \
+  float16_t __s0_870 = __p0_870; \
+  float16x4_t __s1_870 = __p1_870; \
+  float16x4_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \
+float16_t __reint_870 = __s0_870; \
+float16x4_t __reint1_870 = __rev1_870; \
+int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \
+  __ret_870 = *(float16x4_t *) &__reint2_870; \
+  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
+  __ret_870; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
+#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \
+  float16x8_t __ret_871; \
+  float16_t __s0_871 = __p0_871; \
+  float16x8_t __s1_871 = __p1_871; \
+float16_t __reint_871 = __s0_871; \
+float16x8_t __reint1_871 = __s1_871; \
+int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \
+  __ret_871 = *(float16x8_t *) &__reint2_871; \
+  __ret_871; \
 })
 #else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
+#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \
+  float16x8_t __ret_872; \
+  float16_t __s0_872 = __p0_872; \
+  float16x8_t __s1_872 = __p1_872; \
+  float16x8_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16_t __reint_872 = __s0_872; \
+float16x8_t __reint1_872 = __rev1_872; \
+int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \
+  __ret_872 = *(float16x8_t *) &__reint2_872; \
+  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_872; \
 })
 #endif
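/* Editor's sketch: vset_lane_f16 / vsetq_lane_f16 above mirror the getters --
 * the scalar and vector are punned to int16 so the s16 set-lane builtin can be
 * reused, then the result is cast back to the float16 vector type.
 * Illustration only: */
#if 0
static inline float16x4_t replace_lane0(float16x4_t v, float16_t x) {
  return vset_lane_f16(x, v, 0); /* v with lane 0 replaced by x */
}
#endif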
 
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
+#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
+  float32x4_t __ret_873; \
+  float32x4_t __s0_873 = __p0_873; \
+  bfloat16x8_t __s1_873 = __p1_873; \
+  bfloat16x4_t __s2_873 = __p2_873; \
+  __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \
+  __ret_873; \
 })
 #else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
+#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
+  float32x4_t __ret_874; \
+  float32x4_t __s0_874 = __p0_874; \
+  bfloat16x8_t __s1_874 = __p1_874; \
+  bfloat16x4_t __s2_874 = __p2_874; \
+  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
+  __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \
+  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
+  __ret_874; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
+#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
+  float32x4_t __ret_875; \
+  float32x4_t __s0_875 = __p0_875; \
+  bfloat16x8_t __s1_875 = __p1_875; \
+  bfloat16x8_t __s2_875 = __p2_875; \
+  __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \
+  __ret_875; \
 })
 #else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
+#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
+  float32x4_t __ret_876; \
+  float32x4_t __s0_876 = __p0_876; \
+  bfloat16x8_t __s1_876 = __p1_876; \
+  bfloat16x8_t __s2_876 = __p2_876; \
+  float32x4_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \
+  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \
+  __ret_876; \
 })
 #endif
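/* Editor's note: the bf16 lane forms splat by repeating vget(q)_lane_bf16 in a
 * compound literal (no generated splat_lane exists for bfloat16) and defer to
 * vbfmlalbq_f32 / vbfmlaltq_f32, which accumulate widened products of the
 * even / odd bf16 lanes into float32. Sketch, assuming bf16 vector arithmetic
 * support: */
#if 0
static inline float32x4_t bfmla_even(float32x4_t acc, bfloat16x8_t a, bfloat16x4_t b) {
  return vbfmlalbq_lane_f32(acc, a, b, 1); /* even-lane products of a and splat(b[1]) */
}
#endif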
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
+#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
+  float32x4_t __ret_877; \
+  float32x4_t __s0_877 = __p0_877; \
+  bfloat16x8_t __s1_877 = __p1_877; \
+  bfloat16x4_t __s2_877 = __p2_877; \
+  __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \
+  __ret_877; \
 })
 #else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
+#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
+  float32x4_t __ret_878; \
+  float32x4_t __s0_878 = __p0_878; \
+  bfloat16x8_t __s1_878 = __p1_878; \
+  bfloat16x4_t __s2_878 = __p2_878; \
+  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \
+  __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \
+  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
+  __ret_878; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
+#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
+  float32x4_t __ret_879; \
+  float32x4_t __s0_879 = __p0_879; \
+  bfloat16x8_t __s1_879 = __p1_879; \
+  bfloat16x8_t __s2_879 = __p2_879; \
+  __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \
+  __ret_879; \
 })
 #else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
+#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
+  float32x4_t __ret_880; \
+  float32x4_t __s0_880 = __p0_880; \
+  bfloat16x8_t __s1_880 = __p1_880; \
+  bfloat16x8_t __s2_880 = __p2_880; \
+  float32x4_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \
+  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \
+  __ret_880; \
 })
 #endif
 
@@ -68114,8 +68196,8 @@
 }
 #else
 __ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float32x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68130,8 +68212,8 @@
 }
 #else
 __ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float32x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68141,673 +68223,483 @@
 #endif
 #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+  float32x4_t __ret_881; \
   float32x4_t __s0_881 = __p0_881; \
   float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
+  float16x4_t __s2_881 = __p2_881; \
+  __ret_881 = vfmlalq_high_f16(__s0_881, __s1_881, (float16x8_t) {vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881)}); \
   __ret_881; \
 })
 #else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+  float32x4_t __ret_882; \
   float32x4_t __s0_882 = __p0_882; \
   float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
+  float16x4_t __s2_882 = __p2_882; \
   float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
   float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
+  float16x4_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \
+  __ret_882 = __noswap_vfmlalq_high_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882)}); \
   __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
   __ret_882; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+  float32x2_t __ret_883; \
   float32x2_t __s0_883 = __p0_883; \
   float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
+  float16x4_t __s2_883 = __p2_883; \
+  __ret_883 = vfmlal_high_f16(__s0_883, __s1_883, (float16x4_t) {vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883)}); \
   __ret_883; \
 })
 #else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+  float32x2_t __ret_884; \
   float32x2_t __s0_884 = __p0_884; \
   float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
+  float16x4_t __s2_884 = __p2_884; \
   float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
   float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
+  float16x4_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 3, 2, 1, 0); \
+  __ret_884 = __noswap_vfmlal_high_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884)}); \
   __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
   __ret_884; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+  float32x4_t __ret_885; \
   float32x4_t __s0_885 = __p0_885; \
   float16x8_t __s1_885 = __p1_885; \
   float16x4_t __s2_885 = __p2_885; \
-  float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
+  __ret_885 = vfmlalq_low_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
   __ret_885; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+  float32x4_t __ret_886; \
   float32x4_t __s0_886 = __p0_886; \
   float16x8_t __s1_886 = __p1_886; \
   float16x4_t __s2_886 = __p2_886; \
   float32x4_t __rev0_886;  __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \
   float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
-  float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
+  __ret_886 = __noswap_vfmlalq_low_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
   __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
   __ret_886; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+  float32x2_t __ret_887; \
   float32x2_t __s0_887 = __p0_887; \
   float16x4_t __s1_887 = __p1_887; \
   float16x4_t __s2_887 = __p2_887; \
-  float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
+  __ret_887 = vfmlal_low_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
   __ret_887; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+  float32x2_t __ret_888; \
   float32x2_t __s0_888 = __p0_888; \
   float16x4_t __s1_888 = __p1_888; \
   float16x4_t __s2_888 = __p2_888; \
   float32x2_t __rev0_888;  __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \
   float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
   float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
-  float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
+  __ret_888 = __noswap_vfmlal_low_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
   __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
   __ret_888; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+  float32x4_t __ret_889; \
   float32x4_t __s0_889 = __p0_889; \
   float16x8_t __s1_889 = __p1_889; \
-  float16x4_t __s2_889 = __p2_889; \
-  float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
+  float16x8_t __s2_889 = __p2_889; \
+  __ret_889 = vfmlalq_high_f16(__s0_889, __s1_889, (float16x8_t) {vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889)}); \
   __ret_889; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+  float32x4_t __ret_890; \
   float32x4_t __s0_890 = __p0_890; \
   float16x8_t __s1_890 = __p1_890; \
-  float16x4_t __s2_890 = __p2_890; \
+  float16x8_t __s2_890 = __p2_890; \
   float32x4_t __rev0_890;  __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \
   float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
-  float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
+  float16x8_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_890 = __noswap_vfmlalq_high_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890)}); \
   __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
   __ret_890; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+  float32x2_t __ret_891; \
   float32x2_t __s0_891 = __p0_891; \
   float16x4_t __s1_891 = __p1_891; \
-  float16x4_t __s2_891 = __p2_891; \
-  float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
+  float16x8_t __s2_891 = __p2_891; \
+  __ret_891 = vfmlal_high_f16(__s0_891, __s1_891, (float16x4_t) {vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891)}); \
   __ret_891; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+  float32x2_t __ret_892; \
   float32x2_t __s0_892 = __p0_892; \
   float16x4_t __s1_892 = __p1_892; \
-  float16x4_t __s2_892 = __p2_892; \
+  float16x8_t __s2_892 = __p2_892; \
   float32x2_t __rev0_892;  __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \
   float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
-  float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
-  float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
+  float16x8_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_892 = __noswap_vfmlal_high_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892)}); \
   __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
   __ret_892; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+  float32x4_t __ret_893; \
   float32x4_t __s0_893 = __p0_893; \
   float16x8_t __s1_893 = __p1_893; \
   float16x8_t __s2_893 = __p2_893; \
-  float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
+  __ret_893 = vfmlalq_low_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
   __ret_893; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+  float32x4_t __ret_894; \
   float32x4_t __s0_894 = __p0_894; \
   float16x8_t __s1_894 = __p1_894; \
   float16x8_t __s2_894 = __p2_894; \
   float32x4_t __rev0_894;  __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \
   float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
+  __ret_894 = __noswap_vfmlalq_low_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
   __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
   __ret_894; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+  float32x2_t __ret_895; \
   float32x2_t __s0_895 = __p0_895; \
   float16x4_t __s1_895 = __p1_895; \
   float16x8_t __s2_895 = __p2_895; \
-  float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
+  __ret_895 = vfmlal_low_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
   __ret_895; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+  float32x2_t __ret_896; \
   float32x2_t __s0_896 = __p0_896; \
   float16x4_t __s1_896 = __p1_896; \
   float16x8_t __s2_896 = __p2_896; \
   float32x2_t __rev0_896;  __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \
   float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
   float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
+  __ret_896 = __noswap_vfmlal_low_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
   __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
   __ret_896; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+#define vfmlslq_lane_high_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+  float32x4_t __ret_897; \
   float32x4_t __s0_897 = __p0_897; \
   float16x8_t __s1_897 = __p1_897; \
-  float16x8_t __s2_897 = __p2_897; \
-  float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
+  float16x4_t __s2_897 = __p2_897; \
+  __ret_897 = vfmlslq_high_f16(__s0_897, __s1_897, (float16x8_t) {vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897)}); \
   __ret_897; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+#define vfmlslq_lane_high_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+  float32x4_t __ret_898; \
   float32x4_t __s0_898 = __p0_898; \
   float16x8_t __s1_898 = __p1_898; \
-  float16x8_t __s2_898 = __p2_898; \
+  float16x4_t __s2_898 = __p2_898; \
   float32x4_t __rev0_898;  __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \
   float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
+  float16x4_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 3, 2, 1, 0); \
+  __ret_898 = __noswap_vfmlslq_high_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898)}); \
   __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
   __ret_898; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+#define vfmlsl_lane_high_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+  float32x2_t __ret_899; \
   float32x2_t __s0_899 = __p0_899; \
   float16x4_t __s1_899 = __p1_899; \
-  float16x8_t __s2_899 = __p2_899; \
-  float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
+  float16x4_t __s2_899 = __p2_899; \
+  __ret_899 = vfmlsl_high_f16(__s0_899, __s1_899, (float16x4_t) {vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899)}); \
   __ret_899; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+#define vfmlsl_lane_high_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+  float32x2_t __ret_900; \
   float32x2_t __s0_900 = __p0_900; \
   float16x4_t __s1_900 = __p1_900; \
-  float16x8_t __s2_900 = __p2_900; \
+  float16x4_t __s2_900 = __p2_900; \
   float32x2_t __rev0_900;  __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \
   float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
-  float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
+  float16x4_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 3, 2, 1, 0); \
+  __ret_900 = __noswap_vfmlsl_high_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900)}); \
   __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
   __ret_900; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
+#define vfmlslq_lane_low_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \
+  float32x4_t __ret_901; \
+  float32x4_t __s0_901 = __p0_901; \
+  float16x8_t __s1_901 = __p1_901; \
+  float16x4_t __s2_901 = __p2_901; \
+  __ret_901 = vfmlslq_low_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \
   __ret_901; \
 })
 #else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
+#define vfmlslq_lane_low_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \
+  float32x4_t __ret_902; \
+  float32x4_t __s0_902 = __p0_902; \
+  float16x8_t __s1_902 = __p1_902; \
+  float16x4_t __s2_902 = __p2_902; \
+  float32x4_t __rev0_902;  __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \
+  float16x8_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_902;  __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \
+  __ret_902 = __noswap_vfmlslq_low_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \
+  __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \
   __ret_902; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
+#define vfmlsl_lane_low_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \
+  float32x2_t __ret_903; \
+  float32x2_t __s0_903 = __p0_903; \
+  float16x4_t __s1_903 = __p1_903; \
+  float16x4_t __s2_903 = __p2_903; \
+  __ret_903 = vfmlsl_low_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \
   __ret_903; \
 })
 #else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
+#define vfmlsl_lane_low_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \
+  float32x2_t __ret_904; \
+  float32x2_t __s0_904 = __p0_904; \
+  float16x4_t __s1_904 = __p1_904; \
+  float16x4_t __s2_904 = __p2_904; \
+  float32x2_t __rev0_904;  __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \
+  float16x4_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \
+  float16x4_t __rev2_904;  __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \
+  __ret_904 = __noswap_vfmlsl_low_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \
+  __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \
   __ret_904; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
+#define vfmlslq_laneq_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
+  float32x4_t __ret_905; \
+  float32x4_t __s0_905 = __p0_905; \
+  float16x8_t __s1_905 = __p1_905; \
+  float16x8_t __s2_905 = __p2_905; \
+  __ret_905 = vfmlslq_high_f16(__s0_905, __s1_905, (float16x8_t) {vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905)}); \
   __ret_905; \
 })
 #else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
+#define vfmlslq_laneq_high_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
+  float32x4_t __ret_906; \
+  float32x4_t __s0_906 = __p0_906; \
+  float16x8_t __s1_906 = __p1_906; \
+  float16x8_t __s2_906 = __p2_906; \
+  float32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
+  float16x8_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_906 = __noswap_vfmlslq_high_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906)}); \
   __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
   __ret_906; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
+#define vfmlsl_laneq_high_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
+  float32x2_t __ret_907; \
+  float32x2_t __s0_907 = __p0_907; \
+  float16x4_t __s1_907 = __p1_907; \
+  float16x8_t __s2_907 = __p2_907; \
+  __ret_907 = vfmlsl_high_f16(__s0_907, __s1_907, (float16x4_t) {vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907)}); \
   __ret_907; \
 })
 #else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
+#define vfmlsl_laneq_high_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
+  float32x2_t __ret_908; \
+  float32x2_t __s0_908 = __p0_908; \
+  float16x4_t __s1_908 = __p1_908; \
+  float16x8_t __s2_908 = __p2_908; \
+  float32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
+  float16x4_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \
+  float16x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_908 = __noswap_vfmlsl_high_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908)}); \
   __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
   __ret_908; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
+#define vfmlslq_laneq_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
+  float32x4_t __ret_909; \
+  float32x4_t __s0_909 = __p0_909; \
+  float16x8_t __s1_909 = __p1_909; \
+  float16x8_t __s2_909 = __p2_909; \
+  __ret_909 = vfmlslq_low_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \
   __ret_909; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
+#define vfmlslq_laneq_low_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
+  float32x4_t __ret_910; \
+  float32x4_t __s0_910 = __p0_910; \
+  float16x8_t __s1_910 = __p1_910; \
+  float16x8_t __s2_910 = __p2_910; \
+  float32x4_t __rev0_910;  __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \
+  float16x8_t __rev1_910;  __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_910 = __noswap_vfmlslq_low_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \
+  __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \
   __ret_910; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
+#define vfmlsl_laneq_low_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
+  float32x2_t __ret_911; \
+  float32x2_t __s0_911 = __p0_911; \
+  float16x4_t __s1_911 = __p1_911; \
+  float16x8_t __s2_911 = __p2_911; \
+  __ret_911 = vfmlsl_low_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \
   __ret_911; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
+#define vfmlsl_laneq_low_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
+  float32x2_t __ret_912; \
+  float32x2_t __s0_912 = __p0_912; \
+  float16x4_t __s1_912 = __p1_912; \
+  float16x8_t __s2_912 = __p2_912; \
+  float32x2_t __rev0_912;  __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \
+  float16x4_t __rev1_912;  __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \
+  float16x8_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_912 = __noswap_vfmlsl_low_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \
+  __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \
   __ret_912; \
 })
 #endif
 
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
+#define vmulh_lane_f16(__p0_913, __p1_913, __p2_913) __extension__ ({ \
+  float16_t __ret_913; \
+  float16_t __s0_913 = __p0_913; \
+  float16x4_t __s1_913 = __p1_913; \
+  __ret_913 = __s0_913 * vget_lane_f16(__s1_913, __p2_913); \
   __ret_913; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
+#define vmulh_lane_f16(__p0_914, __p1_914, __p2_914) __extension__ ({ \
+  float16_t __ret_914; \
+  float16_t __s0_914 = __p0_914; \
+  float16x4_t __s1_914 = __p1_914; \
+  float16x4_t __rev1_914;  __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 3, 2, 1, 0); \
+  __ret_914 = __s0_914 * __noswap_vget_lane_f16(__rev1_914, __p2_914); \
   __ret_914; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
+#define vmulh_laneq_f16(__p0_915, __p1_915, __p2_915) __extension__ ({ \
+  float16_t __ret_915; \
+  float16_t __s0_915 = __p0_915; \
+  float16x8_t __s1_915 = __p1_915; \
+  __ret_915 = __s0_915 * vgetq_lane_f16(__s1_915, __p2_915); \
   __ret_915; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
+#define vmulh_laneq_f16(__p0_916, __p1_916, __p2_916) __extension__ ({ \
+  float16_t __ret_916; \
+  float16_t __s0_916 = __p0_916; \
+  float16x8_t __s1_916 = __p1_916; \
+  float16x8_t __rev1_916;  __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_916 = __s0_916 * __noswap_vgetq_lane_f16(__rev1_916, __p2_916); \
   __ret_916; \
 })
 #endif
 
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
+#endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
+#define vsudotq_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
+  int32x4_t __ret_917; \
+  int32x4_t __s0_917 = __p0_917; \
+  int8x16_t __s1_917 = __p1_917; \
+  uint8x8_t __s2_917 = __p2_917; \
+  uint8x8_t __reint_917 = __s2_917; \
+  __ret_917 = vusdotq_s32(__s0_917, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_917, __p3_917)), __s1_917); \
   __ret_917; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
+#define vsudotq_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
+  int32x4_t __ret_918; \
+  int32x4_t __s0_918 = __p0_918; \
+  int8x16_t __s1_918 = __p1_918; \
+  uint8x8_t __s2_918 = __p2_918; \
+  int32x4_t __rev0_918;  __rev0_918 = __builtin_shufflevector(__s0_918, __s0_918, 3, 2, 1, 0); \
+  int8x16_t __rev1_918;  __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_918 = __rev2_918; \
+  __ret_918 = __noswap_vusdotq_s32(__rev0_918, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_918, __p3_918)), __rev1_918); \
+  __ret_918 = __builtin_shufflevector(__ret_918, __ret_918, 3, 2, 1, 0); \
   __ret_918; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
+#define vsudot_lane_s32(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
+  int32x2_t __ret_919; \
+  int32x2_t __s0_919 = __p0_919; \
+  int8x8_t __s1_919 = __p1_919; \
+  uint8x8_t __s2_919 = __p2_919; \
+  uint8x8_t __reint_919 = __s2_919; \
+  __ret_919 = vusdot_s32(__s0_919, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_919, __p3_919)), __s1_919); \
   __ret_919; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
+#define vsudot_lane_s32(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
+  int32x2_t __ret_920; \
+  int32x2_t __s0_920 = __p0_920; \
+  int8x8_t __s1_920 = __p1_920; \
+  uint8x8_t __s2_920 = __p2_920; \
+  int32x2_t __rev0_920;  __rev0_920 = __builtin_shufflevector(__s0_920, __s0_920, 1, 0); \
+  int8x8_t __rev1_920;  __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_920 = __rev2_920; \
+  __ret_920 = __noswap_vusdot_s32(__rev0_920, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_920, __p3_920)), __rev1_920); \
+  __ret_920 = __builtin_shufflevector(__ret_920, __ret_920, 1, 0); \
   __ret_920; \
 })
 #endif
 
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
-  __ret_921; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
-  __ret_922; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
-  __ret_923; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
-  __ret_924; \
-})
-#endif
-
 #endif
 #if defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
@@ -68818,9 +68710,9 @@
 }
 #else
 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68835,9 +68727,9 @@
 }
 #else
 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68852,9 +68744,9 @@
 }
 #else
 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68869,9 +68761,9 @@
 }
 #else
 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68886,9 +68778,9 @@
 }
 #else
 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68903,9 +68795,9 @@
 }
 #else
 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68920,9 +68812,9 @@
 }
 #else
 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68937,9 +68829,9 @@
 }
 #else
 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68954,9 +68846,9 @@
 }
 #else
 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68971,9 +68863,9 @@
 }
 #else
 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68988,9 +68880,9 @@
 }
 #else
 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69005,9 +68897,9 @@
 }
 #else
 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69022,9 +68914,9 @@
 }
 #else
 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69039,9 +68931,9 @@
 }
 #else
 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69056,9 +68948,9 @@
 }
 #else
 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69073,9 +68965,9 @@
 }
 #else
 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69090,9 +68982,9 @@
 }
 #else
 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69107,9 +68999,9 @@
 }
 #else
 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69117,140 +69009,140 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
-  poly64x2_t __s0_925 = __p0_925; \
-  poly64x1_t __s2_925 = __p2_925; \
-  poly64x2_t __ret_925; \
-  __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
-  __ret_925; \
+#define vcopyq_lane_p64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
+  poly64x2_t __ret_921; \
+  poly64x2_t __s0_921 = __p0_921; \
+  poly64x1_t __s2_921 = __p2_921; \
+  __ret_921 = vsetq_lane_p64(vget_lane_p64(__s2_921, __p3_921), __s0_921, __p1_921); \
+  __ret_921; \
 })
 #else
-#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
-  poly64x2_t __s0_926 = __p0_926; \
-  poly64x1_t __s2_926 = __p2_926; \
-  poly64x2_t __rev0_926;  __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \
-  poly64x2_t __ret_926; \
-  __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \
-  __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \
-  __ret_926; \
+#define vcopyq_lane_p64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
+  poly64x2_t __ret_922; \
+  poly64x2_t __s0_922 = __p0_922; \
+  poly64x1_t __s2_922 = __p2_922; \
+  poly64x2_t __rev0_922;  __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 1, 0); \
+  __ret_922 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_922, __p3_922), __rev0_922, __p1_922); \
+  __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 1, 0); \
+  __ret_922; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
-  float64x2_t __s0_927 = __p0_927; \
-  float64x1_t __s2_927 = __p2_927; \
-  float64x2_t __ret_927; \
-  __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \
+#define vcopyq_lane_f64(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
+  float64x2_t __ret_923; \
+  float64x2_t __s0_923 = __p0_923; \
+  float64x1_t __s2_923 = __p2_923; \
+  __ret_923 = vsetq_lane_f64(vget_lane_f64(__s2_923, __p3_923), __s0_923, __p1_923); \
+  __ret_923; \
+})
+#else
+#define vcopyq_lane_f64(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
+  float64x2_t __ret_924; \
+  float64x2_t __s0_924 = __p0_924; \
+  float64x1_t __s2_924 = __p2_924; \
+  float64x2_t __rev0_924;  __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \
+  __ret_924 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_924, __p3_924), __rev0_924, __p1_924); \
+  __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \
+  __ret_924; \
+})
+#endif
+
+#define vcopy_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
+  poly64x1_t __ret_925; \
+  poly64x1_t __s0_925 = __p0_925; \
+  poly64x1_t __s2_925 = __p2_925; \
+  __ret_925 = vset_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
+  __ret_925; \
+})
+#define vcopy_lane_f64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
+  float64x1_t __ret_926; \
+  float64x1_t __s0_926 = __p0_926; \
+  float64x1_t __s2_926 = __p2_926; \
+  __ret_926 = vset_lane_f64(vget_lane_f64(__s2_926, __p3_926), __s0_926, __p1_926); \
+  __ret_926; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
+  poly64x2_t __ret_927; \
+  poly64x2_t __s0_927 = __p0_927; \
+  poly64x2_t __s2_927 = __p2_927; \
+  __ret_927 = vsetq_lane_p64(vgetq_lane_p64(__s2_927, __p3_927), __s0_927, __p1_927); \
   __ret_927; \
 })
 #else
-#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
-  float64x2_t __s0_928 = __p0_928; \
-  float64x1_t __s2_928 = __p2_928; \
-  float64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
-  float64x2_t __ret_928; \
-  __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \
+#define vcopyq_laneq_p64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
+  poly64x2_t __ret_928; \
+  poly64x2_t __s0_928 = __p0_928; \
+  poly64x2_t __s2_928 = __p2_928; \
+  poly64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
+  poly64x2_t __rev2_928;  __rev2_928 = __builtin_shufflevector(__s2_928, __s2_928, 1, 0); \
+  __ret_928 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_928, __p3_928), __rev0_928, __p1_928); \
   __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \
   __ret_928; \
 })
 #endif
 
-#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
-  poly64x1_t __s0_929 = __p0_929; \
-  poly64x1_t __s2_929 = __p2_929; \
-  poly64x1_t __ret_929; \
-  __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
+  float64x2_t __ret_929; \
+  float64x2_t __s0_929 = __p0_929; \
+  float64x2_t __s2_929 = __p2_929; \
+  __ret_929 = vsetq_lane_f64(vgetq_lane_f64(__s2_929, __p3_929), __s0_929, __p1_929); \
   __ret_929; \
 })
-#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
-  float64x1_t __s0_930 = __p0_930; \
-  float64x1_t __s2_930 = __p2_930; \
-  float64x1_t __ret_930; \
-  __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \
+#else
+#define vcopyq_laneq_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
+  float64x2_t __ret_930; \
+  float64x2_t __s0_930 = __p0_930; \
+  float64x2_t __s2_930 = __p2_930; \
+  float64x2_t __rev0_930;  __rev0_930 = __builtin_shufflevector(__s0_930, __s0_930, 1, 0); \
+  float64x2_t __rev2_930;  __rev2_930 = __builtin_shufflevector(__s2_930, __s2_930, 1, 0); \
+  __ret_930 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_930, __p3_930), __rev0_930, __p1_930); \
+  __ret_930 = __builtin_shufflevector(__ret_930, __ret_930, 1, 0); \
   __ret_930; \
 })
+#endif
+
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
-  poly64x2_t __s0_931 = __p0_931; \
+#define vcopy_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
+  poly64x1_t __ret_931; \
+  poly64x1_t __s0_931 = __p0_931; \
   poly64x2_t __s2_931 = __p2_931; \
-  poly64x2_t __ret_931; \
-  __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
+  __ret_931 = vset_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
   __ret_931; \
 })
 #else
-#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
-  poly64x2_t __s0_932 = __p0_932; \
+#define vcopy_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
+  poly64x1_t __ret_932; \
+  poly64x1_t __s0_932 = __p0_932; \
   poly64x2_t __s2_932 = __p2_932; \
-  poly64x2_t __rev0_932;  __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \
   poly64x2_t __rev2_932;  __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \
-  poly64x2_t __ret_932; \
-  __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \
-  __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \
+  __ret_932 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __s0_932, __p1_932); \
   __ret_932; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
-  float64x2_t __s0_933 = __p0_933; \
+#define vcopy_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
+  float64x1_t __ret_933; \
+  float64x1_t __s0_933 = __p0_933; \
   float64x2_t __s2_933 = __p2_933; \
-  float64x2_t __ret_933; \
-  __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
+  __ret_933 = vset_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
   __ret_933; \
 })
 #else
-#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
-  float64x2_t __s0_934 = __p0_934; \
+#define vcopy_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
+  float64x1_t __ret_934; \
+  float64x1_t __s0_934 = __p0_934; \
   float64x2_t __s2_934 = __p2_934; \
-  float64x2_t __rev0_934;  __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \
   float64x2_t __rev2_934;  __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \
-  float64x2_t __ret_934; \
-  __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \
-  __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \
+  __ret_934 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __s0_934, __p1_934); \
   __ret_934; \
 })
 #endif
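The vcopy*_lane* definitions above are GNU statement-expression macros: the arguments are captured into __sN locals, the source lane is read with vget(q)_lane, and vset(q)_lane writes it into the destination. The change in these hunks is purely mechanical: the __ret declaration is hoisted to the top of the block and the generated _NNN suffixes are renumbered, with no behavioral difference. The shape of the pattern, reduced to a sketch with illustrative names:

#define my_copy_lane(dst, dlane, src, slane) __extension__ ({ \
  float64x1_t __ret;                /* declared first, matching the new layout */ \
  float64x1_t __s0 = (dst); \
  float64x1_t __s2 = (src); \
  __ret = vset_lane_f64(vget_lane_f64(__s2, (slane)), __s0, (dlane)); \
  __ret; \
})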
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \
-  poly64x1_t __s0_935 = __p0_935; \
-  poly64x2_t __s2_935 = __p2_935; \
-  poly64x1_t __ret_935; \
-  __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \
-  __ret_935; \
-})
-#else
-#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \
-  poly64x1_t __s0_936 = __p0_936; \
-  poly64x2_t __s2_936 = __p2_936; \
-  poly64x2_t __rev2_936;  __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \
-  poly64x1_t __ret_936; \
-  __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \
-  __ret_936; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \
-  float64x1_t __s0_937 = __p0_937; \
-  float64x2_t __s2_937 = __p2_937; \
-  float64x1_t __ret_937; \
-  __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \
-  __ret_937; \
-})
-#else
-#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \
-  float64x1_t __s0_938 = __p0_938; \
-  float64x2_t __s2_938 = __p2_938; \
-  float64x2_t __rev2_938;  __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \
-  float64x1_t __ret_938; \
-  __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \
-  __ret_938; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint16x8_t __ret;
   __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
@@ -69258,10 +69150,10 @@
 }
 #else
 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69276,10 +69168,10 @@
 }
 #else
 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69294,10 +69186,10 @@
 }
 #else
 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69312,10 +69204,10 @@
 }
 #else
 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69330,10 +69222,10 @@
 }
 #else
 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69348,10 +69240,10 @@
 }
 #else
 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69366,9 +69258,9 @@
 }
 #else
 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69383,9 +69275,9 @@
 }
 #else
 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69400,9 +69292,9 @@
 }
 #else
 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69417,9 +69309,9 @@
 }
 #else
 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69434,10 +69326,10 @@
 }
 #else
 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69452,10 +69344,10 @@
 }
 #else
 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69470,10 +69362,10 @@
 }
 #else
 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69488,10 +69380,10 @@
 }
 #else
 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69506,10 +69398,10 @@
 }
 #else
 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69524,10 +69416,10 @@
 }
 #else
 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69542,9 +69434,9 @@
 }
 #else
 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69559,9 +69451,9 @@
 }
 #else
 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69576,9 +69468,9 @@
 }
 #else
 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69593,47 +69485,47 @@
 }
 #else
 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 #endif
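The vmlal_high_n_* and vmlsl_high_n_* forms above fold a scalar broadcast into the widening multiply-accumulate: the high half of the second operand is widened, multiplied by the scalar, and added to (or subtracted from) the accumulator lane by lane. A small usage sketch with illustrative values:

int32x4_t acc = vdupq_n_s32(10);
int16x8_t v   = vdupq_n_s16(3);
acc = vmlal_high_n_s16(acc, v, 5);   /* each lane: 10 + 3*5 = 25 */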
 
-#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \
-  float64x1_t __s0_939 = __p0_939; \
-  float64x1_t __s1_939 = __p1_939; \
-  float64x1_t __ret_939; \
-  float64_t __x_939 = vget_lane_f64(__s0_939, 0); \
-  float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \
-  float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \
-  __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \
-  __ret_939; \
+#define vmulx_lane_f64(__p0_935, __p1_935, __p2_935) __extension__ ({ \
+  float64x1_t __ret_935; \
+  float64x1_t __s0_935 = __p0_935; \
+  float64x1_t __s1_935 = __p1_935; \
+  float64_t __x_935 = vget_lane_f64(__s0_935, 0); \
+  float64_t __y_935 = vget_lane_f64(__s1_935, __p2_935); \
+  float64_t __z_935 = vmulxd_f64(__x_935, __y_935); \
+  __ret_935 = vset_lane_f64(__z_935, __s0_935, __p2_935); \
+  __ret_935; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \
-  float64x1_t __s0_940 = __p0_940; \
-  float64x2_t __s1_940 = __p1_940; \
-  float64x1_t __ret_940; \
-  float64_t __x_940 = vget_lane_f64(__s0_940, 0); \
-  float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \
-  float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \
-  __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \
-  __ret_940; \
+#define vmulx_laneq_f64(__p0_936, __p1_936, __p2_936) __extension__ ({ \
+  float64x1_t __ret_936; \
+  float64x1_t __s0_936 = __p0_936; \
+  float64x2_t __s1_936 = __p1_936; \
+  float64_t __x_936 = vget_lane_f64(__s0_936, 0); \
+  float64_t __y_936 = vgetq_lane_f64(__s1_936, __p2_936); \
+  float64_t __z_936 = vmulxd_f64(__x_936, __y_936); \
+  __ret_936 = vset_lane_f64(__z_936, __s0_936, 0); \
+  __ret_936; \
 })
 #else
-#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \
-  float64x1_t __s0_941 = __p0_941; \
-  float64x2_t __s1_941 = __p1_941; \
-  float64x2_t __rev1_941;  __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \
-  float64x1_t __ret_941; \
-  float64_t __x_941 = vget_lane_f64(__s0_941, 0); \
-  float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \
-  float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \
-  __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \
-  __ret_941; \
+#define vmulx_laneq_f64(__p0_937, __p1_937, __p2_937) __extension__ ({ \
+  float64x1_t __ret_937; \
+  float64x1_t __s0_937 = __p0_937; \
+  float64x2_t __s1_937 = __p1_937; \
+  float64x2_t __rev1_937;  __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 1, 0); \
+  float64_t __x_937 = vget_lane_f64(__s0_937, 0); \
+  float64_t __y_937 = __noswap_vgetq_lane_f64(__rev1_937, __p2_937); \
+  float64_t __z_937 = vmulxd_f64(__x_937, __y_937); \
+  __ret_937 = vset_lane_f64(__z_937, __s0_937, 0); \
+  __ret_937; \
 })
 #endif
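vmulx_lane_f64 and vmulx_laneq_f64 are scalarized rather than vectorized: the macro extracts lane 0 of the first operand and the selected lane of the second, multiplies them with the scalar vmulxd_f64, and re-inserts the product; only the two-lane q-register operand needs a reversal in the big-endian variant. Roughly, the little-endian data flow of vmulx_laneq_f64(a, b, 1), with illustrative values:

float64x1_t a = vdup_n_f64(2.0);
float64x2_t b = (float64x2_t){3.0, 4.0};
float64_t __x = vget_lane_f64(a, 0);                          /* 2.0         */
float64_t __y = vgetq_lane_f64(b, 1);                         /* selects 4.0 */
float64x1_t r = vset_lane_f64(vmulxd_f64(__x, __y), a, 0);    /* r = {8.0}   */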
 
@@ -69646,10 +69538,10 @@
 }
 #else
 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69669,10 +69561,10 @@
 }
 #else
 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69692,10 +69584,10 @@
 }
 #else
 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69715,10 +69607,10 @@
 }
 #else
 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69738,10 +69630,10 @@
 }
 #else
 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69761,10 +69653,10 @@
 }
 #else
 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69785,10 +69677,10 @@
 }
 #else
 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69803,10 +69695,10 @@
 }
 #else
 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69821,10 +69713,10 @@
 }
 #else
 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69839,10 +69731,10 @@
 }
 #else
 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69857,10 +69749,10 @@
 }
 #else
 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69875,10 +69767,10 @@
 }
 #else
 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
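To close out the arm_neon.h hunks: the vabal_high_* family computes a widening absolute-difference-and-accumulate over the high halves of its quadword inputs. A brief usage sketch (values illustrative):

uint16x8_t acc = vdupq_n_u16(0);
uint8x16_t a   = vdupq_n_u8(200);
uint8x16_t b   = vdupq_n_u8(50);
acc = vabal_high_u8(acc, a, b);   /* each lane: 0 + |200 - 50| = 150, widened to u16 */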
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_sve.h b/darwin-x86/lib64/clang/16.0.2/include/arm_sve.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_sve.h
rename to darwin-x86/lib64/clang/16.0.2/include/arm_sve.h
index 909634a..03dc3ff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_sve.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/arm_sve.h
@@ -2407,15 +2407,15 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
 svuint16_t svcnt_s16_z(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb)))
-uint64_t svcntb();
+uint64_t svcntb(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat)))
 uint64_t svcntb_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd)))
-uint64_t svcntd();
+uint64_t svcntd(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat)))
 uint64_t svcntd_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth)))
-uint64_t svcnth();
+uint64_t svcnth(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat)))
 uint64_t svcnth_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8)))
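The substantive change in these arm_sve.h hunks is prototype hygiene: in C, a declaration such as uint64_t svcntb(); leaves the parameter list unspecified (a K&R-style declaration), while svcntb(void) declares a true zero-argument prototype, so mistaken calls with arguments are rejected and -Wstrict-prototypes stays quiet. In miniature:

uint64_t svcntb();      /* old: parameters unspecified, svcntb(42) compiles in C */
uint64_t svcntb(void);  /* new: zero-argument prototype, svcntb(42) is an error  */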
@@ -2427,7 +2427,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16)))
 uint64_t svcntp_b16(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))
-uint64_t svcntw();
+uint64_t svcntw(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))
 uint64_t svcntw_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
@@ -6521,7 +6521,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
 int16_t svorv_s16(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse_b();
+svbool_t svpfalse_b(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
 svbool_t svpfirst_b(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8)))
@@ -6627,13 +6627,13 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16)))
 svbool_t svptrue_pat_b16(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8)))
-svbool_t svptrue_b8();
+svbool_t svptrue_b8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32)))
-svbool_t svptrue_b32();
+svbool_t svptrue_b32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64)))
-svbool_t svptrue_b64();
+svbool_t svptrue_b64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16)))
-svbool_t svptrue_b16();
+svbool_t svptrue_b16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
 svint8_t svqadd_n_s8(svint8_t, int8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
@@ -7011,7 +7011,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
 svint16_t svrbit_s16_z(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr)))
-svbool_t svrdffr();
+svbool_t svrdffr(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z)))
 svbool_t svrdffr_z(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
@@ -7411,7 +7411,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
 svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr)))
-void svsetffr();
+void svsetffr(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
 svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
@@ -8285,93 +8285,93 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
 svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8)))
-svuint8x2_t svundef2_u8();
+svuint8x2_t svundef2_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32)))
-svuint32x2_t svundef2_u32();
+svuint32x2_t svundef2_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64)))
-svuint64x2_t svundef2_u64();
+svuint64x2_t svundef2_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16)))
-svuint16x2_t svundef2_u16();
+svuint16x2_t svundef2_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8)))
-svint8x2_t svundef2_s8();
+svint8x2_t svundef2_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64)))
-svfloat64x2_t svundef2_f64();
+svfloat64x2_t svundef2_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32)))
-svfloat32x2_t svundef2_f32();
+svfloat32x2_t svundef2_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16)))
-svfloat16x2_t svundef2_f16();
+svfloat16x2_t svundef2_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32)))
-svint32x2_t svundef2_s32();
+svint32x2_t svundef2_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64)))
-svint64x2_t svundef2_s64();
+svint64x2_t svundef2_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16)))
-svint16x2_t svundef2_s16();
+svint16x2_t svundef2_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8)))
-svuint8x3_t svundef3_u8();
+svuint8x3_t svundef3_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32)))
-svuint32x3_t svundef3_u32();
+svuint32x3_t svundef3_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64)))
-svuint64x3_t svundef3_u64();
+svuint64x3_t svundef3_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16)))
-svuint16x3_t svundef3_u16();
+svuint16x3_t svundef3_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8)))
-svint8x3_t svundef3_s8();
+svint8x3_t svundef3_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64)))
-svfloat64x3_t svundef3_f64();
+svfloat64x3_t svundef3_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32)))
-svfloat32x3_t svundef3_f32();
+svfloat32x3_t svundef3_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16)))
-svfloat16x3_t svundef3_f16();
+svfloat16x3_t svundef3_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32)))
-svint32x3_t svundef3_s32();
+svint32x3_t svundef3_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64)))
-svint64x3_t svundef3_s64();
+svint64x3_t svundef3_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16)))
-svint16x3_t svundef3_s16();
+svint16x3_t svundef3_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8)))
-svuint8x4_t svundef4_u8();
+svuint8x4_t svundef4_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32)))
-svuint32x4_t svundef4_u32();
+svuint32x4_t svundef4_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64)))
-svuint64x4_t svundef4_u64();
+svuint64x4_t svundef4_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16)))
-svuint16x4_t svundef4_u16();
+svuint16x4_t svundef4_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8)))
-svint8x4_t svundef4_s8();
+svint8x4_t svundef4_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64)))
-svfloat64x4_t svundef4_f64();
+svfloat64x4_t svundef4_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32)))
-svfloat32x4_t svundef4_f32();
+svfloat32x4_t svundef4_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16)))
-svfloat16x4_t svundef4_f16();
+svfloat16x4_t svundef4_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32)))
-svint32x4_t svundef4_s32();
+svint32x4_t svundef4_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64)))
-svint64x4_t svundef4_s64();
+svint64x4_t svundef4_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16)))
-svint16x4_t svundef4_s16();
+svint16x4_t svundef4_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8)))
-svuint8_t svundef_u8();
+svuint8_t svundef_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32)))
-svuint32_t svundef_u32();
+svuint32_t svundef_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64)))
-svuint64_t svundef_u64();
+svuint64_t svundef_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16)))
-svuint16_t svundef_u16();
+svuint16_t svundef_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8)))
-svint8_t svundef_s8();
+svint8_t svundef_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64)))
-svfloat64_t svundef_f64();
+svfloat64_t svundef_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32)))
-svfloat32_t svundef_f32();
+svfloat32_t svundef_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16)))
-svfloat16_t svundef_f16();
+svfloat16_t svundef_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32)))
-svint32_t svundef_s32();
+svint32_t svundef_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64)))
-svint64_t svundef_s64();
+svint64_t svundef_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16)))
-svint16_t svundef_s16();
+svint16_t svundef_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
 svbool_t svunpkhi_b(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
@@ -13830,8 +13830,8 @@
 int64_t svorv(svbool_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
 int16_t svorv(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse();
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
+svbool_t svpfalse(void);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
 svbool_t svpfirst(svbool_t, svbool_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
@@ -23456,13 +23456,13 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
 svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))
-svbfloat16x2_t svundef2_bf16();
+svbfloat16x2_t svundef2_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))
-svbfloat16x3_t svundef3_bf16();
+svbfloat16x3_t svundef3_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))
-svbfloat16x4_t svundef4_bf16();
+svbfloat16x4_t svundef4_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))
-svbfloat16_t svundef_bf16();
+svbfloat16_t svundef_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
 svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
@@ -24038,6 +24038,10 @@
 } // extern "C"
 #endif
 
+#undef __ai
+
+#undef __aio
+
 #endif /*__ARM_FEATURE_SVE */
 
 #endif /* __ARM_SVE_H */
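The newly appended #undef __ai and #undef __aio lines are macro hygiene: arm_sve.h defines these short helper macros for its inline alias declarations, and undefining them at the end of the header keeps the names from leaking into code that includes it. The same pattern in miniature (the definition shown is a sketch, not the header's exact one):

#define __ai static __inline__ __attribute__((__always_inline__))
__ai int twice(int x) { return 2 * x; }
#undef __ai   /* __ai is no longer visible to includers */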
diff --git a/darwin-x86/lib64/clang/14.0.2/include/armintr.h b/darwin-x86/lib64/clang/16.0.2/include/armintr.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/armintr.h
rename to darwin-x86/lib64/clang/16.0.2/include/armintr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx2intrin.h
similarity index 95%
rename from darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx2intrin.h
index 5064c87..f8521e7 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx2intrin.h
@@ -26,19 +26,19 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi8(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi16(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi32(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+    return (__m256i)__builtin_elementwise_abs((__v8si)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
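These avx2intrin.h hunks replace per-instruction x86 builtins (__builtin_ia32_pabsb256 and friends) with Clang's generic elementwise builtins, which apply the operation lane-wise to any vector type. Because plain char has implementation-defined signedness, the byte forms also cast through the explicitly signed __v32qs (or unsigned __v32qu) vector types so the builtin resolves to the intended semantics. Schematically:

/* old: one dedicated builtin per instruction          */
/*   (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);   */
/* new: one generic builtin; signedness from the cast  */
return (__m256i)__builtin_elementwise_abs((__v32qs)__a);   /* |x| per signed byte lane */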
@@ -92,25 +92,25 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
 }
 
 #define _mm256_alignr_epi8(a, b, n) \
@@ -253,73 +253,73 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS256
@@ -628,25 +628,25 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
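The _mm256_adds_* and _mm256_subs_* rewrites use __builtin_elementwise_add_sat and __builtin_elementwise_sub_sat, which clamp each lane to the element type's range instead of wrapping. The scalar equivalent for one unsigned-byte lane, for illustration:

/* saturating unsigned 8-bit add: clamp instead of wrap */
unsigned char adds_u8(unsigned char a, unsigned char b) {
  unsigned int s = (unsigned int)a + b;
  return s > 255 ? 255 : (unsigned char)s;   /* 200 + 100 -> 255, not 44 */
}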
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512bf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512bf16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512bitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
index 6aee8ae..aaeb936 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
@@ -485,7 +485,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi8 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +507,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi16 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -617,7 +617,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -639,7 +639,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -661,7 +661,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -683,7 +683,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +751,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +773,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +796,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +818,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +840,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +862,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +884,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +906,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -950,7 +950,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -972,7 +972,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -994,7 +994,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1016,7 +1016,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1506,7 +1506,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1598,7 +1598,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1643,7 +1643,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1659,7 +1659,7 @@
 _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
+                                         (__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B),
                                          (__v32hi)_mm512_setzero_si512());
 }
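
The shift-by-immediate wrappers take an `unsigned int` count while the underlying `__builtin_ia32_ps*` builtins are declared with an `int` parameter; the explicit `(int)`/`(unsigned int)` casts make that conversion visible so the header stays quiet under `-Wsign-conversion`. Callers are unaffected, e.g. (hypothetical helper):

#include <immintrin.h>
/* Sketch: shift every 16-bit lane left by 3. */
__m512i shl3_epi16(__m512i v) { return _mm512_slli_epi16(v, 3u); }
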
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512cdintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512cdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512cdintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512cdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512dqintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512dqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512dqintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512dqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512erintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512erintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512erintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512fintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512fintrin.h
index df29864..61bc89c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512fintrin.h
@@ -26,6 +26,10 @@
 typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
 typedef unsigned int __v16su __attribute__((__vector_size__(64)));
 
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
 typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
 typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
 typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
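
`__builtin_elementwise_min`/`__builtin_elementwise_max` choose a signed or an unsigned comparison from the vector's element type, so the header needs a signed-char vector (`__v64qs`) next to the unsigned one: plain `__v64qi` is `char`, whose signedness is target-dependent. A small sketch of that dispatch, with illustrative 16-byte types:

typedef signed char   v16qs __attribute__((__vector_size__(16)));
typedef unsigned char v16qu __attribute__((__vector_size__(16)));

/* Same builtin, different comparison: smin is signed, umin unsigned. */
v16qs smin(v16qs a, v16qs b) { return __builtin_elementwise_min(a, b); }
v16qu umin(v16qu a, v16qu b) { return __builtin_elementwise_min(a, b); }
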
@@ -1086,7 +1090,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_max_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1112,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1134,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1156,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1321,7 +1325,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_min_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1347,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1369,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1391,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1776,7 +1780,7 @@
 {
   return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                   _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
+                                                  (__v16sf) __A, (unsigned short)-1,
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1794,7 +1798,7 @@
 {
   return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                    _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
+                                                   (__v8df) __A, (unsigned char)-1,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1821,7 +1825,7 @@
 {
   return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                   _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
+                                                  (__v16sf) __A, (unsigned short)-1,
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1830,7 +1834,7 @@
 {
   return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                    _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
+                                                   (__v8df) __A, (unsigned char)-1,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
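
In the unmasked floor/ceil wrappers the mask argument is all ones, meaning every lane participates; spelling it `(unsigned short)-1` / `(unsigned char)-1` instead of bare `-1` makes the narrowing to `__mmask16`/`__mmask8` explicit without changing the value. Conceptually (a sketch, assuming AVX-512F):

#include <immintrin.h>
/* An all-ones mask selects every lane, so the masked form behaves
 * exactly like _mm512_floor_ps. */
__m512 floor_all(__m512 v) {
  return _mm512_mask_floor_ps(v, (__mmask16)-1, v);
}
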
 
@@ -1846,7 +1850,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi64(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+  return (__m512i)__builtin_elementwise_abs((__v8di)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1872,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi32(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+  return (__m512i)__builtin_elementwise_abs((__v16si) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5113,7 +5117,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5135,7 +5139,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5157,7 +5161,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5179,7 +5183,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5925,41 +5929,44 @@
                                             (__v8di)_mm512_setzero_si512());
 }
 
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1))
+/// \enum _MM_TERNLOG_ENUM
+///    A helper to represent the ternary logic operations among vectors \a A,
+///    \a B and \a C. The representation is passed to \a imm.
+typedef enum {
+  _MM_TERNLOG_A = 0xF0,
+  _MM_TERNLOG_B = 0xCC,
+  _MM_TERNLOG_C = 0xAA
+} _MM_TERNLOG_ENUM;
 
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U)))
+#define _mm512_ternarylogic_epi32(A, B, C, imm)                                \
+  ((__m512i)__builtin_ia32_pternlogd512_mask(                                  \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)-1))
 
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                              (__v16si)(__m512i)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U)))
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm)                        \
+  ((__m512i)__builtin_ia32_pternlogd512_mask(                                  \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)(U)))
 
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm)                       \
+  ((__m512i)__builtin_ia32_pternlogd512_maskz(                                 \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)(U)))
 
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm512_ternarylogic_epi64(A, B, C, imm)                                \
+  ((__m512i)__builtin_ia32_pternlogq512_mask(                                  \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                              (__v8di)(__m512i)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm)                        \
+  ((__m512i)__builtin_ia32_pternlogq512_mask(                                  \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm)                       \
+  ((__m512i)__builtin_ia32_pternlogq512_maskz(                                 \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
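+
The `_MM_TERNLOG_ENUM` constants are the truth-table columns for `A`, `B`, and `C`; combining them with ordinary bitwise operators builds the 8-bit immediate. For instance, a three-way XOR (sketch, assuming AVX-512F):

#include <immintrin.h>
/* 0xF0 ^ 0xCC ^ 0xAA == 0x96, the 3-input XOR truth table. */
__m512i xor3(__m512i a, __m512i b, __m512i c) {
  return _mm512_ternarylogic_epi32(
      a, b, c, _MM_TERNLOG_A ^ _MM_TERNLOG_B ^ _MM_TERNLOG_C);
}
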
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsd_i64(A, R) \
@@ -6599,7 +6606,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6622,7 +6629,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9312,43 +9319,43 @@
  */
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_add_q512(__W);
+  return __builtin_reduce_add((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_mul_q512(__W);
+  return __builtin_reduce_mul((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_add_q512(__W);
+  return __builtin_reduce_add((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  return __builtin_ia32_reduce_mul_q512(__W);
+  return __builtin_reduce_mul((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
+  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
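
The integer reductions now go through the target-independent `__builtin_reduce_*` builtins, which fold all lanes of the vector with the named operator; observable behavior is unchanged. A sketch, assuming AVX-512F:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i v = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1);
  printf("%lld\n", _mm512_reduce_add_epi64(v));  /* 36 == 1+2+...+8 */
  return 0;
}
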
 
 // -0.0 is used to ignore the start value since it is the neutral value of
@@ -9376,46 +9383,46 @@
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_add_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
+  return __builtin_reduce_add((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_mul_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+  return __builtin_reduce_mul((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
+  return __builtin_reduce_add((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+  return __builtin_reduce_mul((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
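
For the masked AND reductions, lanes switched off by the mask are first replaced with the operation's identity, all ones; `_mm512_set1_epi32(-1)` produces the same bit pattern as the old `~0U` but without an unsigned-to-signed conversion at the `int` parameter. A sketch of the semantics:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i v = _mm512_set1_epi32(0x0F);
  /* Only lanes 0 and 1 participate; masked-off lanes become ~0, the
   * AND identity, so they cannot clear any bits of the result. */
  printf("%x\n", _mm512_mask_reduce_and_epi32((__mmask16)0x3, v));  /* f */
  return 0;
}
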
 
 static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9449,89 @@
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
+  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ double __DEFAULT_FN_ATTRS512
@@ -9594,7 +9601,7 @@
 ///
 /// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9602,7 +9609,7 @@
 ///   dst[i+63:i] := MEM[addr+63:addr]
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_i32logather_pd(vindex, base_addr, scale)                        \
   _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
 
@@ -9614,7 +9621,7 @@
 ///
 /// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9626,7 +9633,7 @@
 ///   FI
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale)        \
   _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex),      \
                            (base_addr), (scale))
@@ -9637,7 +9644,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9645,7 +9652,7 @@
 ///   dst[i+63:i] := MEM[addr+63:addr]
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_i32logather_epi64(vindex, base_addr, scale)                     \
   _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
 
@@ -9656,7 +9663,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9668,7 +9675,7 @@
 ///   FI
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale)     \
   _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex),   \
                               (base_addr), (scale))
@@ -9679,14 +9686,14 @@
 ///
 /// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
 ///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
 ///   MEM[addr+63:addr] := v1[i+63:i]
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale)                   \
   _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
 
@@ -9698,7 +9705,7 @@
 ///
 /// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9707,7 +9714,7 @@
 ///     MEM[addr+63:addr] := a[i+63:i]
 ///   FI
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale)        \
   _mm512_mask_i32scatter_pd((base_addr), (mask),                               \
                             _mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9718,14 +9725,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
 ///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
 ///   MEM[addr+63:addr] := a[i+63:i]
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale)                \
   _mm512_i32scatter_epi64((base_addr),                                         \
                           _mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9737,7 +9744,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9746,7 +9753,7 @@
 ///     MEM[addr+63:addr] := a[i+63:i]
 ///   FI
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale)     \
   _mm512_mask_i32scatter_epi64((base_addr), (mask),                            \
                                _mm512_castsi512_si256(vindex), (v1), (scale))
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
index 99409a3..7693857 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
@@ -10,6 +10,8 @@
 #error "Never use <avx512fp16intrin.h> directly; include <immintrin.h> instead."
 #endif
 
+#ifdef __SSE2__
+
 #ifndef __AVX512FP16INTRIN_H
 #define __AVX512FP16INTRIN_H
 
@@ -829,7 +831,7 @@
   struct __mm_load_sh_struct {
     _Float16 __u;
   } __attribute__((__packed__, __may_alias__));
-  _Float16 __u = ((struct __mm_load_sh_struct *)__dp)->__u;
+  _Float16 __u = ((const struct __mm_load_sh_struct *)__dp)->__u;
   return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0};
 }
 
@@ -838,13 +840,13 @@
   __m128h src = (__v8hf)__builtin_shufflevector(
       (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8);
 
-  return (__m128h)__builtin_ia32_loadsh128_mask((__v8hf *)__A, src, __U & 1);
+  return (__m128h)__builtin_ia32_loadsh128_mask((const __v8hf *)__A, src, __U & 1);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_load_sh(__mmask8 __U, const void *__A) {
   return (__m128h)__builtin_ia32_loadsh128_mask(
-      (__v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
+      (const __v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
 }
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
@@ -3347,3 +3349,4 @@
 #undef __DEFAULT_FN_ATTRS512
 
 #endif
+#endif
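
Two independent fixes in the FP16 header: the whole file is now guarded by `__SSE2__`, since `_Float16` on x86 is only available with SSE2, and the scalar loads read through `const`-qualified pointers, so loading from read-only data compiles without casts. A usage sketch (hypothetical constant), assuming `-mavx512fp16`:

#include <immintrin.h>
static const _Float16 k_half = (_Float16)1.5;   /* read-only storage */
__m128h load_k(void) { return _mm_load_sh(&k_half); }
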
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512ifmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512ifmaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512ifmavlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512ifmavlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512pfintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512pfintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512pfintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512pfintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vbmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vbmiintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vbmivlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
index 6a5a860..1cdbb28 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
@@ -417,7 +417,7 @@
   __v4sf __V = {__A, 0, 0, 0};
   __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
       (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
-  return __R[0];
+  return (__bfloat16)__R[0];
 }
 
 /// Convert Packed BF16 Data to Packed float Data.
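
`__bfloat16` here is a storage-only 16-bit type, and the element read out of the `__v8hi` result is a plain `short`, so the return now carries an explicit cast. Typical use (sketch, assuming `-mavx512bf16 -mavx512vl`):

#include <immintrin.h>
/* Round one float to bf16 storage format. */
__bfloat16 to_bf16(float x) { return _mm_cvtness_sbh(x); }
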
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlbitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
index 7873516..521ccab 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
@@ -1942,7 +1942,7 @@
 _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
+                                             (__v8hi)_mm_slli_epi16(__A, (int)__B),
                                              (__v8hi)__W);
 }
 
@@ -1950,7 +1950,7 @@
 _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
+                                             (__v8hi)_mm_slli_epi16(__A, (int)__B),
                                              (__v8hi)_mm_setzero_si128());
 }
 
@@ -1959,7 +1959,7 @@
                        unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
+                                         (__v16hi)_mm256_slli_epi16(__A, (int)__B),
                                          (__v16hi)__W);
 }
 
@@ -1967,7 +1967,7 @@
 _mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
+                                         (__v16hi)_mm256_slli_epi16(__A, (int)__B),
                                          (__v16hi)_mm256_setzero_si256());
 }
 
@@ -2095,7 +2095,7 @@
 _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
+                                             (__v8hi)_mm_srai_epi16(__A, (int)__B),
                                              (__v8hi)__W);
 }
 
@@ -2103,7 +2103,7 @@
 _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
+                                             (__v8hi)_mm_srai_epi16(__A, (int)__B),
                                              (__v8hi)_mm_setzero_si128());
 }
 
@@ -2112,7 +2112,7 @@
                        unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
+                                         (__v16hi)_mm256_srai_epi16(__A, (int)__B),
                                          (__v16hi)__W);
 }
 
@@ -2120,7 +2120,7 @@
 _mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
+                                         (__v16hi)_mm256_srai_epi16(__A, (int)__B),
                                          (__v16hi)_mm256_setzero_si256());
 }
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlcdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlcdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vldqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
index 3d27853..d4a7d1b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
@@ -11,6 +11,8 @@
     "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead."
 #endif
 
+#ifdef __SSE2__
+
 #ifndef __AVX512VLFP16INTRIN_H
 #define __AVX512VLFP16INTRIN_H
 
@@ -2066,3 +2068,4 @@
 #undef __DEFAULT_FN_ATTRS256
 
 #endif
+#endif
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
index 0519dba..3e8355f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
@@ -2988,7 +2988,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3007,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
 }
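
The 128- and 256-bit 64-bit-lane operations get the same `__builtin_elementwise_*` treatment as the 512-bit ones. These are AVX-512VL extensions; there is, for example, no AVX2 64-bit `pabs`, so the sketch below assumes `-mavx512vl`:

#include <immintrin.h>
/* Per-lane absolute value on four 64-bit lanes. */
__m256i abs_q(__m256i v) { return _mm256_abs_epi64(v); }
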
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3054,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3073,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3120,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3139,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3186,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3205,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3252,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3271,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -4525,7 +4525,7 @@
 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
+                                             (__v4si)_mm_slli_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -4533,7 +4533,7 @@
 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
+                                             (__v4si)_mm_slli_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -4541,7 +4541,7 @@
 _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
+                                             (__v8si)_mm256_slli_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -4549,7 +4549,7 @@
 _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
+                                             (__v8si)_mm256_slli_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -4589,7 +4589,7 @@
 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
+                                             (__v2di)_mm_slli_epi64(__A, (int)__B),
                                              (__v2di)__W);
 }
 
@@ -4597,7 +4597,7 @@
 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
+                                             (__v2di)_mm_slli_epi64(__A, (int)__B),
                                              (__v2di)_mm_setzero_si128());
 }
 
@@ -4605,7 +4605,7 @@
 _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
+                                             (__v4di)_mm256_slli_epi64(__A, (int)__B),
                                              (__v4di)__W);
 }
 
@@ -4613,7 +4613,7 @@
 _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
+                                             (__v4di)_mm256_slli_epi64(__A, (int)__B),
                                              (__v4di)_mm256_setzero_si256());
 }
 
@@ -4869,7 +4869,7 @@
 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
+                                             (__v4si)_mm_srli_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -4877,7 +4877,7 @@
 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
+                                             (__v4si)_mm_srli_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -4885,7 +4885,7 @@
 _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
+                                             (__v8si)_mm256_srli_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -4893,7 +4893,7 @@
 _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
+                                             (__v8si)_mm256_srli_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -4933,7 +4933,7 @@
 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
+                                             (__v2di)_mm_srli_epi64(__A, (int)__B),
                                              (__v2di)__W);
 }
 
@@ -4941,7 +4941,7 @@
 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
+                                             (__v2di)_mm_srli_epi64(__A, (int)__B),
                                              (__v2di)_mm_setzero_si128());
 }
 
@@ -4949,7 +4949,7 @@
 _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
+                                             (__v4di)_mm256_srli_epi64(__A, (int)__B),
                                              (__v4di)__W);
 }
 
@@ -4957,7 +4957,7 @@
 _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
+                                             (__v4di)_mm256_srli_epi64(__A, (int)__B),
                                              (__v4di)_mm256_setzero_si256());
 }
 
@@ -6408,7 +6408,7 @@
 _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
+                                             (__v4si)_mm_srai_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -6416,7 +6416,7 @@
 _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
+                                             (__v4si)_mm_srai_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -6424,7 +6424,7 @@
 _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
+                                             (__v8si)_mm256_srai_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -6432,7 +6432,7 @@
 _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
+                                             (__v8si)_mm256_srai_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -6483,7 +6483,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_srai_epi64(__m128i __A, unsigned int __imm)
 {
-  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -6505,7 +6505,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_srai_epi64(__m256i __A, unsigned int __imm)
 {
-  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -6525,79 +6525,65 @@
                                         (__v4di)_mm256_setzero_si256());
 }
 
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm_ternarylogic_epi32(A, B, C, imm)                                   \
+  ((__m128i)__builtin_ia32_pternlogd128_mask(                                  \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm)                           \
+  ((__m128i)__builtin_ia32_pternlogd128_mask(                                  \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
-                                              (__v4si)(__m128i)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm)                          \
+  ((__m128i)__builtin_ia32_pternlogd128_maskz(                                 \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm256_ternarylogic_epi32(A, B, C, imm)                                \
+  ((__m256i)__builtin_ia32_pternlogd256_mask(                                  \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm)                        \
+  ((__m256i)__builtin_ia32_pternlogd256_mask(                                  \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
-                                              (__v8si)(__m256i)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm)                       \
+  ((__m256i)__builtin_ia32_pternlogd256_maskz(                                 \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm_ternarylogic_epi64(A, B, C, imm)                                   \
+  ((__m128i)__builtin_ia32_pternlogq128_mask(                                  \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm)                           \
+  ((__m128i)__builtin_ia32_pternlogq128_mask(                                  \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
-                                              (__v2di)(__m128i)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm)                          \
+  ((__m128i)__builtin_ia32_pternlogq128_maskz(                                 \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm256_ternarylogic_epi64(A, B, C, imm)                                \
+  ((__m256i)__builtin_ia32_pternlogq256_mask(                                  \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm)                        \
+  ((__m256i)__builtin_ia32_pternlogq256_mask(                                  \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
-                                              (__v4di)(__m256i)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm)                       \
+  ((__m256i)__builtin_ia32_pternlogq256_maskz(                                 \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
 #define _mm256_shuffle_f32x4(A, B, imm) \
   ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
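
Same truth-table scheme at 128/256 bits, now with `imm` cast to `unsigned char` to match the builtin's prototype. The masked forms keep the corresponding lane of `A` where the mask bit is clear, e.g. (sketch, assuming `-mavx512vl`):

#include <immintrin.h>
/* 0xF0 | 0xCC | 0xAA == 0xFE, a three-way OR; lanes whose mask bit is
 * zero keep their value from a. */
__m256i or3_masked(__m256i a, __m256i b, __m256i c, __mmask8 m) {
  return _mm256_mask_ternarylogic_epi32(
      a, m, b, c, _MM_TERNLOG_A | _MM_TERNLOG_B | _MM_TERNLOG_C);
}
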
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
index 0fb29af..8bc0694 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
@@ -25,7 +25,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -34,7 +34,7 @@
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpbusd_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -45,7 +45,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -54,7 +54,7 @@
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpbusds_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -65,14 +65,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpwssd_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -83,14 +83,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpwssds_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -101,7 +101,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -110,7 +110,7 @@
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpbusd_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -121,7 +121,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -130,7 +130,7 @@
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpbusds_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -141,14 +141,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpwssd_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -159,14 +159,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpwssds_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
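The .operation blocks above all describe the same multiply-accumulate shape: each destination dword adds four byte products (VPDPBUSD/VPDPBUSDS) or two word products (VPDPWSSD/VPDPWSSDS) into the accumulator S, with the S-suffixed forms saturating the sum. A minimal self-check sketch, assuming an AVX512VNNI+VL toolchain (-mavx512vnni -mavx512vl); the operand values are illustrative:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i s = _mm256_set1_epi32(1000);
  __m256i a = _mm256_set1_epi8(10); /* treated as unsigned bytes */
  __m256i b = _mm256_set1_epi8(-3); /* treated as signed bytes */
  int r[8];
  /* Each dword lane: 1000 + 4 * (10 * -3) = 880. */
  _mm256_storeu_si256((__m256i *)r, _mm256_dpbusd_epi32(s, a, b));
  printf("%d\n", r[0]); /* 880 */
  return 0;
}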
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vlvp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vnniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vpopcntdqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avxintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avxintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/avxintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avxintrin.h
index 17fe636..a8f953c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avxintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avxintrin.h
@@ -1504,7 +1504,10 @@
 ///    00: Bits [31:0] and [159:128] are copied from the selected operand. \n
 ///    01: Bits [63:32] and [191:160] are copied from the selected operand. \n
 ///    10: Bits [95:64] and [223:192] are copied from the selected operand. \n
-///    11: Bits [127:96] and [255:224] are copied from the selected operand.
+///    11: Bits [127:96] and [255:224] are copied from the selected operand. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 256-bit vector of [8 x float] containing the shuffled values.
 #define _mm256_shuffle_ps(a, b, mask) \
   ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
@@ -1953,12 +1956,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi32(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit vector of [8 x i32].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [2:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 32 bits of extended
@@ -1971,12 +1978,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi16(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [16 x i16].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [3:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 16 bits of zero extended
@@ -1990,12 +2001,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi8(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [32 x i8].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [4:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 8 bits of zero extended
@@ -2010,12 +2025,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// long long _mm256_extract_epi64(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [4 x i64].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [1:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 64-bit integer containing the extracted 64 bits of extended
@@ -2030,18 +2049,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [8 x i32] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi32(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
                                         (int)(I), (int)(N)))
@@ -2053,18 +2076,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [16 x i16] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An i16 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi16(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
                                          (int)(I), (int)(N)))
@@ -2075,18 +2102,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [32 x i8] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An i8 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi8(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
                                          (int)(I), (int)(N)))
@@ -2098,18 +2129,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [4 x i64] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    A 64-bit integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///     \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///     \a N with \a I.
 #define _mm256_insert_epi64(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
                                         (long long)(I), (int)(N)))
@@ -3177,7 +3212,7 @@
 ///    A pointer to a 256-bit integer vector containing integer values.
 /// \returns A 256-bit integer vector containing the moved values.
 static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_lddqu_si256(__m256i const *__p)
+_mm256_lddqu_si256(__m256i_u const *__p)
 {
   return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
 }
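The \code prototypes added above make explicit that the extract/insert forms are macros whose index N must be an integer constant. A short usage sketch, assuming an AVX build (-mavx; vector contents are illustrative), which also exercises the _MM_SHUFFLE macro mentioned in the _mm256_shuffle_ps note:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i v = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  int x = _mm256_extract_epi32(v, 5);        /* N = 5 selects element 5 */
  __m256i w = _mm256_insert_epi32(v, 42, 2); /* replace element 2 with 42 */
  printf("%d %d\n", x, _mm256_extract_epi32(w, 2)); /* 5 42 */

  /* _MM_SHUFFLE(3, 2, 1, 0) builds imm8 0xE4: within each 128-bit half,
   * elements 0-1 come from a, elements 2-3 from b. */
  __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
  __m256 b = _mm256_setr_ps(8, 9, 10, 11, 12, 13, 14, 15);
  __m256 sh = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
  printf("%g %g\n", sh[0], sh[2]); /* 0 10 */
  return 0;
}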
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avxvnniintrin.h b/darwin-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
index ad45cb7..b7de562 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
@@ -50,7 +50,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -59,7 +59,7 @@
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -73,7 +73,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -82,7 +82,7 @@
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -96,14 +96,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -117,14 +117,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -138,7 +138,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -147,7 +147,7 @@
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -161,7 +161,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -170,7 +170,7 @@
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -184,14 +184,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -205,14 +205,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
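These _avx_-suffixed forms are the VEX-encoded AVX-VNNI variants of the dot products above; they keep the same per-lane semantics but require only the avxvnni target feature. A minimal sketch (compile with -mavxvnni; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i s = _mm_set1_epi32(5);
  __m128i a = _mm_set1_epi16(100);
  __m128i b = _mm_set1_epi16(7);
  int r[4];
  /* Each dword lane: 5 + 2 * (100 * 7) = 1405. */
  _mm_storeu_si128((__m128i *)r, _mm_dpwssd_avx_epi32(s, a, b));
  printf("%d\n", r[0]); /* 1405 */
  return 0;
}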
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bits/stdatomic.h b/darwin-x86/lib64/clang/16.0.2/include/bits/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/bits/stdatomic.h
rename to darwin-x86/lib64/clang/16.0.2/include/bits/stdatomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bmi2intrin.h b/darwin-x86/lib64/clang/16.0.2/include/bmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/bmi2intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h b/darwin-x86/lib64/clang/16.0.2/include/bmiintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/bmiintrin.h
index f583c21..ffb94be 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/bmiintrin.h
@@ -47,6 +47,7 @@
 ///    An unsigned 32-bit integer whose trailing zeros are to be counted.
 /// \returns An unsigned 32-bit integer containing the number of trailing zero
 ///    bits in the operand.
+/// \see _mm_tzcnt_32
 static __inline__ unsigned int __RELAXED_FN_ATTRS
 __tzcnt_u32(unsigned int __X)
 {
@@ -63,10 +64,11 @@
 ///    An unsigned 32-bit integer whose trailing zeros are to be counted.
 /// \returns A 32-bit integer containing the number of trailing zero bits in
 ///    the operand.
+/// \see __tzcnt_u32
 static __inline__ int __RELAXED_FN_ATTRS
 _mm_tzcnt_32(unsigned int __X)
 {
-  return __builtin_ia32_tzcnt_u32(__X);
+  return (int)__builtin_ia32_tzcnt_u32(__X);
 }
 
 #define _tzcnt_u32(a)     (__tzcnt_u32((a)))
@@ -83,6 +85,7 @@
 ///    An unsigned 64-bit integer whose trailing zeros are to be counted.
 /// \returns An unsigned 64-bit integer containing the number of trailing zero
 ///    bits in the operand.
+/// \see _mm_tzcnt_64
 static __inline__ unsigned long long __RELAXED_FN_ATTRS
 __tzcnt_u64(unsigned long long __X)
 {
@@ -99,10 +102,11 @@
 ///    An unsigned 64-bit integer whose trailing zeros are to be counted.
 /// \returns A 64-bit integer containing the number of trailing zero bits in
 ///    the operand.
+/// \see __tzcnt_u64
 static __inline__ long long __RELAXED_FN_ATTRS
 _mm_tzcnt_64(unsigned long long __X)
 {
-  return __builtin_ia32_tzcnt_u64(__X);
+  return (long long)__builtin_ia32_tzcnt_u64(__X);
 }
 
 #define _tzcnt_u64(a)     (__tzcnt_u64((a)))
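The two spellings cross-referenced above compute the same count; the new casts only make the narrowing from the unsigned builtin return type explicit. A behavior sketch, assuming a BMI-capable build (-mbmi):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  printf("%u\n", __tzcnt_u32(0x18u));  /* 0b11000 -> 3 trailing zeros */
  printf("%d\n", _mm_tzcnt_32(0x18u)); /* same count, int return type */
  printf("%u\n", __tzcnt_u32(0u));     /* defined on zero: operand width, 32 */
  return 0;
}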
diff --git a/darwin-x86/lib64/clang/14.0.2/include/builtins.h b/darwin-x86/lib64/clang/16.0.2/include/builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/builtins.h
rename to darwin-x86/lib64/clang/16.0.2/include/builtins.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cet.h b/darwin-x86/lib64/clang/16.0.2/include/cet.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cet.h
rename to darwin-x86/lib64/clang/16.0.2/include/cet.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h b/darwin-x86/lib64/clang/16.0.2/include/cetintrin.h
similarity index 80%
rename from darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/cetintrin.h
index 4290e9d..a68df5b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/cetintrin.h
@@ -19,7 +19,7 @@
   __attribute__((__always_inline__, __nodebug__, __target__("shstk")))
 
 static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
-  __builtin_ia32_incsspd(__a);
+  __builtin_ia32_incsspd((unsigned int)__a);
 }
 
 #ifdef __x86_64__
@@ -34,7 +34,7 @@
 }
 #else /* __x86_64__ */
 static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspd((int)__a);
+  __builtin_ia32_incsspd(__a);
 }
 #endif /* __x86_64__ */
 
@@ -42,10 +42,26 @@
   return __builtin_ia32_rdsspd(__a);
 }
 
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+  unsigned int t;
+  return __builtin_ia32_rdsspd(t);
+#pragma clang diagnostic pop
+}
+
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
   return __builtin_ia32_rdsspq(__a);
 }
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+  unsigned long long t;
+  return __builtin_ia32_rdsspq(t);
+#pragma clang diagnostic pop
+}
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
@@ -58,7 +74,7 @@
 }
 #endif /* __x86_64__ */
 
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
+static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
   __builtin_ia32_saveprevssp();
 }
 
@@ -86,7 +102,7 @@
 }
 #endif /* __x86_64__ */
 
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy() {
+static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
   __builtin_ia32_setssbsy();
 }
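The new (void) prototypes and the _rdsspd_i32/_rdsspq_i64 helpers read the shadow-stack pointer without a caller-supplied dummy argument. A cautious sketch: RDSSP compiles with -mshstk but acts as a no-op, leaving its source operand unchanged, unless the OS has enabled CET shadow stacks, so the fallback argument below doubles as the 'inactive' result:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
#ifdef __x86_64__
  unsigned long long ssp = _rdsspq(0); /* 0 when shadow stacks are inactive */
  printf("ssp = %#llx\n", ssp);
#endif
  return 0;
}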
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cldemoteintrin.h b/darwin-x86/lib64/clang/16.0.2/include/cldemoteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cldemoteintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/cldemoteintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clflushoptintrin.h b/darwin-x86/lib64/clang/16.0.2/include/clflushoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clflushoptintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/clflushoptintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clwbintrin.h b/darwin-x86/lib64/clang/16.0.2/include/clwbintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clwbintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/clwbintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clzerointrin.h b/darwin-x86/lib64/clang/16.0.2/include/clzerointrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clzerointrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h b/darwin-x86/lib64/clang/16.0.2/include/cpuid.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/cpuid.h
rename to darwin-x86/lib64/clang/16.0.2/include/cpuid.h
index 6df1b4a..caa0069 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/cpuid.h
@@ -200,7 +200,7 @@
 #define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI       0x00000008
+#define bit_AVXVNNI       0x00000010
 #define bit_AVX512BF16    0x00000020
 #define bit_HRESET        0x00400000
 
@@ -232,6 +232,7 @@
 
 /* Features in %ebx for leaf 0x80000008 */
 #define bit_CLZERO      0x00000001
+#define bit_RDPRU       0x00000010
 #define bit_WBNOINVD    0x00000200
 
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/crc32intrin.h b/darwin-x86/lib64/clang/16.0.2/include/crc32intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/crc32intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/crc32intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm b/darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/algorithm
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm
rename to darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/algorithm
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex b/darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex
rename to darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/complex
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/new b/darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/new
rename to darwin-x86/lib64/clang/16.0.2/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/emmintrin.h
similarity index 81%
rename from darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/emmintrin.h
index 6e9c303..a3f56e8 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/emmintrin.h
@@ -20,16 +20,17 @@
 typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
 
 typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u
+    __attribute__((__vector_size__(16), __aligned__(1)));
 
 /* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef double __v2df __attribute__((__vector_size__(16)));
+typedef long long __v2di __attribute__((__vector_size__(16)));
 typedef short __v8hi __attribute__((__vector_size__(16)));
 typedef char __v16qi __attribute__((__vector_size__(16)));
 
 /* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
 typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
 typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
 
@@ -38,8 +39,12 @@
 typedef signed char __v16qs __attribute__((__vector_size__(16)));
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse2"),           \
+                 __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX                                                 \
+  __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"),       \
+                 __min_vector_width__(64)))
 
 /// Adds lower double-precision values in both operands and returns the
 ///    sum in the lower 64 bits of the result. The upper 64 bits of the result
@@ -56,9 +61,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
 ///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] += __b[0];
   return __a;
 }
@@ -75,9 +79,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the sums of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a + (__v2df)__b);
 }
 
@@ -98,9 +101,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    difference of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] -= __b[0];
   return __a;
 }
@@ -117,9 +119,8 @@
 ///    A 128-bit vector of [2 x double] containing the subtrahend.
 /// \returns A 128-bit vector of [2 x double] containing the differences between
 ///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a - (__v2df)__b);
 }
 
@@ -139,9 +140,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    product of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] *= __b[0];
   return __a;
 }
@@ -158,9 +158,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the products of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a * (__v2df)__b);
 }
 
@@ -181,9 +180,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    quotient of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] /= __b[0];
   return __a;
 }
@@ -201,9 +199,8 @@
 ///    A 128-bit vector of [2 x double] containing the divisor.
 /// \returns A 128-bit vector of [2 x double] containing the quotients of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a / (__v2df)__b);
 }
 
@@ -226,11 +223,10 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    square root of the lower 64 bits of operand \a __b, and whose upper 64
 ///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a,
+                                                         __m128d __b) {
   __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Calculates the square root of each of the two values stored in a
@@ -244,9 +240,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [2 x double] containing the square roots of the
 ///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
   return __builtin_ia32_sqrtpd((__v2df)__a);
 }
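The emmintrin.h hunks above (and most that follow) are clang-format reflows; the bodies are unchanged. One point worth keeping in mind while reading them: the _sd forms operate on element 0 only and pass element 1 through from the first operand, while the _pd forms operate on both lanes. A small sketch (-msse2; values illustrative):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(2.0, 3.0); /* element 1 = 2.0, element 0 = 3.0 */
  __m128d b = _mm_set_pd(10.0, 5.0);
  double r[2];
  _mm_storeu_pd(r, _mm_add_sd(a, b));
  printf("add_sd: %g %g\n", r[0], r[1]); /* 8 2 */
  _mm_storeu_pd(r, _mm_add_pd(a, b));
  printf("add_pd: %g %g\n", r[0], r[1]); /* 8 12 */
  return 0;
}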
 
@@ -268,9 +262,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    minimum value between both operands. The upper 64 bits are copied from
 ///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -288,9 +281,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the minimum values
 ///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -312,9 +304,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    maximum value between both operands. The upper 64 bits are copied from
 ///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -332,9 +323,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the maximum values
 ///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -350,9 +340,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2du)__a & (__v2du)__b);
 }
 
@@ -371,9 +360,8 @@
 /// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
 ///    values in the second operand and the one's complement of the first
 ///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)(~(__v2du)__a & (__v2du)__b);
 }
 
@@ -389,9 +377,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a,
+                                                       __m128d __b) {
   return (__m128d)((__v2du)__a | (__v2du)__b);
 }
 
@@ -407,9 +394,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2du)__a ^ (__v2du)__b);
 }
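As the reflowed body of _mm_andnot_pd above shows, that intrinsic complements its first operand, not its second: the result is (~a) & b. A quick sketch (-msse2):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d ones = _mm_castsi128_pd(_mm_set1_epi32(-1)); /* all bits set */
  __m128d x = _mm_set1_pd(1.5);
  double r[2];
  _mm_storeu_pd(r, _mm_andnot_pd(ones, x)); /* ~ones & x = 0.0 */
  printf("%g\n", r[0]);
  _mm_storeu_pd(r, _mm_and_pd(ones, x));    /* ones & x = 1.5 */
  printf("%g\n", r[0]);
  return 0;
}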
 
@@ -426,9 +412,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -446,9 +431,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -467,9 +451,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
 }
 
@@ -488,9 +471,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
 }
 
@@ -509,9 +491,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
 }
 
@@ -532,9 +513,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -556,9 +536,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
+                                                             __m128d __b) {
   return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -577,9 +556,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -598,9 +576,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -619,9 +596,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
 }
 
@@ -640,9 +616,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
 }
 
@@ -661,9 +636,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
 }
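These packed comparisons return a per-lane mask of all ones (true) or all zeros (false), which composes with the logical operations above into branch-free selects; note that the greater-than forms are implemented by swapping operands into the less-than builtins, as the bodies show. A sketch (-msse2; values illustrative):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(4.0, 1.0); /* elements {1.0, 4.0} */
  __m128d b = _mm_set_pd(2.0, 2.0);
  __m128d m = _mm_cmplt_pd(a, b);   /* per-lane mask: a < b */
  /* Per-lane minimum via (m & a) | (~m & b). */
  __m128d lo = _mm_or_pd(_mm_and_pd(m, a), _mm_andnot_pd(m, b));
  double r[2];
  _mm_storeu_pd(r, lo);
  printf("%g %g\n", r[0], r[1]);    /* 1 2 */
  return 0;
}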
 
@@ -684,9 +658,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -709,9 +682,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -734,9 +706,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
 }
 
@@ -759,11 +730,10 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///     results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
+                                                          __m128d __b) {
   __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -785,11 +755,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
+                                                          __m128d __b) {
   __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -813,9 +782,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -841,9 +809,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
+                                                             __m128d __b) {
   return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -866,9 +833,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -891,9 +857,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -916,9 +881,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns  A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
 }
 
@@ -941,11 +905,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
+                                                           __m128d __b) {
   __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -967,11 +930,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contains the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
+                                                           __m128d __b) {
   __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -992,9 +954,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1018,9 +979,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1044,9 +1004,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
 }
 
@@ -1070,9 +1029,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1096,9 +1054,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
 }
 
@@ -1122,9 +1079,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1146,9 +1102,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1172,9 +1127,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1198,9 +1152,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
 }
 
@@ -1224,9 +1177,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1250,9 +1202,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
 }
 
@@ -1276,9 +1227,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison result. If either of the two
 ///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,
+                                                         __m128d __b) {
   return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
 }
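The comi/ucomi families return a plain int rather than a mask, and the \returns notes above spell out their NaN behavior: the ordered predicates yield 0 on a NaN input, while comineq/ucomineq yield 1. A sketch based on those documented results (-msse2):

#include <emmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  __m128d one = _mm_set_sd(1.0);
  __m128d qnan = _mm_set_sd(nan(""));
  printf("%d\n", _mm_comieq_sd(one, one));   /* 1 */
  printf("%d\n", _mm_comieq_sd(one, qnan));  /* 0 per the \returns note */
  printf("%d\n", _mm_comineq_sd(one, qnan)); /* 1 per the \returns note */
  return 0;
}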
 
@@ -1295,9 +1245,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) {
   return __builtin_ia32_cvtpd2ps((__v2df)__a);
 }
 
@@ -1315,9 +1263,7 @@
 ///    floating-point elements are converted to double-precision values. The
 ///    upper two elements are unused.
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) {
   return (__m128d) __builtin_convertvector(
       __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
 }
@@ -1338,9 +1284,7 @@
 ///
 ///    The upper two elements are unused.
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {
   return (__m128d) __builtin_convertvector(
       __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
 }
@@ -1358,9 +1302,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {
   return __builtin_ia32_cvtpd2dq((__v2df)__a);
 }
 
@@ -1375,9 +1317,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) {
   return __builtin_ia32_cvtsd2si((__v2df)__a);
 }
 
@@ -1400,9 +1340,8 @@
 /// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
 ///    converted value from the second parameter. The upper 96 bits are copied
 ///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a,
+                                                         __m128d __b) {
   return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
 }
 
@@ -1423,9 +1362,8 @@
 /// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
 ///    converted value from the second parameter. The upper 64 bits are copied
 ///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a,
+                                                            int __b) {
   __a[0] = __b;
   return __a;
 }
@@ -1449,9 +1387,8 @@
 /// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
 ///    converted value from the second parameter. The upper 64 bits are copied
 ///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,
+                                                          __m128 __b) {
   __a[0] = __b[0];
   return __a;
 }
@@ -1473,9 +1410,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {
   return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
 }
 
@@ -1491,9 +1426,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {
   return __builtin_ia32_cvttsd2si((__v2df)__a);
 }
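 
 /* A short sketch (illustration only): the rounding conversion versus the
    truncating one above, assuming the default round-to-nearest mode. The
    name demo_cvt_modes is hypothetical. */
 static void demo_cvt_modes(int out[2]) {
   __m128d v = _mm_set_sd(2.7);
   out[0] = _mm_cvtsd_si32(v);  /* rounds to nearest: 3 */
   out[1] = _mm_cvttsd_si32(v); /* truncates toward zero: 2 */
 }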
 
@@ -1508,9 +1441,7 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double].
 /// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {
   return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
 }
 
@@ -1528,9 +1459,7 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double].
 /// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) {
   return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
 }
 
@@ -1545,9 +1474,7 @@
 /// \param __a
 ///    A 64-bit vector of [2 x i32].
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) {
   return __builtin_ia32_cvtpi2pd((__v2si)__a);
 }
 
@@ -1562,9 +1489,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
 /// \returns A double-precision floating-point value copied from the lower 64
 ///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
+static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) {
   return __a[0];
 }
 
@@ -1579,10 +1504,8 @@
 ///    A pointer to a 128-bit memory location. The address of the memory
 ///    location has to be 16-byte aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) {
+  return *(const __m128d *)__dp;
 }
 
 /// Loads a double-precision floating-point value from a specified memory
@@ -1597,17 +1520,15 @@
 ///    A pointer to a memory location containing a double-precision value.
 /// \returns A 128-bit vector of [2 x double] containing the loaded and
 ///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) {
   struct __mm_load1_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
+  double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, __u};
 }
 
-#define        _mm_load_pd1(dp)        _mm_load1_pd(dp)
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
 
 /// Loads two double-precision values, in reverse order, from an aligned
 ///    memory location into a 128-bit vector of [2 x double].
@@ -1623,10 +1544,8 @@
 ///    loaded in reverse order.
 /// \returns A 128-bit vector of [2 x double] containing the reversed loaded
 ///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) {
+  __m128d __u = *(const __m128d *)__dp;
   return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
 }
 
@@ -1641,13 +1560,11 @@
 ///    A pointer to a 128-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) {
   struct __loadu_pd {
     __m128d_u __v;
   } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__dp)->__v;
+  return ((const struct __loadu_pd *)__dp)->__v;
 }
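 
 /* A short sketch (illustration only): the aligned/unaligned load pair above.
    _mm_load_pd requires a 16-byte-aligned address; _mm_loadu_pd accepts any
    address. demo_load_pair is a hypothetical helper. */
 static __m128d demo_load_pair(const double *aligned16, const double *any) {
   __m128d a = _mm_load_pd(aligned16); /* may fault if misaligned */
   __m128d u = _mm_loadu_pd(any);      /* safe at any alignment */
   return _mm_add_pd(a, u);
 }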
 
 /// Loads a 64-bit integer value into the low element of a 128-bit integer
@@ -1661,14 +1578,12 @@
 ///    A pointer to a 64-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) {
   struct __loadu_si64 {
     long long __v;
   } __attribute__((__packed__, __may_alias__));
-  long long __u = ((const struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
+  long long __u = ((const struct __loadu_si64 *)__a)->__v;
+  return __extension__(__m128i)(__v2di){__u, 0LL};
 }
 
 /// Loads a 32-bit integer value into the low element of a 128-bit integer
@@ -1682,14 +1597,12 @@
 ///    A pointer to a 32-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) {
   struct __loadu_si32 {
     int __v;
   } __attribute__((__packed__, __may_alias__));
-  int __u = ((const struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+  int __u = ((const struct __loadu_si32 *)__a)->__v;
+  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
 }
 
 /// Loads a 16-bit integer value into the low element of a 128-bit integer
@@ -1703,14 +1616,12 @@
 ///    A pointer to a 16-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) {
   struct __loadu_si16 {
     short __v;
   } __attribute__((__packed__, __may_alias__));
-  short __u = ((const struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
+  short __u = ((const struct __loadu_si16 *)__a)->__v;
+  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
 }
 
 /// Loads a 64-bit double-precision value into the low element of a
@@ -1724,14 +1635,12 @@
 ///    A pointer to a memory location containing a double-precision value.
 ///    The address of the memory location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) {
   struct __mm_load_sd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
+  double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, 0};
 }
 
 /// Loads a double-precision value into the high-order bits of a 128-bit
@@ -1751,14 +1660,13 @@
 ///    [127:64] of the result. The address of the memory location does not have
 ///    to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,
+                                                          double const *__dp) {
   struct __mm_loadh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
+  double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__a[0], __u};
 }
 
 /// Loads a double-precision value into the low-order bits of a 128-bit
@@ -1778,14 +1686,13 @@
 ///    [63:0] of the result. The address of the memory location does not have to
 ///    be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,
+                                                          double const *__dp) {
   struct __mm_loadl_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
+  double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, __a[1]};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double] with
@@ -1799,9 +1706,7 @@
 ///
 /// \returns A 128-bit floating-point vector of [2 x double] with unspecified
 ///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {
   return (__m128d)__builtin_ia32_undef128();
 }
 
@@ -1819,10 +1724,8 @@
 /// \returns An initialized 128-bit floating-point vector of [2 x double]. The
 ///    lower 64 bits contain the value of the parameter. The upper 64 bits are
 ///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {
+  return __extension__(__m128d){__w, 0};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1837,10 +1740,8 @@
 ///    A double-precision floating-point value used to initialize each vector
 ///    element of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) {
+  return __extension__(__m128d){__w, __w};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1855,9 +1756,7 @@
 ///    A double-precision floating-point value used to initialize each vector
 ///    element of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) {
   return _mm_set1_pd(__w);
 }
 
@@ -1875,10 +1774,9 @@
 ///    A double-precision floating-point value used to initialize the lower 64
 ///    bits of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w,
+                                                        double __x) {
+  return __extension__(__m128d){__x, __w};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double],
@@ -1896,10 +1794,9 @@
 ///    A double-precision floating-point value used to initialize the upper 64
 ///    bits of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w,
+                                                         double __x) {
+  return __extension__(__m128d){__w, __x};
 }
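 
 /* A short sketch (illustration only): argument order of the initializers
    above. _mm_set_pd takes (high, low) while _mm_setr_pd takes (low, high),
    so the two calls below build the same vector. demo_set_order is
    hypothetical. */
 static int demo_set_order(void) {
   __m128d a = _mm_set_pd(2.0, 1.0);  /* element 0 = 1.0, element 1 = 2.0 */
   __m128d b = _mm_setr_pd(1.0, 2.0); /* same layout */
   return _mm_cvtsd_f64(a) == _mm_cvtsd_f64(b); /* both low elements are 1.0 */
 }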
 
 /// Constructs a 128-bit floating-point vector of [2 x double]
@@ -1911,10 +1808,8 @@
 ///
 /// \returns An initialized 128-bit floating-point vector of [2 x double] with
 ///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) {
+  return __extension__(__m128d){0, 0};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double]. The lower
@@ -1932,9 +1827,8 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
 ///    lower 64 bits of the result.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a,
+                                                         __m128d __b) {
   __a[0] = __b[0];
   return __a;
 }
@@ -1950,13 +1844,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp,
+                                                       __m128d __a) {
   struct __mm_store_sd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+  ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];
 }
 
 /// Moves packed double-precision values from a 128-bit vector of
@@ -1972,10 +1865,9 @@
 /// \param __a
 ///    A packed 128-bit vector of [2 x double] containing the values to be
 ///    moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp,
+                                                       __m128d __a) {
+  *(__m128d *)__dp = __a;
 }
 
 /// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
@@ -1992,9 +1884,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
 ///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp,
+                                                        __m128d __a) {
   __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
   _mm_store_pd(__dp, __a);
 }
@@ -2013,9 +1904,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
 ///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp,
+                                                        __m128d __a) {
   _mm_store1_pd(__dp, __a);
 }
 
@@ -2031,13 +1921,12 @@
 ///    location does not have to be aligned.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp,
+                                                        __m128d __a) {
   struct __storeu_pd {
     __m128d_u __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
+  ((struct __storeu_pd *)__dp)->__v = __a;
 }
 
 /// Stores two double-precision values, in reverse order, from a 128-bit
@@ -2054,9 +1943,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the values to be reversed and
 ///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp,
+                                                        __m128d __a) {
   __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
   *(__m128d *)__dp = __a;
 }
@@ -2072,13 +1960,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp,
+                                                        __m128d __a) {
   struct __mm_storeh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];
 }
 
 /// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
@@ -2092,13 +1979,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp,
+                                                        __m128d __a) {
   struct __mm_storeh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
 }
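 
 /* A short sketch (illustration only): spilling both halves of a vector of
    [2 x double] with the scalar stores above. demo_store_halves is
    hypothetical. */
 static void demo_store_halves(double out[2], __m128d v) {
   _mm_storel_pd(&out[0], v); /* element 0, bits [63:0] */
   _mm_storeh_pd(&out[1], v); /* element 1, bits [127:64] */
 }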
 
 /// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
@@ -2117,9 +2003,8 @@
 ///    A 128-bit vector of [16 x i8].
 /// \returns A 128-bit vector of [16 x i8] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v16qu)__a + (__v16qu)__b);
 }
 
@@ -2139,9 +2024,8 @@
 ///    A 128-bit vector of [8 x i16].
 /// \returns A 128-bit vector of [8 x i16] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v8hu)__a + (__v8hu)__b);
 }
 
@@ -2161,9 +2045,8 @@
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v4su)__a + (__v4su)__b);
 }
 
@@ -2179,9 +2062,8 @@
 /// \param __b
 ///    A 64-bit integer.
 /// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a,
+                                                            __m64 __b) {
   return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
 }
 
@@ -2201,9 +2083,8 @@
 ///    A 128-bit vector of [2 x i64].
 /// \returns A 128-bit vector of [2 x i64] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a + (__v2du)__b);
 }
 
@@ -2222,10 +2103,9 @@
 ///    A 128-bit signed [16 x i8] vector.
 /// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
 ///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2244,10 +2124,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
 ///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2265,10 +2144,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2286,10 +2164,9 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
 }
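 
 /* A short sketch (illustration only): saturating versus wrapping addition
    using the intrinsics above. demo_adds_sat is hypothetical. */
 static void demo_adds_sat(__m128i out[2]) {
   __m128i a = _mm_set1_epi8(120);
   __m128i b = _mm_set1_epi8(100);
   out[0] = _mm_add_epi8(a, b);  /* wraps: 220 becomes -36 in each lane */
   out[1] = _mm_adds_epi8(a, b); /* saturates: each lane clamps to 127 */
 }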
 
 /// Computes the rounded averages of corresponding elements of two
@@ -2306,9 +2183,8 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
 ///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
 }
 
@@ -2326,9 +2202,8 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
 ///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2352,9 +2227,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [4 x i32] vector containing the sums of products
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a,
+                                                            __m128i __b) {
   return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2372,10 +2246,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the greater value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2392,10 +2265,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a,
+                                                          __m128i __b) {
+  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2412,10 +2284,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2432,10 +2303,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
+                                                          __m128i __b) {
+  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
 }
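 
 /* A short sketch (illustration only): a branch-free unsigned byte clamp
    built from the min/max pair above. demo_clamp_epu8 is hypothetical. */
 static __m128i demo_clamp_epu8(__m128i v, __m128i lo, __m128i hi) {
   /* max(lo, min(v, hi)) forces every lane of v into [lo, hi]. */
   return _mm_max_epu8(lo, _mm_min_epu8(v, hi));
 }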
 
 /// Multiplies the corresponding elements of two signed [8 x i16]
@@ -2452,9 +2322,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
 ///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2472,9 +2341,8 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
 ///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2492,9 +2360,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
 ///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hu)__a * (__v8hu)__b);
 }
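 
 /* A sketch (illustration only): reassembling full 32-bit products from the
    high/low 16-bit halves produced above. demo_mul_full is hypothetical and
    uses the unpack intrinsics declared elsewhere in this header. */
 static void demo_mul_full(__m128i a, __m128i b, __m128i out[2]) {
   __m128i lo = _mm_mullo_epi16(a, b); /* low 16 bits of each product */
   __m128i hi = _mm_mulhi_epi16(a, b); /* high 16 bits of each product */
   /* Interleaving the halves pairs lo[i] with hi[i], yielding the eight
      signed 32-bit products, four per output vector. */
   out[0] = _mm_unpacklo_epi16(lo, hi);
   out[1] = _mm_unpackhi_epi16(lo, hi);
 }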
 
@@ -2511,9 +2378,8 @@
 /// \param __b
 ///    A 64-bit integer containing one of the source operands.
 /// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a,
+                                                            __m64 __b) {
   return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
 }
 
@@ -2530,9 +2396,8 @@
 /// \param __b
 ///    A [2 x i64] vector containing one of the source operands.
 /// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,
+                                                           __m128i __b) {
   return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
 }
 
@@ -2552,9 +2417,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A [2 x i64] vector containing the sums of the sets of absolute
 ///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a,
+                                                          __m128i __b) {
   return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
 }
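 
 /* A sketch (illustration only): a 16-byte sum-of-absolute-differences cost,
    the building block of motion estimation. demo_sad16 is hypothetical and
    assumes both pointers reference at least 16 readable bytes. */
 static unsigned demo_sad16(const void *p, const void *q) {
   __m128i sad = _mm_sad_epu8(_mm_loadu_si128((const __m128i_u *)p),
                              _mm_loadu_si128((const __m128i_u *)q));
   /* The two partial sums sit in bits [15:0] and [79:64]. */
   return (unsigned)(_mm_cvtsi128_si32(sad) +
                     _mm_cvtsi128_si32(_mm_srli_si128(sad, 8)));
 }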
 
@@ -2570,9 +2434,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v16qu)__a - (__v16qu)__b);
 }
 
@@ -2588,9 +2451,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v8hu)__a - (__v8hu)__b);
 }
 
@@ -2606,9 +2468,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v4su)__a - (__v4su)__b);
 }
 
@@ -2625,9 +2486,8 @@
 ///    A 64-bit integer vector containing the subtrahend.
 /// \returns A 64-bit integer vector containing the difference of the values in
 ///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a,
+                                                            __m64 __b) {
   return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
 }
 
@@ -2643,9 +2503,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a - (__v2du)__b);
 }
 
@@ -2664,10 +2523,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
 }
 
 /// Subtracts corresponding 16-bit signed integer values in the input and
@@ -2685,10 +2543,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Subtracts corresponding 8-bit unsigned integer values in the input
@@ -2705,10 +2562,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the unsigned integer
 ///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Subtracts corresponding 16-bit unsigned integer values in the input
@@ -2725,10 +2581,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the unsigned integer
 ///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
 }
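 
 /* A short sketch (illustration only): unsigned saturating subtraction clamps
    at zero, giving a branch-free "difference or zero". demo_subs_floor is
    hypothetical. */
 static __m128i demo_subs_floor(__m128i a, __m128i b) {
   /* Lanes where b >= a become 0 instead of wrapping around. */
   return _mm_subs_epu8(a, b);
 }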
 
 /// Performs a bitwise AND of two 128-bit integer vectors.
@@ -2743,9 +2598,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise AND of the values
 ///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a & (__v2du)__b);
 }
 
@@ -2763,9 +2617,8 @@
 ///    A 128-bit vector containing the right source operand.
 /// \returns A 128-bit integer vector containing the bitwise AND of the one's
 ///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
+                                                              __m128i __b) {
   return (__m128i)(~(__v2du)__a & (__v2du)__b);
 }
 
 /// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2780,9 +2633,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise OR of the values
 ///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v2du)__a | (__v2du)__b);
 }
 
@@ -2798,9 +2650,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
 ///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a ^ (__v2du)__b);
 }
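 
 /* A short sketch (illustration only): the classic branch-free select built
    from the bitwise operations above; each mask lane must be all-ones or
    all-zeros. demo_select is hypothetical. */
 static __m128i demo_select(__m128i mask, __m128i a, __m128i b) {
   /* (mask & a) | (~mask & b): picks a where the mask is set, else b. */
   return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
 }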
 
@@ -2821,11 +2672,13 @@
 ///    An immediate value specifying the number of bytes to left-shift operand
 ///    \a a.
 /// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_slli_si128(a, imm)                                                 \
+  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
-#define _mm_bslli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bslli_si128(a, imm)                                                \
+  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
 /// Left-shifts each 16-bit value in the 128-bit integer vector operand
 ///    by the specified number of bits. Low-order bits are cleared.
@@ -2840,9 +2693,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
 }
 
@@ -2859,9 +2711,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -2878,9 +2729,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
 }
 
@@ -2897,9 +2747,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
 }
 
@@ -2916,9 +2765,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,
+                                                            int __count) {
   return __builtin_ia32_psllqi128((__v2di)__a, __count);
 }
 
@@ -2935,9 +2783,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,
+                                                           __m128i __count) {
   return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
 }
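 
 /* A short sketch (illustration only): the two shift flavors above. The
    _slli_ forms take the count as an int; the _sll_ forms read it from bits
    [63:0] of a vector, so a count computed at run time works too. demo_shifts
    is hypothetical. */
 static __m128i demo_shifts(__m128i v, int n) {
   __m128i a = _mm_slli_epi32(v, 4);                   /* count as an int */
   __m128i b = _mm_sll_epi32(v, _mm_cvtsi32_si128(n)); /* count in a vector */
   return _mm_xor_si128(a, b);
 }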
 
@@ -2955,9 +2802,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
 }
 
@@ -2975,9 +2821,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -2995,9 +2840,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
 }
 
@@ -3015,9 +2859,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
 }
 
@@ -3038,11 +2881,13 @@
 ///    An immediate value specifying the number of bytes to right-shift operand
 ///    \a a.
 /// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_srli_si128(a, imm)                                                 \
+  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
-#define _mm_bsrli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bsrli_si128(a, imm)                                                \
+  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
 /// Right-shifts each of the 16-bit values in the 128-bit integer vector
 ///    operand by the specified number of bits. High-order bits are cleared.
@@ -3057,9 +2902,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
 }
 
@@ -3076,9 +2920,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -3095,9 +2938,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
 }
 
@@ -3114,9 +2956,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
 }
 
@@ -3133,9 +2974,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,
+                                                            int __count) {
   return __builtin_ia32_psrlqi128((__v2di)__a, __count);
 }
 
@@ -3152,9 +2992,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
+                                                           __m128i __count) {
   return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
 }
 
@@ -3171,9 +3010,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
+                                                            __m128i __b) {
   return (__m128i)((__v16qi)__a == (__v16qi)__b);
 }
 
@@ -3190,9 +3028,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hi)__a == (__v8hi)__b);
 }
 
@@ -3209,9 +3046,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v4si)__a == (__v4si)__b);
 }
 
@@ -3229,9 +3065,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
+                                                            __m128i __b) {
   /* This function always performs a signed comparison, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
   return (__m128i)((__v16qs)__a > (__v16qs)__b);
@@ -3252,9 +3087,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hi)__a > (__v8hi)__b);
 }
 
@@ -3273,9 +3107,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v4si)__a > (__v4si)__b);
 }
 
@@ -3294,9 +3127,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
+                                                            __m128i __b) {
   return _mm_cmpgt_epi8(__b, __a);
 }
 
@@ -3315,9 +3147,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
+                                                             __m128i __b) {
   return _mm_cmpgt_epi16(__b, __a);
 }
 
@@ -3336,9 +3167,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,
+                                                             __m128i __b) {
   return _mm_cmpgt_epi32(__b, __a);
 }
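 
 /* A sketch (illustration only): the comparisons above return all-ones or
    all-zeros lane masks, which compose with the bitwise select idiom. The
    hypothetical demo_abs_epi32 builds absolute value that way. */
 static __m128i demo_abs_epi32(__m128i v) {
   __m128i neg = _mm_cmpgt_epi32(_mm_setzero_si128(), v); /* lanes with v < 0 */
   __m128i minus_v = _mm_sub_epi32(_mm_setzero_si128(), v);
   return _mm_or_si128(_mm_and_si128(neg, minus_v),
                       _mm_andnot_si128(neg, v));
 }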
 
@@ -3360,9 +3190,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    converted value of the second operand. The upper 64 bits are copied from
 ///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,
+                                                            long long __b) {
   __a[0] = __b;
   return __a;
 }
@@ -3378,9 +3207,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {
   return __builtin_ia32_cvtsd2si64((__v2df)__a);
 }
 
@@ -3396,9 +3223,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) {
   return __builtin_ia32_cvttsd2si64((__v2df)__a);
 }
 #endif
@@ -3412,10 +3237,8 @@
 /// \param __a
 ///    A 128-bit integer vector.
 /// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {
+  return (__m128) __builtin_convertvector((__v4si)__a, __v4sf);
 }
 
 /// Converts a vector of [4 x float] into a vector of [4 x i32].
@@ -3428,9 +3251,7 @@
 ///    A 128-bit vector of [4 x float].
 /// \returns A 128-bit integer vector of [4 x i32] containing the converted
 ///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {
   return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
 }
 
@@ -3445,9 +3266,7 @@
 /// \param __a
 ///    A 128-bit vector of [4 x float].
 /// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {
   return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
 }
 
@@ -3461,29 +3280,24 @@
 /// \param __a
 ///    A 32-bit signed integer operand.
 /// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
+  return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
 }
 
-#ifdef __x86_64__
 /// Returns a vector of [2 x i64] where the lower element is the input
 ///    operand and the upper element is zero.
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction
+/// in 64-bit mode.
 ///
 /// \param __a
 ///    A 64-bit signed integer operand containing the value to be converted.
 /// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
+  return __extension__(__m128i)(__v2di){__a, 0};
 }
-#endif
 
 /// Moves the least significant 32 bits of a vector of [4 x i32] to a
 ///    32-bit signed integer value.
@@ -3496,14 +3310,11 @@
 ///    A vector of [4 x i32]. The least significant 32 bits are moved to the
 ///    destination.
 /// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
   __v4si __b = (__v4si)__a;
   return __b[0];
 }
 
-#ifdef __x86_64__
 /// Moves the least significant 64 bits of a vector of [2 x i64] to a
 ///    64-bit signed integer value.
 ///
@@ -3515,12 +3326,9 @@
 ///    A vector of [2 x i64]. The least significant 64 bits are moved to the
 ///    destination.
 /// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {
   return __a[0];
 }
-#endif
 
 /// Moves packed integer values from an aligned 128-bit memory location
 ///    to elements in a 128-bit integer vector.
@@ -3533,8 +3341,7 @@
 ///    An aligned pointer to a memory location containing integer values.
 /// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
+_mm_load_si128(__m128i const *__p) {
   return *__p;
 }
 
@@ -3549,12 +3356,11 @@
 ///    A pointer to a memory location containing integer values.
 /// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
+_mm_loadu_si128(__m128i_u const *__p) {
   struct __loadu_si128 {
     __m128i_u __v;
   } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si128*)__p)->__v;
+  return ((const struct __loadu_si128 *)__p)->__v;
 }
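
_mm_load_si128 requires a 16-byte-aligned pointer (a misaligned address faults), whereas _mm_loadu_si128 accepts any address; the packed, may_alias wrapper struct above is how the header tells the compiler the access may be unaligned. A minimal sketch, assuming C11 alignas:

#include <emmintrin.h>
#include <stdalign.h>

void load_demo(const unsigned char *bytes /* >= 17 bytes */) {
  alignas(16) int buf[4] = {1, 2, 3, 4};
  __m128i a = _mm_load_si128((const __m128i *)buf);            /* aligned   */
  __m128i u = _mm_loadu_si128((const __m128i_u *)(bytes + 1)); /* unaligned */
  (void)a; (void)u;
}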
 
 /// Returns a vector of [2 x i64] where the lower element is taken from
@@ -3570,12 +3376,12 @@
 /// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
 ///    moved value. The higher order bits are cleared.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
+_mm_loadl_epi64(__m128i_u const *__p) {
   struct __mm_loadl_epi64_struct {
     long long __u;
   } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+  return __extension__(__m128i){
+      ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};
 }
 
 /// Generates a 128-bit vector of [4 x i32] with unspecified content.
@@ -3587,9 +3393,7 @@
 /// This intrinsic has no corresponding instruction.
 ///
 /// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) {
   return (__m128i)__builtin_ia32_undef128();
 }
 
@@ -3609,10 +3413,9 @@
 ///    destination vector of [2 x i64].
 /// \returns An initialized 128-bit vector of [2 x i64] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1,
+                                                            long long __q0) {
+  return __extension__(__m128i)(__v2di){__q0, __q1};
 }
 
 /// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
@@ -3631,9 +3434,8 @@
 ///    destination vector of [2 x i64].
 /// \returns An initialized 128-bit vector of [2 x i64] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1,
+                                                           __m64 __q0) {
   return _mm_set_epi64x((long long)__q1, (long long)__q0);
 }
 
@@ -3659,10 +3461,9 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [4 x i32] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,
+                                                           int __i1, int __i0) {
+  return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
 }
 
 /// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
@@ -3700,9 +3501,10 @@
 /// \returns An initialized 128-bit vector of [8 x i16] containing the values
 ///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,
+              short __w2, short __w1, short __w0) {
+  return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
+                                        __w4, __w5, __w6, __w7};
 }
 
 /// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
@@ -3748,9 +3550,12 @@
 /// \returns An initialized 128-bit vector of [16 x i8] containing the values
 ///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,
+             char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
+             char __b4, char __b3, char __b2, char __b1, char __b0) {
+  return __extension__(__m128i)(__v16qi){
+      __b0, __b1, __b2,  __b3,  __b4,  __b5,  __b6,  __b7,
+      __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
 }
 
 /// Initializes both values in a 128-bit integer vector with the
@@ -3766,9 +3571,7 @@
 ///    vector.
 /// \returns An initialized 128-bit integer vector of [2 x i64] with both
 ///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) {
   return _mm_set_epi64x(__q, __q);
 }
 
@@ -3785,9 +3588,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [2 x i64] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) {
   return _mm_set_epi64(__q, __q);
 }
 
@@ -3804,9 +3605,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [4 x i32] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) {
   return _mm_set_epi32(__i, __i, __i, __i);
 }
 
@@ -3823,9 +3622,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [8 x i16] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) {
   return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
 }
 
@@ -3842,10 +3639,9 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [16 x i8] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) {
+  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+                      __b, __b, __b, __b, __b);
 }
 
 /// Constructs a 128-bit integer vector, initialized in reverse order
@@ -3862,9 +3658,8 @@
 ///    A 64-bit integral value used to initialize the upper 64 bits of the
 ///    result.
 /// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0,
+                                                            __m64 __q1) {
   return _mm_set_epi64(__q1, __q0);
 }
 
@@ -3885,9 +3680,9 @@
 /// \param __i3
 ///    A 32-bit integral value used to initialize bits [127:96] of the result.
 /// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1,
+                                                            int __i2,
+                                                            int __i3) {
   return _mm_set_epi32(__i3, __i2, __i1, __i0);
 }
 
@@ -3917,8 +3712,8 @@
 ///    A 16-bit integral value used to initialize bits [127:112] of the result.
 /// \returns An initialized 128-bit integer vector.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,
+               short __w5, short __w6, short __w7) {
   return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
 }
 
@@ -3964,9 +3759,11 @@
 ///    An 8-bit integral value used to initialize bits [127:120] of the result.
 /// \returns An initialized 128-bit integer vector.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+              char __b6, char __b7, char __b8, char __b9, char __b10,
+              char __b11, char __b12, char __b13, char __b14, char __b15) {
+  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
+                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
 }
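
The _mm_set_* initializers take arguments highest element first, while the _mm_setr_* variants take them in memory order; the two calls below build identical vectors. A minimal sketch:

#include <emmintrin.h>

void set_order_demo(void) {
  __m128i a = _mm_set_epi32(3, 2, 1, 0);  /* element 0 = 0, ..., element 3 = 3 */
  __m128i b = _mm_setr_epi32(0, 1, 2, 3); /* same vector, low-to-high order    */
  (void)a; (void)b;
}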
 
 /// Creates a 128-bit integer vector initialized to zero.
@@ -3977,10 +3774,8 @@
 ///
 /// \returns An initialized 128-bit integer vector with all elements set to
 ///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) {
+  return __extension__(__m128i)(__v2di){0LL, 0LL};
 }
 
 /// Stores a 128-bit integer vector to a memory location aligned on a
@@ -3995,9 +3790,8 @@
 ///    values.
 /// \param __b
 ///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p,
+                                                          __m128i __b) {
   *__p = __b;
 }
 
@@ -4011,13 +3805,12 @@
 ///    A pointer to a memory location that will receive the integer values.
 /// \param __b
 ///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p,
+                                                           __m128i __b) {
   struct __storeu_si128 {
     __m128i_u __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
+  ((struct __storeu_si128 *)__p)->__v = __b;
 }
 
 /// Stores a 64-bit integer value from the low element of a 128-bit integer
@@ -4032,13 +3825,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si64 {
     long long __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+  ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];
 }
 
 /// Stores a 32-bit integer value from the low element of a 128-bit integer
@@ -4053,13 +3845,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si32 {
     int __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+  ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];
 }
 
 /// Stores a 16-bit integer value from the low element of a 128-bit integer
@@ -4074,13 +3865,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si16 {
     short __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+  ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];
 }
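
The storeu_si64/si32/si16 helpers write only the low element and impose no alignment requirement, which makes them convenient for the scalar tail of a vectorized loop. A minimal sketch:

#include <emmintrin.h>
#include <stdint.h>

void tail_store(uint8_t *dst, __m128i v) {
  _mm_storeu_si32(dst, v); /* writes bits [31:0] of v to any address */
}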
 
 /// Moves bytes selected by the mask from the first operand to the
@@ -4104,9 +3894,9 @@
 /// \param __p
 ///    A pointer to an unaligned 128-bit memory location where the specified
 ///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d,
+                                                              __m128i __n,
+                                                              char *__p) {
   __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
 }
 
@@ -4123,13 +3913,12 @@
 /// \param __a
 ///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
 ///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p,
+                                                           __m128i __a) {
   struct __mm_storel_epi64_struct {
     long long __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+  ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];
 }
 
 /// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
@@ -4146,10 +3935,9 @@
 ///    A pointer to the 128-bit aligned memory location used to store the value.
 /// \param __a
 ///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(double *__p,
+                                                        __m128d __a) {
+  __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);
 }
 
 /// Stores a 128-bit integer vector to a 128-bit aligned memory location.
@@ -4165,10 +3953,9 @@
 ///    A pointer to the 128-bit aligned memory location used to store the value.
 /// \param __a
 ///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(__m128i *__p,
+                                                           __m128i __a) {
+  __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);
 }
 
 /// Stores a 32-bit integer value in the specified memory location.
@@ -4184,9 +3971,9 @@
 ///    A pointer to the 32-bit memory location used to store the value.
 /// \param __a
 ///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
+static __inline__ void
+    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+    _mm_stream_si32(int *__p, int __a) {
   __builtin_ia32_movnti(__p, __a);
 }
 
@@ -4204,9 +3991,9 @@
 ///    A pointer to the 64-bit memory location used to store the value.
 /// \param __a
 ///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
+static __inline__ void
+    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+    _mm_stream_si64(long long *__p, long long __a) {
   __builtin_ia32_movnti64(__p, __a);
 }
 #endif
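
Non-temporal stores bypass the cache hierarchy, which helps for large write-only buffers; they should be followed by a store fence before the data is read elsewhere. A minimal sketch, assuming a 16-byte-aligned destination (_mm_sfence comes from xmmintrin.h, which this header includes):

#include <emmintrin.h>

void fill_nt(__m128i *dst /* 16-byte aligned */, __m128i v, int n) {
  for (int i = 0; i < n; ++i)
    _mm_stream_si128(dst + i, v); /* write-combining store, bypasses cache */
  _mm_sfence();                   /* order the streaming stores            */
}
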
@@ -4225,7 +4012,7 @@
 /// \param __p
 ///    A pointer to the memory location used to identify the cache line to be
 ///    flushed.
-void _mm_clflush(void const * __p);
+void _mm_clflush(void const *__p);
 
 /// Forces strong memory ordering (serialization) between load
 ///    instructions preceding this instruction and load instructions following
@@ -4275,9 +4062,8 @@
 ///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
 ///   written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -4303,9 +4089,8 @@
 ///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
 ///    are written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
 }
 
@@ -4331,9 +4116,8 @@
 ///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
 ///    written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
+                                                              __m128i __b) {
   return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
 }
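
Packing is the standard way to narrow lanes with saturation: two [8 x i16] inputs yield one [16 x i8] output, clamped to the destination range. A minimal sketch narrowing signed words to unsigned bytes:

#include <emmintrin.h>

__m128i words_to_u8(__m128i lo8, __m128i hi8) {
  /* Each i16 is clamped to [0, 255]; lo8 fills bytes 0-7, hi8 bytes 8-15. */
  return _mm_packus_epi16(lo8, hi8);
}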
 
@@ -4342,25 +4126,29 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm_extract_epi16(__m128i a, const int imm);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
 ///
-/// \param __a
+/// \param a
 ///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] selects values from \a __a to be assigned
+/// \param imm
+///    An immediate value. Bits [2:0] select values from \a a to be assigned
 ///    to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
+///    000: assign values from bits [15:0] of \a a. \n
+///    001: assign values from bits [31:16] of \a a. \n
+///    010: assign values from bits [47:32] of \a a. \n
+///    011: assign values from bits [63:48] of \a a. \n
+///    100: assign values from bits [79:64] of \a a. \n
+///    101: assign values from bits [95:80] of \a a. \n
+///    110: assign values from bits [111:96] of \a a. \n
+///    111: assign values from bits [127:112] of \a a.
 /// \returns An integer, whose lower 16 bits are selected from the 128-bit
 ///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
+#define _mm_extract_epi16(a, imm)                                              \
+  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a),      \
                                                     (int)(imm)))
 
 /// Constructs a 128-bit integer vector by first making a copy of the
@@ -4370,21 +4158,25 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
 ///
-/// \param __a
+/// \param a
 ///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
 ///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
+///    the lower 16 bits of \a b.
+/// \param b
 ///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
+///    result beginning at an offset specified by \a imm.
+/// \param imm
 ///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
+///    lower 16 bits of \a b are written.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
+#define _mm_insert_epi16(a, b, imm)                                            \
+  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b),        \
                                         (int)(imm)))
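
Both of these are macros rather than inline functions because the selector must be a compile-time immediate, which is also why the new \code signatures document plain a/b/imm parameters. A minimal sketch; the lane indices are illustrative:

#include <emmintrin.h>

__m128i bump_lane0(__m128i v) {
  int w3 = _mm_extract_epi16(v, 3);      /* read lane 3, zero-extended to int */
  return _mm_insert_epi16(v, w3 + 1, 0); /* replace lane 0                    */
}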
 
 /// Copies the values of the most significant bits from each 8-bit
@@ -4399,9 +4191,7 @@
 ///    A 128-bit integer vector containing the values with bits to be extracted.
 /// \returns The most significant bits from each 8-bit element in \a __a,
 ///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
   return __builtin_ia32_pmovmskb128((__v16qi)__a);
 }
 
@@ -4431,9 +4221,12 @@
 ///    00: assign values from bits [31:0] of \a a. \n
 ///    01: assign values from bits [63:32] of \a a. \n
 ///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
+///    11: assign values from bits [127:96] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
+#define _mm_shuffle_epi32(a, imm)                                              \
   ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
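
To make the new _MM_SHUFFLE note concrete: the macro packs four 2-bit source-lane selectors, highest destination lane first. A minimal sketch broadcasting and reversing a [4 x i32] vector:

#include <emmintrin.h>

void shuffle_demo(__m128i v) {
  __m128i bcast = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); /* every lane = lane 0 */
  __m128i rev   = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); /* lane order reversed */
  (void)bcast; (void)rev;
}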
 
 /// Constructs a 128-bit integer vector by shuffling four lower 16-bit
@@ -4462,8 +4255,11 @@
 ///    01: assign values from bits [31:16] of \a a. \n
 ///    10: assign values from bits [47:32] of \a a. \n
 ///    11: assign values from bits [63:48] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
+#define _mm_shufflelo_epi16(a, imm)                                            \
   ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
 
 /// Constructs a 128-bit integer vector by shuffling four upper 16-bit
@@ -4492,8 +4288,11 @@
 ///    01: assign values from bits [95:80] of \a a. \n
 ///    10: assign values from bits [111:96] of \a a. \n
 ///    11: assign values from bits [127:112] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
+#define _mm_shufflehi_epi16(a, imm)                                            \
   ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
 
 /// Unpacks the high-order (index 8-15) values from two 128-bit vectors
@@ -4525,10 +4324,11 @@
 ///    Bits [119:112] are written to bits [111:104] of the result. \n
 ///    Bits [127:120] are written to bits [127:120] of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
+                                                               __m128i __b) {
+  return (__m128i)__builtin_shufflevector(
+      (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
+      16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
 }
 
 /// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
@@ -4552,10 +4352,10 @@
 ///    Bits [111:96] are written to bits [95:80] of the result. \n
 ///    Bits [127:112] are written to bits [127:112] of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
+                                          8 + 5, 6, 8 + 6, 7, 8 + 7);
 }
 
 /// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
@@ -4575,10 +4375,10 @@
 ///    Bits [95:64] are written to bits [63:32] of the destination. \n
 ///    Bits [127:96] are written to bits [127:96] of the destination.
 /// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
+                                          4 + 3);
 }
 
 /// Unpacks the high-order 64-bit elements from two 128-bit vectors of
@@ -4596,10 +4396,9 @@
 ///    A 128-bit vector of [2 x i64]. \n
 ///    Bits [127:64] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
 }
 
 /// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
@@ -4631,10 +4430,11 @@
 ///    Bits [55:48] are written to bits [111:104] of the result. \n
 ///    Bits [63:56] are written to bits [127:120] of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
+                                                               __m128i __b) {
+  return (__m128i)__builtin_shufflevector(
+      (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
+      16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
 }
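
A common use of the low unpacks is widening: interleaving with a zero vector zero-extends each lane. A minimal sketch widening the low eight unsigned bytes to 16-bit words:

#include <emmintrin.h>

__m128i zext_lo_u8_to_u16(__m128i bytes) {
  /* Byte i of the low half becomes the low byte of word i; high bytes are 0. */
  return _mm_unpacklo_epi8(bytes, _mm_setzero_si128());
}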
 
 /// Unpacks the low-order (index 0-3) values from each of the two 128-bit
@@ -4659,10 +4459,10 @@
 ///    Bits [47:32] are written to bits [95:80] of the result. \n
 ///    Bits [63:48] are written to bits [127:112] of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
+                                          8 + 1, 2, 8 + 2, 3, 8 + 3);
 }
 
 /// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
@@ -4682,10 +4482,10 @@
 ///    Bits [31:0] are written to bits [63:32] of the destination. \n
 ///    Bits [63:32] are written to bits [127:96] of the destination.
 /// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
+                                          4 + 1);
 }
 
 /// Unpacks the low-order 64-bit elements from two 128-bit vectors of
@@ -4703,10 +4503,9 @@
 ///    A 128-bit vector of [2 x i64]. \n
 ///    Bits [63:0] are written to bits [127:64] of the destination. \n
 /// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
 }
 
 /// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
@@ -4720,9 +4519,7 @@
 ///    A 128-bit integer vector operand. The lower 64 bits are moved to the
 ///    destination.
 /// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) {
   return (__m64)__a[0];
 }
 
@@ -4737,10 +4534,8 @@
 ///    A 64-bit value.
 /// \returns A 128-bit integer vector. The lower 64 bits contain the value from
 ///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) {
+  return __extension__(__m128i)(__v2di){(long long)__a, 0};
 }
 
 /// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
@@ -4755,9 +4550,7 @@
 ///    destination.
 /// \returns A 128-bit integer vector. The lower 64 bits contain the value from
 ///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) {
   return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
 }
 
@@ -4776,10 +4569,9 @@
 ///    A 128-bit vector of [2 x double]. \n
 ///    Bits [127:64] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a,
+                                                             __m128d __b) {
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);
 }
 
 /// Unpacks the low-order 64-bit elements from two 128-bit vectors
@@ -4797,10 +4589,9 @@
 ///    A 128-bit vector of [2 x double]. \n
 ///    Bits [63:0] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a,
+                                                             __m128d __b) {
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);
 }
 
 /// Extracts the sign bits of the double-precision values in the 128-bit
@@ -4816,13 +4607,10 @@
 ///    be extracted.
 /// \returns The sign bits from each of the double-precision elements in \a __a,
 ///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
   return __builtin_ia32_movmskpd((__v2df)__a);
 }
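
movemask condenses a lane-wise result into a scalar bitmask, which is the usual way to branch on vector comparisons. A minimal sketch testing whether either double is negative:

#include <emmintrin.h>

int any_negative(__m128d v) {
  /* Bit 0 = sign of element 0, bit 1 = sign of element 1. */
  return _mm_movemask_pd(v) != 0;
}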
 
-
 /// Constructs a 128-bit floating-point vector of [2 x double] from two
 ///    128-bit vector parameters of [2 x double], using the immediate-value
 ///     parameter as a specifier.
@@ -4846,9 +4634,12 @@
 ///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
 ///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
 ///    Bit[1] = 1: upper element of \a b copied to upper element of result. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE2 macro.
+///    <c>_MM_SHUFFLE2(b1, b0)</c> can create a 2-bit mask of the form
+///    <c>[b1, b0]</c>.
 /// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+#define _mm_shuffle_pd(a, b, i)                                                \
+  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),  \
                                   (int)(i)))
 
 /// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
@@ -4862,9 +4653,7 @@
 ///    A 128-bit floating-point vector of [2 x double].
 /// \returns A 128-bit floating-point vector of [4 x float] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) {
   return (__m128)__a;
 }
 
@@ -4879,9 +4668,7 @@
 ///    A 128-bit floating-point vector of [2 x double].
 /// \returns A 128-bit integer vector containing the same bitwise pattern as the
 ///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) {
   return (__m128i)__a;
 }
 
@@ -4896,9 +4683,7 @@
 ///    A 128-bit floating-point vector of [4 x float].
 /// \returns A 128-bit floating-point vector of [2 x double] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) {
   return (__m128d)__a;
 }
 
@@ -4913,9 +4698,7 @@
 ///    A 128-bit floating-point vector of [4 x float].
 /// \returns A 128-bit integer vector containing the same bitwise pattern as the
 ///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) {
   return (__m128i)__a;
 }
 
@@ -4930,9 +4713,7 @@
 ///    A 128-bit integer vector.
 /// \returns A 128-bit floating-point vector of [4 x float] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) {
   return (__m128)__a;
 }
 
@@ -4947,9 +4728,7 @@
 ///    A 128-bit integer vector.
 /// \returns A 128-bit floating-point vector of [2 x double] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
   return (__m128d)__a;
 }
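
The casts generate no instructions; they only retag the 128-bit value so that integer operations can be applied to floating-point bit patterns. A minimal sketch computing a lane-wise absolute value by clearing sign bits:

#include <emmintrin.h>

__m128 abs_ps(__m128 x) {
  const __m128i sign = _mm_set1_epi32((int)0x80000000);
  __m128i bits = _mm_castps_si128(x);  /* reinterpret, no instruction */
  bits = _mm_andnot_si128(sign, bits); /* clear each sign bit         */
  return _mm_castsi128_ps(bits);
}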
 
@@ -4974,12 +4753,13 @@
 
 #define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
 
-#define _MM_DENORMALS_ZERO_ON   (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
 
 #define _MM_DENORMALS_ZERO_MASK (0x0040U)
 
 #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+#define _MM_SET_DENORMALS_ZERO_MODE(x)                                         \
+  (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
 
 #endif /* __EMMINTRIN_H */
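
The denormals-are-zero macros above parallel the flush-to-zero ones in xmmintrin.h: DAZ makes denormal inputs read as zero, which can avoid large slowdowns in DSP-style loops at the cost of strict IEEE semantics. A minimal usage sketch:

#include <emmintrin.h>

void enable_daz(void) {
  /* Treat denormal inputs as zero; FTZ (outputs) is a separate MXCSR bit. */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
}
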
diff --git a/darwin-x86/lib64/clang/14.0.2/include/enqcmdintrin.h b/darwin-x86/lib64/clang/16.0.2/include/enqcmdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/enqcmdintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h b/darwin-x86/lib64/clang/16.0.2/include/f16cintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/f16cintrin.h
index 13905e6..94a662c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/f16cintrin.h
@@ -65,9 +65,9 @@
 ///    011: Truncate \n
 ///    1XX: Use MXCSR.RC for rounding
 /// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) \
-  ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
-                                                     (imm)))[0]))
+#define _cvtss_sh(a, imm) __extension__ ({ \
+  (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+                                                     (imm)))[0]); })
 
 /// Converts a 128-bit vector containing 32-bit float values into a
 ///    128-bit vector containing 16-bit half-precision float values.
diff --git a/darwin-x86/lib64/clang/14.0.2/include/float.h b/darwin-x86/lib64/clang/16.0.2/include/float.h
similarity index 74%
rename from darwin-x86/lib64/clang/14.0.2/include/float.h
rename to darwin-x86/lib64/clang/16.0.2/include/float.h
index ed610b2..5dace7a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/float.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/float.h
@@ -14,10 +14,11 @@
  * additional definitions provided for Windows.
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  *
- * Also fall back on Darwin to allow additional definitions and
+ * Also fall back on Darwin and AIX to allow additional definitions and
  * implementation-defined values.
  */
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) ||        \
+     defined(_AIX)) &&                                                         \
     __STDC_HOSTED__ && __has_include_next(<float.h>)
 
 /* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@@ -37,7 +38,10 @@
 #  undef FLT_MANT_DIG
 #  undef DBL_MANT_DIG
 #  undef LDBL_MANT_DIG
-#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef DECIMAL_DIG
 #  endif
 #  undef FLT_DIG
@@ -64,7 +68,10 @@
 #  undef FLT_MIN
 #  undef DBL_MIN
 #  undef LDBL_MIN
-#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201703L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef FLT_TRUE_MIN
 #    undef DBL_TRUE_MIN
 #    undef LDBL_TRUE_MIN
@@ -87,7 +94,10 @@
 #define DBL_MANT_DIG __DBL_MANT_DIG__
 #define LDBL_MANT_DIG __LDBL_MANT_DIG__
 
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define DECIMAL_DIG __DECIMAL_DIG__
 #endif
 
@@ -123,7 +133,10 @@
 #define DBL_MIN __DBL_MIN__
 #define LDBL_MIN __LDBL_MIN__
 
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201703L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define FLT_TRUE_MIN __FLT_DENORM_MIN__
 #  define DBL_TRUE_MIN __DBL_DENORM_MIN__
 #  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fma4intrin.h b/darwin-x86/lib64/clang/16.0.2/include/fma4intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fma4intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/fma4intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fmaintrin.h b/darwin-x86/lib64/clang/16.0.2/include/fmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fmaintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/16.0.2/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h
rename to darwin-x86/lib64/clang/16.0.2/include/fuzzer/FuzzedDataProvider.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fxsrintrin.h b/darwin-x86/lib64/clang/16.0.2/include/fxsrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fxsrintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/fxsrintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/gfniintrin.h b/darwin-x86/lib64/clang/16.0.2/include/gfniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/gfniintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/gfniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h b/darwin-x86/lib64/clang/16.0.2/include/hexagon_circ_brev_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h
rename to darwin-x86/lib64/clang/16.0.2/include/hexagon_circ_brev_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/darwin-x86/lib64/clang/16.0.2/include/hexagon_protos.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
rename to darwin-x86/lib64/clang/16.0.2/include/hexagon_protos.h
index cdffd93..2642f3c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/hexagon_protos.h
@@ -8003,17 +8003,6 @@
 #define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
 #if __HEXAGON_ARCH__ >= 65
 /* ==========================================================================
    Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h b/darwin-x86/lib64/clang/16.0.2/include/hexagon_types.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
rename to darwin-x86/lib64/clang/16.0.2/include/hexagon_types.h
index 6958809..029727c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/hexagon_types.h
@@ -1177,37 +1177,6 @@
 
 #endif /* __cplusplus */
 
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
 // V65 Vector types
 #if __HVX_ARCH__ >= 65
 #if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@
 #endif /* defined __HVX__ &&  (__HVX_LENGTH__ == 64) */
 #endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
 #endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
 
 /* Predicates */
 
diff --git a/darwin-x86/lib64/clang/16.0.2/include/hlsl.h b/darwin-x86/lib64/clang/16.0.2/include/hlsl.h
new file mode 100644
index 0000000..a9dce45
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/hlsl.h
@@ -0,0 +1,15 @@
+//===----- hlsl.h - HLSL definitions --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_H_
+#define _HLSL_H_
+
+#include "hlsl/hlsl_basic_types.h"
+#include "hlsl/hlsl_intrinsics.h"
+
+#endif //_HLSL_H_
diff --git a/darwin-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h b/darwin-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h
new file mode 100644
index 0000000..e68715f
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h
@@ -0,0 +1,64 @@
+//===----- hlsl_basic_types.h - HLSL definitions for basic types ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_BASIC_TYPES_H_
+#define _HLSL_HLSL_BASIC_TYPES_H_
+
+// built-in scalar data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+// 16-bit integer.
+typedef unsigned short uint16_t;
+typedef short int16_t;
+#endif
+
+// unsigned 32-bit integer.
+typedef unsigned int uint;
+
+// 64-bit integer.
+typedef unsigned long uint64_t;
+typedef long int64_t;
+
+// built-in vector data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<int16_t, 2> int16_t2;
+typedef vector<int16_t, 3> int16_t3;
+typedef vector<int16_t, 4> int16_t4;
+typedef vector<uint16_t, 2> uint16_t2;
+typedef vector<uint16_t, 3> uint16_t3;
+typedef vector<uint16_t, 4> uint16_t4;
+#endif
+
+typedef vector<int, 2> int2;
+typedef vector<int, 3> int3;
+typedef vector<int, 4> int4;
+typedef vector<uint, 2> uint2;
+typedef vector<uint, 3> uint3;
+typedef vector<uint, 4> uint4;
+typedef vector<int64_t, 2> int64_t2;
+typedef vector<int64_t, 3> int64_t3;
+typedef vector<int64_t, 4> int64_t4;
+typedef vector<uint64_t, 2> uint64_t2;
+typedef vector<uint64_t, 3> uint64_t3;
+typedef vector<uint64_t, 4> uint64_t4;
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<half, 2> half2;
+typedef vector<half, 3> half3;
+typedef vector<half, 4> half4;
+#endif
+
+typedef vector<float, 2> float2;
+typedef vector<float, 3> float3;
+typedef vector<float, 4> float4;
+typedef vector<double, 2> double2;
+typedef vector<double, 3> double3;
+typedef vector<double, 4> double4;
+
+#endif //_HLSL_HLSL_BASIC_TYPES_H_
diff --git a/darwin-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h b/darwin-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h
new file mode 100644
index 0000000..43f8dfe
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h
@@ -0,0 +1,102 @@
+//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_INTRINSICS_H_
+#define _HLSL_HLSL_INTRINSICS_H_
+
+__attribute__((availability(shadermodel, introduced = 6.0)))
+__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint
+WaveActiveCountBits(bool bBit);
+
+// abs builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t abs(int16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t2 abs(int16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t3 abs(int16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t4 abs(int16_t4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) half abs(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half2 abs(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half3 abs(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half4 abs(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int abs(int);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int2 abs(int2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int3 abs(int3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int4 abs(int4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) float
+abs(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float2 abs(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float3 abs(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float4 abs(float4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t abs(int64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t2 abs(int64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t3 abs(int64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t4 abs(int64_t4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) double
+abs(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double2 abs(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double3 abs(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double4 abs(double4);
+
+// sqrt builtins
+__attribute__((clang_builtin_alias(__builtin_sqrt))) double sqrt(double In);
+__attribute__((clang_builtin_alias(__builtin_sqrtf))) float sqrt(float In);
+
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_sqrtf16))) half sqrt(half In);
+#endif
+
+// ceil builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half ceil(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half2 ceil(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half3 ceil(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half4 ceil(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) float
+ceil(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float2 ceil(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float3 ceil(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float4 ceil(float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) double
+ceil(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double2 ceil(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double3 ceil(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double4 ceil(double4);
+
+#endif //_HLSL_HLSL_INTRINSICS_H_
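The new header above wires HLSL intrinsic names to clang builtins through clang_builtin_alias, so every overload of abs, sqrt, and ceil lowers exactly like the named builtin. A minimal C sketch of what the aliases target, assuming a recent clang; the typedef and function name here are illustrative, not from the header:

typedef float float4 __attribute__((ext_vector_type(4)));

/* HLSL abs(float4) resolves to the same elementwise builtin */
static float4 demo_abs(float4 v) {
  return __builtin_elementwise_abs(v);
}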
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h b/darwin-x86/lib64/clang/16.0.2/include/hresetintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/hresetintrin.h
index 13e31a2..646f6c1 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/hresetintrin.h
@@ -25,7 +25,7 @@
 ///
 /// This intrinsic corresponds to the <c> HRESET </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///    IF __eax == 0
 ///      // nop
 ///    ELSE
@@ -35,7 +35,7 @@
 ///        FI
 ///      ENDFOR
 ///    FI
-/// \endoperation
+/// \endcode
 static __inline void __DEFAULT_FN_ATTRS
 _hreset(int __eax)
 {
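The only change to hresetintrin.h is Doxygen markup: the pseudo-operation block now uses \code{.operation} so current doxygen renders it verbatim. The intrinsic itself is unchanged. A minimal usage sketch, assuming an HRESET-capable target built with clang's -mhreset; the header is normally reached via immintrin.h:

#include <immintrin.h>

/* bits set in the mask select which processor history to reset */
static void reset_history(unsigned int mask) {
  _hreset((int)mask);   /* corresponds to the HRESET instruction */
}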
diff --git a/darwin-x86/lib64/clang/14.0.2/include/htmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/htmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/htmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/htmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/htmxlintrin.h b/darwin-x86/lib64/clang/16.0.2/include/htmxlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/htmxlintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/darwin-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
similarity index 67%
rename from darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
rename to darwin-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
index 41ce7a6..7e3679a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
 //===----------------------------------------------------------------------===//
 
 
-
 #ifndef _HVX_HEXAGON_PROTOS_H_
 #define _HVX_HEXAGON_PROTOS_H_ 1
 
@@ -28,7 +27,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
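From here down, every HVX protos macro changes from a bare alias to a function-like macro that forwards its arguments into the wrapped builtin. The observable difference is that the name alone no longer expands; only a call does, which keeps unparenthesized uses from silently pasting the wrapper text. A minimal sketch under the usual Hexagon SDK headers (the include names and the byte-offset reading of the second operand are assumptions):

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* extract the 32-bit word of v selected by the byte offset in the
   second operand; expands to the V6_extractw builtin call */
static int first_word(HVX_Vector v) {
  return Q6_R_vextract_VR(v, 0);
}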
@@ -39,7 +38,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
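The predicate (Q) macros gain more than argument forwarding: because the wrapped builtins traffic in HVX_Vector values rather than true predicate registers, each Q operand is first converted with vandvrt(Q, -1) and each Q result is converted back with vandqrt(..., -1). The -1 mask round-trips the canonical all-ones-per-lane representation, so the conversion pair is lossless. A short sketch using the macro defined just above (type names per the prototypes in this header):

/* q1 && q2, lane-wise; the macro round-trips through vandvrt/vandqrt */
static HVX_VectorPred both(HVX_VectorPred q1, HVX_VectorPred q2) {
  return Q6_Q_and_QQ(q1, q2);
}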
@@ -83,7 +82,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
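The predicated vector stores follow the same Q conversion: the Qv operand is lowered through vandvrt before it reaches the store builtin. A sketch of the masked store defined above; the pointer type for the address operand is an assumption, since the prototype line is not shown in this hunk:

/* store v to *dst only in the lanes where q is set */
static void store_where(HVX_VectorPred q, HVX_Vector *dst, HVX_Vector v) {
  Q6_vmem_QRIV(q, dst, v);
}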
@@ -193,7 +192,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
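Note that Q6_V_vzero above is now declared with an empty parameter list, so it must be written as a call. A one-line sketch:

/* bare Q6_V_vzero no longer expands; the parentheses are required */
static HVX_Vector zeros(void) { return Q6_V_vzero(); }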
@@ -952,7 +951,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
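Alongside the mechanical macro rewrite, several single-vector vdmpy entries in this stretch have their documented Instruction Type corrected from CVI_VX_DV to CVI_VX, which would match single-multiply rather than double-resource usage (that reading is an inference from the naming, not stated in the diff). Usage is unchanged; a sketch of the saturating form documented just above:

/* Vd.w = vdmpy(Vu.h, Vv.h):sat, per the prototype in the comment */
static HVX_Vector dot2_sat(HVX_Vector u, HVX_Vector v) {
  return Q6_Vw_vdmpy_VhVh_sat(u, v);
}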
@@ -1194,7 +1193,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
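
Editor's note: the rewritten Q6_Q_* compare macros above all follow one pattern. The raw compare builtin still produces a hardware predicate, but the wrapper converts it to vector form with vandqrt(...,-1) on the way out, and the accumulating forms convert the incoming Qx back to a predicate with vandvrt(...,-1) before use. A minimal caller-side sketch, assuming a Hexagon toolchain with HVX enabled (-mhvx) and the usual HVX_Vector typedef from hexagon_types.h:

#include <hexagon_types.h>
#include <hexagon_protos.h>

/* Build q[i] = (a.w[i] > b.w[i]) && (a.w[i] > c.w[i]). Under this
 * wrapped header the Q value travels as an ordinary HVX_Vector (note
 * the trailing vandqrt in each macro body), so it can be stored and
 * passed like any other vector. */
static HVX_Vector gt_both(HVX_Vector a, HVX_Vector b, HVX_Vector c)
{
    HVX_Vector q = Q6_Q_vcmp_gt_VwVw(a, b);
    return Q6_Q_vcmp_gtand_QVwVw(q, a, c);
}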
 
 #if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
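
A small usage sketch for the two logical-shift shapes above, continuing the setup from the first sketch: the R form shifts every lane by one scalar count, while the Vv form takes a per-lane count.

static HVX_Vector halve_uh(HVX_Vector v)
{
    return Q6_Vuh_vlsr_VuhR(v, 1);          /* each uh lane >>= 1 */
}

static HVX_Vector lsr_per_lane(HVX_Vector v, HVX_Vector counts)
{
    return Q6_Vw_vlsr_VwVw(v, counts);      /* lane i >>= counts.w[i] */
}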
 
 #if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
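
For the lookup forms above, Vu supplies per-byte indices and Vv supplies table bytes; Rt selects which 32-entry block of the table participates, and the or-acc variants combine several blocks into one result. A hedged sketch (the Rt encoding is an assumption from the HVX manual, not something this header shows):

static HVX_Vector lut_block0(HVX_Vector idx, HVX_Vector table)
{
    /* Look each index byte up in block 0 of the 32-entry byte table. */
    return Q6_Vb_vlut32_VbVbR(idx, table, 0);
}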
 
 #if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
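
The vmax/vmin pairs above compose into the usual per-lane clamp; a minimal sketch for signed halfwords:

static HVX_Vector clamp_h(HVX_Vector v, HVX_Vector lo, HVX_Vector hi)
{
    /* max(lo, min(v, hi)) per halfword lane */
    return Q6_Vh_vmax_VhVh(Q6_Vh_vmin_VhVh(v, hi), lo);
}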
 
 #if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
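
The vmpa forms above consume a vector pair (Vuu) and either a scalar Rt packing the signed-byte coefficients or a second pair, widening the products to halfwords. A sketch under the assumption that Rt packs coefficients as described in the HVX manual:

static HVX_VectorPair mpa_bytes(HVX_VectorPair uu, int packed_coeffs)
{
    /* packed_coeffs: signed byte coefficients packed into Rt (assumed). */
    return Q6_Wh_vmpa_WubRb(uu, packed_coeffs);
}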
 
 #if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
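
The W-prefixed multiplies above widen ub*b products to halfwords and return an HVX_VectorPair. The sketch below assumes Q6_V_lo_W/Q6_V_hi_W and Q6_Vh_vadd_VhVh, which are defined elsewhere in this header, to split and recombine the pair:

static HVX_Vector widen_mul_sum(HVX_Vector pixels, int packed_coeffs)
{
    HVX_VectorPair p = Q6_Wh_vmpy_VubRb(pixels, packed_coeffs);
    /* Sum the two widened halves back into one halfword vector. */
    return Q6_Vh_vadd_VhVh(Q6_V_lo_W(p), Q6_V_hi_W(p));
}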
 
 #if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
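
Q6_V_vmux_QVV accepts the same vector-form predicate the Q6_Q_* macros produce (note the vandvrt conversion in its body), which makes the classic compare-then-select idiom a one-liner. Q6_Vw_vmax_VwVw performs this particular selection directly; the sketch only shows the plumbing:

static HVX_Vector select_greater_w(HVX_Vector a, HVX_Vector b)
{
    /* Per word lane: a where a > b, else b. */
    return Q6_V_vmux_QVV(Q6_Q_vcmp_gt_VwVw(a, b), a, b);
}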
 
 #if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
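
A sketch of the saturating narrow above; which operand feeds which half of the result is assumed here (Vu high, Vv low) and should be checked against the HVX manual's lane ordering:

static HVX_Vector narrow_to_ub(HVX_Vector hi_h, HVX_Vector lo_h)
{
    return Q6_Vub_vpack_VhVh_sat(hi_h, lo_h);  /* h lanes -> saturated ub */
}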
 
 #if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
    C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
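
vrmpy forms a four-byte dot product per word lane, so multiplying by the replicated byte 0x01010101 reduces each group of four unsigned bytes to their sum, a common histogram/checksum idiom:

static HVX_Vector sum4_ub(HVX_Vector v)
{
    return Q6_Vuw_vrmpy_VubRub(v, 0x01010101u); /* uw[i] = sum of 4 ub */
}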
 
 #if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
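
The vrsad pair above accumulates |a-b| over groups of four unsigned bytes, the core of block-matching SAD kernels. In the sketch the Iu1 operand is assumed to select an alignment phase of 0 or 1, going only by the immediate field in the prototype:

static HVX_VectorPair sad4(HVX_VectorPair acc, HVX_VectorPair ref,
                           unsigned packed_pix)
{
    /* Accumulate sums of absolute byte differences into acc. */
    return Q6_Wuw_vrsadacc_WuwWubRubI(acc, ref, packed_pix, 0);
}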
 
 #if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
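
The shuffle family above covers the even/odd picks (vshuffe/vshuffo) and the full interleaves (vshuffoe, vshuffvdd). Which input contributes which lanes is assumed below and worth confirming in the HVX manual; the idiom itself is de-interleaving packed data:

static HVX_Vector even_bytes(HVX_Vector hi, HVX_Vector lo)
{
    return Q6_Vb_vshuffe_VbVb(hi, lo);  /* gather even bytes of both inputs */
}

static HVX_Vector odd_bytes(HVX_Vector hi, HVX_Vector lo)
{
    return Q6_Vb_vshuffo_VbVb(hi, lo);  /* gather odd bytes of both inputs */
}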
 
 #if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
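+
+#if __HVX_ARCH__ >= 60
+/* Usage sketch: macros with a vector-predicate (Q) operand do one extra
+   step -- HVX_VectorPred is carried in a full vector here, so the macro
+   first converts it to the builtins' native predicate type via V6_vandvrt
+   with an all-ones mask, as the expansions above show.  Illustration only;
+   the helper name is invented. */
+static inline HVX_Vector example_cond_sub_bytes(HVX_VectorPred q,
+                                                HVX_Vector acc,
+                                                HVX_Vector u) {
+  /* Byte lanes selected by q compute acc - u; the rest keep acc. */
+  return Q6_Vb_condnac_QVbVb(q, acc, u);
+}
+#endif /* usage sketch */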
 
 #if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
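+
+#if __HVX_ARCH__ >= 60
+/* Usage sketch: the vunpack family widens lanes into a vector pair, whose
+   halves can be taken apart with the pair accessors this header defines
+   (Q6_V_lo_W/Q6_V_hi_W are assumed here).  Illustration only; the helper
+   name is invented. */
+static inline HVX_Vector example_widen_low_bytes(HVX_Vector v) {
+  HVX_VectorPair wide = Q6_Wh_vunpack_Vb(v); /* sign-extend bytes to halfwords */
+  return Q6_V_lo_W(wide);                    /* low vector of the pair */
+}
+#endif /* usage sketch */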
 
 #if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
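+
+#if __HVX_ARCH__ >= 62
+/* Usage sketch: scalar (R) operands gain a macro parameter in the same
+   way.  Illustration only; the helper name is invented. */
+static inline HVX_Vector example_splat_byte(void) {
+  /* Broadcasts the low byte of the scalar across every byte lane. */
+  return Q6_Vb_vsplat_R(0x5A);
+}
+#endif /* usage sketch */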
 
 #if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
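+
+#if __HVX_ARCH__ >= 62
+/* Usage sketch: macros that produce a predicate apply the reverse of the
+   Q-input conversion -- the builtin's result is mapped back to the
+   vector-backed HVX_VectorPred via V6_vandqrt with an all-ones mask, as
+   the expansion above shows.  Illustration only; the helper name is
+   invented. */
+static inline HVX_VectorPred example_head_mask(int n) {
+  /* Predicate covering roughly the first n byte lanes (vsetq2 semantics). */
+  return Q6_Q_vsetq2_R(n);
+}
+#endif /* usage sketch */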
 
 #if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
 #endif /* __HEXAGON_ARCH___ >= 65 */
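+
+#if __HVX_ARCH__ >= 65
+/* Usage sketch: even the nullary macro becomes function-like, so callers
+   now write Q6_W_vzero() with parentheses.  Illustration only. */
+static inline HVX_VectorPair example_zero_pair(void) {
+  return Q6_W_vzero(); /* all-zero vector pair */
+}
+#endif /* usage sketch */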
 
 #if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
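+
+#if __HVX_ARCH__ >= 65
+/* Usage sketch: the gather macros are statements rather than value
+   expressions -- the gathered vector is written to memory through the
+   first (address) argument.  Illustration only; the operand types are
+   assumed from the prototypes documented in this header, and the helper
+   name is invented. */
+static inline void example_gather_halfwords(HVX_Vector *dst, int base,
+                                            int region, HVX_Vector offsets) {
+  /* Loads halfwords from base + per-lane offsets and stores them to *dst. */
+  Q6_vgather_ARMVh(dst, base, region, offsets);
+}
+#endif /* usage sketch */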
 
 #if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
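+
+#if __HVX_ARCH__ >= 65
+/* Usage sketch: scatters mirror the gathers -- Vv supplies the per-lane
+   offsets from the Rt base and Vw supplies the data.  Illustration only;
+   operand types are assumed as above and the helper name is invented. */
+static inline void example_scatter_halfwords(int base, int region,
+                                             HVX_Vector offsets,
+                                             HVX_Vector data) {
+  /* Stores the halfword lanes of data to base + per-lane offsets. */
+  Q6_vscatter_RMVhV(base, region, offsets, data);
+}
+#endif /* usage sketch */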
 
 #if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vabs(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vabs(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vfmv(Vu32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vu32.qf16
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vuu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=Vu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
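+/* Editor's note: an illustrative usage sketch, not part of the upstream
+   header.  A sum of two sf vectors can be formed in the wider qf32 format
+   and converted back with the vconv above.  The example_* helper name is
+   hypothetical; assumes an HVX-enabled build (e.g. hexagon-clang -mhvx). */
+static inline HVX_Vector example_add_sf(HVX_Vector a, HVX_Vector b) {
+  HVX_Vector acc = Q6_Vqf32_vadd_VsfVsf(a, b); /* sf + sf -> qf32 */
+  return Q6_Vsf_equals_Vqf32(acc);             /* qf32 -> IEEE sf */
+}
+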
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
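+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   the vcvt pairs above move between IEEE half and integer lanes, e.g. a
+   uh -> hf -> uh round trip.  Assumes an HVX v68 build. */
+static inline HVX_Vector example_uh_roundtrip(HVX_Vector vu) {
+  HVX_Vector h = Q6_Vhf_vcvt_Vuh(vu); /* uh lanes -> hf lanes */
+  return Q6_Vuh_vcvt_Vhf(h);          /* hf lanes -> uh lanes */
+}
+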
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
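+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   one step of an hf dot product -- vdmpy forms sf products of hf pairs and
+   the accumulating form folds them into a running sf total. */
+static inline HVX_Vector example_dot_step(HVX_Vector acc,
+                                          HVX_Vector u, HVX_Vector v) {
+  return Q6_Vsf_vdmpyacc_VsfVhfVhf(acc, u, v); /* acc.sf += u.hf * v.hf */
+}
+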
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfneg(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfneg(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
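+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   a symmetric clamp to [-limit, +limit] built from the CVI_VX_LATE
+   vfneg/vfmax/vfmin forms above. */
+static inline HVX_Vector example_clamp_sym_sf(HVX_Vector x, HVX_Vector limit) {
+  HVX_Vector lo = Q6_Vsf_vfneg_Vsf(limit); /* -limit */
+  return Q6_Vsf_vfmin_VsfVsf(Q6_Vsf_vfmax_VsfVsf(x, lo), limit);
+}
+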
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
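+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   building and combining float predicates -- mark lanes where a>b or c>d. */
+static inline HVX_VectorPred example_gt_either(HVX_Vector a, HVX_Vector b,
+                                               HVX_Vector c, HVX_Vector d) {
+  HVX_VectorPred q = Q6_Q_vcmp_gt_VsfVsf(a, b); /* q  = (a > b) */
+  return Q6_Q_vcmp_gtor_QVsfVsf(q, c, d);       /* q |= (c > d) */
+}
+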
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
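+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   clamping x into [lo, hi] with the CVI_VA vmax/vmin forms above. */
+static inline HVX_Vector example_clamp_sf(HVX_Vector x,
+                                          HVX_Vector lo, HVX_Vector hi) {
+  return Q6_Vsf_vmin_VsfVsf(Q6_Vsf_vmax_VsfVsf(x, lo), hi);
+}
+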
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
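+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   hf x hf products widened to an sf vector pair, with the accumulating form
+   folding into a running pair. */
+static inline HVX_VectorPair example_mac_hf(HVX_VectorPair acc,
+                                            HVX_Vector u, HVX_Vector v) {
+  return Q6_Wsf_vmpyacc_WsfVhfVhf(acc, u, v); /* acc.sf += u.hf * v.hf */
+}
+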
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
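+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   (a*b) - c evaluated in the qf32 domain and converted back to IEEE sf only
+   at the end, using the vmpy/vsub/vconv ops above. */
+static inline HVX_Vector example_msub_sf(HVX_Vector a, HVX_Vector b,
+                                         HVX_Vector c) {
+  HVX_Vector p = Q6_Vqf32_vmpy_VsfVsf(a, b);   /* qf32 = a * b */
+  HVX_Vector d = Q6_Vqf32_vsub_Vqf32Vsf(p, c); /* qf32 = p - c */
+  return Q6_Vsf_equals_Vqf32(d);               /* qf32 -> sf   */
+}
+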
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
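+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   HVX v69 narrowing -- a 32-bit word pair shifted right by per-lane amounts,
+   rounded, and saturated to unsigned halfwords. */
+static inline HVX_Vector example_narrow_w_to_uh(HVX_VectorPair ww,
+                                                HVX_Vector shift) {
+  return Q6_Vuh_vasr_WwVuh_rnd_sat(ww, shift); /* >> shift, rnd, sat */
+}
+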
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
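+/* Editor's sketch (hypothetical example_* helper, not part of the header):
+   HVX v69 Q16 fixed-point multiply -- the ">>16" form above keeps the high
+   half of each uh product. */
+static inline HVX_Vector example_mul_q16(HVX_Vector a, HVX_Vector b) {
+  return Q6_Vuh_vmpy_VuhVuh_rs16(a, b); /* (a * b) >> 16 per uh lane */
+}
+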
 #endif /* __HVX__ */
 
 #endif
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ia32intrin.h b/darwin-x86/lib64/clang/16.0.2/include/ia32intrin.h
similarity index 95%
rename from darwin-x86/lib64/clang/14.0.2/include/ia32intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/ia32intrin.h
index ec8142b..f1904ef 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/ia32intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/ia32intrin.h
@@ -40,7 +40,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfd(int __A) {
-  return __builtin_ctz(__A);
+  return __builtin_ctz((unsigned int)__A);
 }
 
 /** Find the first set bit starting from the msb. Result is undefined if
@@ -57,7 +57,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrd(int __A) {
-  return 31 - __builtin_clz(__A);
+  return 31 - __builtin_clz((unsigned int)__A);
 }
 
 /** Swaps the bytes in the input. Converting little endian to big endian or
@@ -73,12 +73,12 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapd(int __A) {
-  return __builtin_bswap32(__A);
+  return (int)__builtin_bswap32((unsigned int)__A);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _bswap(int __A) {
-  return __builtin_bswap32(__A);
+  return (int)__builtin_bswap32((unsigned int)__A);
 }
 
 #define _bit_scan_forward(A) __bsfd((A))
@@ -99,7 +99,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfq(long long __A) {
-  return __builtin_ctzll(__A);
+  return (long long)__builtin_ctzll((unsigned long long)__A);
 }
 
 /** Find the first set bit starting from the msb. Result is undefined if
@@ -116,7 +116,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrq(long long __A) {
-  return 63 - __builtin_clzll(__A);
+  return 63 - __builtin_clzll((unsigned long long)__A);
 }
 
 /** Swaps the bytes in the input. Converting little endian to big endian or
@@ -132,7 +132,7 @@
  */
 static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapq(long long __A) {
-  return __builtin_bswap64(__A);
+  return (long long)__builtin_bswap64((unsigned long long)__A);
 }
 
 #define _bswap64(A) __bswapq((A))
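+/* Editor's usage sketch (hypothetical __ilog2_example, not part of the
+   header): __bsrd() returns the index of the most-significant set bit, so
+   for nonzero input it is floor(log2(v)); undefined for 0 as noted above. */
+static __inline__ int __ilog2_example(unsigned int __v) {
+  return __bsrd((int)__v); /* e.g. __ilog2_example(32U) == 5 */
+}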
@@ -395,23 +395,23 @@
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rold(unsigned int __X, int __C) {
-  return __builtin_rotateleft32(__X, __C);
+  return __builtin_rotateleft32(__X, (unsigned int)__C);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rord(unsigned int __X, int __C) {
-  return __builtin_rotateright32(__X, __C);
+  return __builtin_rotateright32(__X, (unsigned int)__C);
 }
 
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolq(unsigned long long __X, int __C) {
-  return __builtin_rotateleft64(__X, __C);
+  return __builtin_rotateleft64(__X, (unsigned long long)__C);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorq(unsigned long long __X, int __C) {
-  return __builtin_rotateright64(__X, __C);
+  return __builtin_rotateright64(__X, (unsigned long long)__C);
 }
 #endif /* __x86_64__ */
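+/* Editor's usage sketch (hypothetical __rotl8_example, not part of the
+   header): a fixed byte rotate through __rold. */
+static __inline__ unsigned int __rotl8_example(unsigned int __x) {
+  return __rold(__x, 8); /* 0x11223344U -> 0x22334411U */
+}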
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/immintrin.h b/darwin-x86/lib64/clang/16.0.2/include/immintrin.h
similarity index 92%
rename from darwin-x86/lib64/clang/14.0.2/include/immintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/immintrin.h
index e5174f8..f4e4cea 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/immintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/immintrin.h
@@ -214,17 +214,13 @@
 #include <avx512pfintrin.h>
 #endif
 
-/*
- * FIXME: _Float16 type is legal only when HW support float16 operation.
- * We use __AVX512FP16__ to identify if float16 is supported or not, so
- * when float16 is not supported, the related header is not included.
- *
- */
-#if defined(__AVX512FP16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512FP16__)
 #include <avx512fp16intrin.h>
 #endif
 
-#if defined(__AVX512FP16__) && defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    (defined(__AVX512VL__) && defined(__AVX512FP16__))
 #include <avx512vlfp16intrin.h>
 #endif
 
@@ -276,20 +272,37 @@
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand16_step(unsigned short *__p)
 {
-  return __builtin_ia32_rdrand16_step(__p);
+  return (int)__builtin_ia32_rdrand16_step(__p);
 }
 
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand32_step(unsigned int *__p)
 {
-  return __builtin_ia32_rdrand32_step(__p);
+  return (int)__builtin_ia32_rdrand32_step(__p);
 }
 
 #ifdef __x86_64__
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand64_step(unsigned long long *__p)
 {
-  return __builtin_ia32_rdrand64_step(__p);
+  return (int)__builtin_ia32_rdrand64_step(__p);
+}
+#else
+// We need to emulate the functionality of 64-bit rdrand with two 32-bit
+// rdrand instructions.
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand64_step(unsigned long long *__p)
+{
+  unsigned int __lo, __hi;
+  unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);
+  unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);
+  if (__res_lo && __res_hi) {
+    *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo;
+    return 1;
+  } else {
+    *__p = 0;
+    return 0;
+  }
 }
 #endif
 #endif /* __RDRND__ */
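+/* Editor's usage sketch (hypothetical __rdrand64_retry_example, not part of
+   the header): the step functions return 0 when the hardware has no random
+   data ready, so callers conventionally retry a bounded number of times.
+   Assumes an -mrdrnd build. */
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+__rdrand64_retry_example(unsigned long long *__p)
+{
+  for (int __i = 0; __i < 10; ++__i) /* bounded retry */
+    if (_rdrand64_step(__p))
+      return 1; /* *__p now holds 64 random bits */
+  return 0;     /* hardware kept failing */
+}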
@@ -360,50 +373,50 @@
 static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i16(void const * __P) {
   struct __loadu_i16 {
-    short __v;
+    unsigned short __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
+  return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i16(void * __P, short __D) {
   struct __storeu_i16 {
-    short __v;
+    unsigned short __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D);
 }
 
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i32(void const * __P) {
   struct __loadu_i32 {
-    int __v;
+    unsigned int __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
+  return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i32(void * __P, int __D) {
   struct __storeu_i32 {
-    int __v;
+    unsigned int __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D);
 }
 
 #ifdef __x86_64__
 static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i64(void const * __P) {
   struct __loadu_i64 {
-    long long __v;
+    unsigned long long __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
+  return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i64(void * __P, long long __D) {
   struct __storeu_i64 {
-    long long __v;
+    unsigned long long __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
 }
 #endif
 #endif /* __MOVBE */
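+/* Editor's usage sketch (hypothetical __be32_roundtrip_example, not part of
+   the header): writing and re-reading a big-endian (network order) 32-bit
+   field, including at unaligned addresses.  Assumes an -mmovbe build. */
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+__be32_roundtrip_example(void *__buf, int __len)
+{
+  _storebe_i32(__buf, __len); /* store byte-swapped        */
+  return _loadbe_i32(__buf);  /* load + swap back == __len */
+}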
diff --git a/darwin-x86/lib64/clang/14.0.2/include/intrin.h b/darwin-x86/lib64/clang/16.0.2/include/intrin.h
similarity index 95%
rename from darwin-x86/lib64/clang/14.0.2/include/intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/intrin.h
index 02e66d0..de68b07 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/intrin.h
@@ -534,27 +534,6 @@
 |* Misc
 \*----------------------------------------------------------------------------*/
 #if defined(__i386__) || defined(__x86_64__)
-#if defined(__i386__)
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("cpuid"                                                                \
-        : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("xchg{q} {%%rbx, %q1|%q1, rbx}\n"                                      \
-        "cpuid\n"                                                              \
-        "xchg{q} {%%rbx, %q1|%q1, rbx}"                                        \
-        : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#endif
-static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
-  __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
-                                                    int __ecx) {
-  __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
-}
 static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
   __asm__ volatile("hlt");
 }
@@ -581,6 +560,18 @@
 
 __int64 __mulh(__int64 __a, __int64 __b);
 unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);
+
+void __break(int);
+
+void __writex18byte(unsigned long offset, unsigned char data);
+void __writex18word(unsigned long offset, unsigned short data);
+void __writex18dword(unsigned long offset, unsigned long data);
+void __writex18qword(unsigned long offset, unsigned __int64 data);
+
+unsigned char __readx18byte(unsigned long offset);
+unsigned short __readx18word(unsigned long offset);
+unsigned long __readx18dword(unsigned long offset);
+unsigned __int64 __readx18qword(unsigned long offset);
 #endif
 
 /*----------------------------------------------------------------------------*\
diff --git a/darwin-x86/lib64/clang/14.0.2/include/inttypes.h b/darwin-x86/lib64/clang/16.0.2/include/inttypes.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/inttypes.h
rename to darwin-x86/lib64/clang/16.0.2/include/inttypes.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/invpcidintrin.h b/darwin-x86/lib64/clang/16.0.2/include/invpcidintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/invpcidintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/invpcidintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/iso646.h b/darwin-x86/lib64/clang/16.0.2/include/iso646.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/iso646.h
rename to darwin-x86/lib64/clang/16.0.2/include/iso646.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h b/darwin-x86/lib64/clang/16.0.2/include/keylockerintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/keylockerintrin.h
index ad9428e..1994ac4 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/keylockerintrin.h
@@ -46,7 +46,7 @@
 ///
 /// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
 ///   GP (0)
 /// FI
@@ -91,7 +91,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
                __m128i __enkey_lo, __m128i __enkey_hi) {
@@ -106,7 +106,7 @@
 ///
 /// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// InputKey[127:0] := __key[127:0]
 /// KeyMetadata[2:0] := __htype[2:0]
 /// KeyMetadata[23:3] := 0 // Reserved for future usage
@@ -126,7 +126,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
   return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
@@ -141,7 +141,7 @@
 ///
 /// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// InputKey[127:0] := __key_lo[127:0]
 /// InputKey[255:128] := __key_hi[255:128]
 /// KeyMetadata[2:0] := __htype[2:0]
@@ -163,7 +163,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
                      void *__h) {
@@ -179,7 +179,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -202,7 +202,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -216,7 +216,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -241,7 +241,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -255,7 +255,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
 ///                  (Handle[127:0] AND (CPL > 0)) ||
@@ -280,7 +280,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -294,7 +294,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
 ///                   (Handle[127:0] AND (CPL > 0)) ||
@@ -319,7 +319,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -346,7 +346,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle := MEM[__h+383:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -377,7 +377,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
@@ -392,7 +392,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -423,7 +423,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
@@ -438,7 +438,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -469,7 +469,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
@@ -484,7 +484,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                   (Handle[127:0] AND (CPL > 0)) ||
@@ -515,7 +515,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
diff --git a/darwin-x86/lib64/clang/14.0.2/include/limits.h b/darwin-x86/lib64/clang/16.0.2/include/limits.h
similarity index 76%
rename from darwin-x86/lib64/clang/14.0.2/include/limits.h
rename to darwin-x86/lib64/clang/16.0.2/include/limits.h
index c653580..32cc901 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/limits.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/limits.h
@@ -62,6 +62,26 @@
 
 #define CHAR_BIT  __CHAR_BIT__
 
+/* C2x 5.2.4.2.1 */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+#define BOOL_WIDTH   __BOOL_WIDTH__
+#define CHAR_WIDTH   CHAR_BIT
+#define SCHAR_WIDTH  CHAR_BIT
+#define UCHAR_WIDTH  CHAR_BIT
+#define USHRT_WIDTH  __SHRT_WIDTH__
+#define SHRT_WIDTH   __SHRT_WIDTH__
+#define UINT_WIDTH   __INT_WIDTH__
+#define INT_WIDTH    __INT_WIDTH__
+#define ULONG_WIDTH  __LONG_WIDTH__
+#define LONG_WIDTH   __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH  __LLONG_WIDTH__
+
+#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__
+#endif
+
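+/* Editor's usage sketch (illustrative only, not part of the header): the C2x
+   width macros above enable compile-time width checks such as
+       #if defined(ULLONG_WIDTH) && ULLONG_WIDTH < 64
+       #error "this code needs a 64-bit unsigned long long"
+       #endif
+ */
+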
 #ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
 #define CHAR_MIN 0
 #define CHAR_MAX UCHAR_MAX
@@ -73,7 +93,8 @@
 /* C99 5.2.4.2.1: Added long long.
    C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
  */
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L)
 
 #undef  LLONG_MIN
 #undef  LLONG_MAX
diff --git a/darwin-x86/lib64/clang/14.0.2/include/lwpintrin.h b/darwin-x86/lib64/clang/16.0.2/include/lwpintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/lwpintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/lwpintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/lzcntintrin.h b/darwin-x86/lib64/clang/16.0.2/include/lzcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/lzcntintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/lzcntintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mm3dnow.h b/darwin-x86/lib64/clang/16.0.2/include/mm3dnow.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mm3dnow.h
rename to darwin-x86/lib64/clang/16.0.2/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h b/darwin-x86/lib64/clang/16.0.2/include/mm_malloc.h
similarity index 90%
rename from darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h
rename to darwin-x86/lib64/clang/16.0.2/include/mm_malloc.h
index 933dbaa..d32fe59 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/mm_malloc.h
@@ -28,9 +28,9 @@
 
 #if !(defined(_WIN32) && defined(_mm_malloc))
 static __inline__ void *__attribute__((__always_inline__, __nodebug__,
-                                       __malloc__))
-_mm_malloc(size_t __size, size_t __align)
-{
+                                       __malloc__, __alloc_size__(1),
+                                       __alloc_align__(2)))
+_mm_malloc(size_t __size, size_t __align) {
   if (__align == 1) {
     return malloc(__size);
   }
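The added __alloc_size__(1) and __alloc_align__(2) attributes let the compiler assume that _mm_malloc returns __size bytes aligned to __align, which sharpens __builtin_object_size-based diagnostics and enables alignment-aware optimizations at call sites. A small usage sketch; the 32-byte figure is just an illustrative AVX-friendly alignment:

#include <mm_malloc.h>

int main(void) {
  /* 1024 floats, 32-byte aligned (suitable for 256-bit vector loads). */
  float *buf = _mm_malloc(1024 * sizeof(float), 32);
  if (!buf)
    return 1;
  /* ... aligned loads/stores on buf ... */
  _mm_free(buf); /* pair with _mm_free, not free() */
  return 0;
}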
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/mmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/module.modulemap b/darwin-x86/lib64/clang/16.0.2/include/module.modulemap
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/module.modulemap
rename to darwin-x86/lib64/clang/16.0.2/include/module.modulemap
diff --git a/darwin-x86/lib64/clang/14.0.2/include/movdirintrin.h b/darwin-x86/lib64/clang/16.0.2/include/movdirintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/movdirintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/movdirintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/msa.h b/darwin-x86/lib64/clang/16.0.2/include/msa.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/msa.h
rename to darwin-x86/lib64/clang/16.0.2/include/msa.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mwaitxintrin.h b/darwin-x86/lib64/clang/16.0.2/include/mwaitxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mwaitxintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/mwaitxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/nmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/nmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/nmmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/darwin-x86/lib64/clang/16.0.2/include/opencl-c-base.h
similarity index 96%
rename from darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
rename to darwin-x86/lib64/clang/16.0.2/include/opencl-c-base.h
index 9c81ddb..c433b4f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/opencl-c-base.h
@@ -21,6 +21,7 @@
 #define cl_khr_subgroup_shuffle 1
 #define cl_khr_subgroup_shuffle_relative 1
 #define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_subgroup_rotate 1
 #define cl_khr_extended_bit_ops 1
 #define cl_khr_integer_dot_product 1
 #define __opencl_c_integer_dot_product_input_4x8bit 1
@@ -67,10 +68,25 @@
 #if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 // For the SPIR and SPIR-V targets, all features are supported.
 #if defined(__SPIR__) || defined(__SPIRV__)
+#define __opencl_c_work_group_collective_functions 1
+#define __opencl_c_atomic_order_seq_cst 1
+#define __opencl_c_atomic_scope_device 1
 #define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
 #endif // defined(__SPIR__) || defined(__SPIRV__)
 #endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 
+#if !defined(__opencl_c_generic_address_space)
+// Internal feature macro to provide named (global, local, private) address
+// space overloads for builtin functions that take a pointer argument.
+#define __opencl_c_named_address_space_builtins 1
+#endif // !defined(__opencl_c_generic_address_space)
+
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
+// Internal feature macro to provide subgroup builtins.
+#define __opencl_subgroup_builtins 1
+#endif
+
 // built-in scalar data types:
 
 /**
@@ -188,6 +204,9 @@
 typedef double double16 __attribute__((ext_vector_type(16)));
 #endif
 
+// An internal alias for half, for use by OpenCLBuiltins.td.
+#define __half half
+
 #if defined(__OPENCL_CPP_VERSION__)
 #define NULL nullptr
 #elif defined(__OPENCL_C_VERSION__)
@@ -498,12 +517,14 @@
 
 #define MAX_WORK_DIM 3
 
+#ifdef __opencl_c_device_enqueue
 typedef struct {
   unsigned int workDimension;
   size_t globalWorkOffset[MAX_WORK_DIM];
   size_t globalWorkSize[MAX_WORK_DIM];
   size_t localWorkSize[MAX_WORK_DIM];
 } ndrange_t;
+#endif // __opencl_c_device_enqueue
 
 #endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
@@ -600,9 +621,11 @@
 // C++ for OpenCL - __remove_address_space
 #if defined(__OPENCL_CPP_VERSION__)
 template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
 template <typename _Tp> struct __remove_address_space<__generic _Tp> {
   using type = _Tp;
 };
+#endif
 template <typename _Tp> struct __remove_address_space<__global _Tp> {
   using type = _Tp;
 };
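Guarding the __generic specialization matters because C++ for OpenCL can now be built with the generic address space feature disabled; __remove_address_space keeps working for the named address spaces either way. A minimal sketch of what the trait computes, in C++ for OpenCL (the alias and kernel names are illustrative, not part of the header):

template <typename T>
using plain_t = typename __remove_address_space<T>::type;

kernel void scale(__global int *data) {
  plain_t<__global int> x = data[0]; // x is a plain 'int'
  data[0] = x * 2;
}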
diff --git a/darwin-x86/lib64/clang/16.0.2/include/opencl-c.h b/darwin-x86/lib64/clang/16.0.2/include/opencl-c.h
new file mode 100644
index 0000000..2afe023
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/opencl-c.h
@@ -0,0 +1,18338 @@
+//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_H_
+#define _OPENCL_H_
+
+#include "opencl-c-base.h"
+
+#if defined(__opencl_c_images)
+#ifndef cl_khr_depth_images
+#define cl_khr_depth_images
+#endif //cl_khr_depth_images
+#endif //defined(__opencl_c_images)
+
+#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
+#ifdef cl_khr_3d_image_writes
+#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
+#endif //cl_khr_3d_image_writes
+#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
+
+#if (defined(__OPENCL_CPP_VERSION__) ||                                        \
+     (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&                              \
+    (defined(__SPIR__) || defined(__SPIRV__))
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
+#endif // (defined(__OPENCL_CPP_VERSION__) ||
+       //  (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
+       // (defined(__SPIR__) || defined(__SPIRV__))
+
+#define __ovld __attribute__((overloadable))
+#define __conv __attribute__((convergent))
+
+// Optimizations
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+
+// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
+
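Each convert_<dest>[_sat][_<rounding>] overload declared below converts one scalar or vector type to another: _sat clamps to the destination's range instead of wrapping, and _rte/_rtz/_rtp/_rtn select round-to-nearest-even, toward zero, toward +infinity, or toward -infinity for inexact conversions. A small OpenCL C sketch (the kernel name and values are illustrative, chosen to show saturation):

kernel void demo(global uchar *out) {
  float4 v = (float4)(-1.5f, 0.4f, 127.6f, 300.0f);
  // Round to nearest even, then clamp to [0, 255]:
  // (-1.5, 0.4, 127.6, 300.0) -> (0, 0, 128, 255)
  uchar4 u = convert_uchar4_sat_rte(v);
  vstore4(u, 0, out);
}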
+char __ovld __cnfn convert_char_rte(char);
+char __ovld __cnfn convert_char_sat_rte(char);
+char __ovld __cnfn convert_char_rtz(char);
+char __ovld __cnfn convert_char_sat_rtz(char);
+char __ovld __cnfn convert_char_rtp(char);
+char __ovld __cnfn convert_char_sat_rtp(char);
+char __ovld __cnfn convert_char_rtn(char);
+char __ovld __cnfn convert_char_sat_rtn(char);
+char __ovld __cnfn convert_char(char);
+char __ovld __cnfn convert_char_sat(char);
+char __ovld __cnfn convert_char_rte(uchar);
+char __ovld __cnfn convert_char_sat_rte(uchar);
+char __ovld __cnfn convert_char_rtz(uchar);
+char __ovld __cnfn convert_char_sat_rtz(uchar);
+char __ovld __cnfn convert_char_rtp(uchar);
+char __ovld __cnfn convert_char_sat_rtp(uchar);
+char __ovld __cnfn convert_char_rtn(uchar);
+char __ovld __cnfn convert_char_sat_rtn(uchar);
+char __ovld __cnfn convert_char(uchar);
+char __ovld __cnfn convert_char_sat(uchar);
+char __ovld __cnfn convert_char_rte(short);
+char __ovld __cnfn convert_char_sat_rte(short);
+char __ovld __cnfn convert_char_rtz(short);
+char __ovld __cnfn convert_char_sat_rtz(short);
+char __ovld __cnfn convert_char_rtp(short);
+char __ovld __cnfn convert_char_sat_rtp(short);
+char __ovld __cnfn convert_char_rtn(short);
+char __ovld __cnfn convert_char_sat_rtn(short);
+char __ovld __cnfn convert_char(short);
+char __ovld __cnfn convert_char_sat(short);
+char __ovld __cnfn convert_char_rte(ushort);
+char __ovld __cnfn convert_char_sat_rte(ushort);
+char __ovld __cnfn convert_char_rtz(ushort);
+char __ovld __cnfn convert_char_sat_rtz(ushort);
+char __ovld __cnfn convert_char_rtp(ushort);
+char __ovld __cnfn convert_char_sat_rtp(ushort);
+char __ovld __cnfn convert_char_rtn(ushort);
+char __ovld __cnfn convert_char_sat_rtn(ushort);
+char __ovld __cnfn convert_char(ushort);
+char __ovld __cnfn convert_char_sat(ushort);
+char __ovld __cnfn convert_char_rte(int);
+char __ovld __cnfn convert_char_sat_rte(int);
+char __ovld __cnfn convert_char_rtz(int);
+char __ovld __cnfn convert_char_sat_rtz(int);
+char __ovld __cnfn convert_char_rtp(int);
+char __ovld __cnfn convert_char_sat_rtp(int);
+char __ovld __cnfn convert_char_rtn(int);
+char __ovld __cnfn convert_char_sat_rtn(int);
+char __ovld __cnfn convert_char(int);
+char __ovld __cnfn convert_char_sat(int);
+char __ovld __cnfn convert_char_rte(uint);
+char __ovld __cnfn convert_char_sat_rte(uint);
+char __ovld __cnfn convert_char_rtz(uint);
+char __ovld __cnfn convert_char_sat_rtz(uint);
+char __ovld __cnfn convert_char_rtp(uint);
+char __ovld __cnfn convert_char_sat_rtp(uint);
+char __ovld __cnfn convert_char_rtn(uint);
+char __ovld __cnfn convert_char_sat_rtn(uint);
+char __ovld __cnfn convert_char(uint);
+char __ovld __cnfn convert_char_sat(uint);
+char __ovld __cnfn convert_char_rte(long);
+char __ovld __cnfn convert_char_sat_rte(long);
+char __ovld __cnfn convert_char_rtz(long);
+char __ovld __cnfn convert_char_sat_rtz(long);
+char __ovld __cnfn convert_char_rtp(long);
+char __ovld __cnfn convert_char_sat_rtp(long);
+char __ovld __cnfn convert_char_rtn(long);
+char __ovld __cnfn convert_char_sat_rtn(long);
+char __ovld __cnfn convert_char(long);
+char __ovld __cnfn convert_char_sat(long);
+char __ovld __cnfn convert_char_rte(ulong);
+char __ovld __cnfn convert_char_sat_rte(ulong);
+char __ovld __cnfn convert_char_rtz(ulong);
+char __ovld __cnfn convert_char_sat_rtz(ulong);
+char __ovld __cnfn convert_char_rtp(ulong);
+char __ovld __cnfn convert_char_sat_rtp(ulong);
+char __ovld __cnfn convert_char_rtn(ulong);
+char __ovld __cnfn convert_char_sat_rtn(ulong);
+char __ovld __cnfn convert_char(ulong);
+char __ovld __cnfn convert_char_sat(ulong);
+char __ovld __cnfn convert_char_rte(float);
+char __ovld __cnfn convert_char_sat_rte(float);
+char __ovld __cnfn convert_char_rtz(float);
+char __ovld __cnfn convert_char_sat_rtz(float);
+char __ovld __cnfn convert_char_rtp(float);
+char __ovld __cnfn convert_char_sat_rtp(float);
+char __ovld __cnfn convert_char_rtn(float);
+char __ovld __cnfn convert_char_sat_rtn(float);
+char __ovld __cnfn convert_char(float);
+char __ovld __cnfn convert_char_sat(float);
+uchar __ovld __cnfn convert_uchar_rte(char);
+uchar __ovld __cnfn convert_uchar_sat_rte(char);
+uchar __ovld __cnfn convert_uchar_rtz(char);
+uchar __ovld __cnfn convert_uchar_sat_rtz(char);
+uchar __ovld __cnfn convert_uchar_rtp(char);
+uchar __ovld __cnfn convert_uchar_sat_rtp(char);
+uchar __ovld __cnfn convert_uchar_rtn(char);
+uchar __ovld __cnfn convert_uchar_sat_rtn(char);
+uchar __ovld __cnfn convert_uchar(char);
+uchar __ovld __cnfn convert_uchar_sat(char);
+uchar __ovld __cnfn convert_uchar_rte(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
+uchar __ovld __cnfn convert_uchar_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_rtn(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
+uchar __ovld __cnfn convert_uchar(uchar);
+uchar __ovld __cnfn convert_uchar_sat(uchar);
+uchar __ovld __cnfn convert_uchar_rte(short);
+uchar __ovld __cnfn convert_uchar_sat_rte(short);
+uchar __ovld __cnfn convert_uchar_rtz(short);
+uchar __ovld __cnfn convert_uchar_sat_rtz(short);
+uchar __ovld __cnfn convert_uchar_rtp(short);
+uchar __ovld __cnfn convert_uchar_sat_rtp(short);
+uchar __ovld __cnfn convert_uchar_rtn(short);
+uchar __ovld __cnfn convert_uchar_sat_rtn(short);
+uchar __ovld __cnfn convert_uchar(short);
+uchar __ovld __cnfn convert_uchar_sat(short);
+uchar __ovld __cnfn convert_uchar_rte(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
+uchar __ovld __cnfn convert_uchar_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_rtn(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
+uchar __ovld __cnfn convert_uchar(ushort);
+uchar __ovld __cnfn convert_uchar_sat(ushort);
+uchar __ovld __cnfn convert_uchar_rte(int);
+uchar __ovld __cnfn convert_uchar_sat_rte(int);
+uchar __ovld __cnfn convert_uchar_rtz(int);
+uchar __ovld __cnfn convert_uchar_sat_rtz(int);
+uchar __ovld __cnfn convert_uchar_rtp(int);
+uchar __ovld __cnfn convert_uchar_sat_rtp(int);
+uchar __ovld __cnfn convert_uchar_rtn(int);
+uchar __ovld __cnfn convert_uchar_sat_rtn(int);
+uchar __ovld __cnfn convert_uchar(int);
+uchar __ovld __cnfn convert_uchar_sat(int);
+uchar __ovld __cnfn convert_uchar_rte(uint);
+uchar __ovld __cnfn convert_uchar_sat_rte(uint);
+uchar __ovld __cnfn convert_uchar_rtz(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
+uchar __ovld __cnfn convert_uchar_rtp(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
+uchar __ovld __cnfn convert_uchar_rtn(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
+uchar __ovld __cnfn convert_uchar(uint);
+uchar __ovld __cnfn convert_uchar_sat(uint);
+uchar __ovld __cnfn convert_uchar_rte(long);
+uchar __ovld __cnfn convert_uchar_sat_rte(long);
+uchar __ovld __cnfn convert_uchar_rtz(long);
+uchar __ovld __cnfn convert_uchar_sat_rtz(long);
+uchar __ovld __cnfn convert_uchar_rtp(long);
+uchar __ovld __cnfn convert_uchar_sat_rtp(long);
+uchar __ovld __cnfn convert_uchar_rtn(long);
+uchar __ovld __cnfn convert_uchar_sat_rtn(long);
+uchar __ovld __cnfn convert_uchar(long);
+uchar __ovld __cnfn convert_uchar_sat(long);
+uchar __ovld __cnfn convert_uchar_rte(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
+uchar __ovld __cnfn convert_uchar_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_rtn(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
+uchar __ovld __cnfn convert_uchar(ulong);
+uchar __ovld __cnfn convert_uchar_sat(ulong);
+uchar __ovld __cnfn convert_uchar_rte(float);
+uchar __ovld __cnfn convert_uchar_sat_rte(float);
+uchar __ovld __cnfn convert_uchar_rtz(float);
+uchar __ovld __cnfn convert_uchar_sat_rtz(float);
+uchar __ovld __cnfn convert_uchar_rtp(float);
+uchar __ovld __cnfn convert_uchar_sat_rtp(float);
+uchar __ovld __cnfn convert_uchar_rtn(float);
+uchar __ovld __cnfn convert_uchar_sat_rtn(float);
+uchar __ovld __cnfn convert_uchar(float);
+uchar __ovld __cnfn convert_uchar_sat(float);
+
+short __ovld __cnfn convert_short_rte(char);
+short __ovld __cnfn convert_short_sat_rte(char);
+short __ovld __cnfn convert_short_rtz(char);
+short __ovld __cnfn convert_short_sat_rtz(char);
+short __ovld __cnfn convert_short_rtp(char);
+short __ovld __cnfn convert_short_sat_rtp(char);
+short __ovld __cnfn convert_short_rtn(char);
+short __ovld __cnfn convert_short_sat_rtn(char);
+short __ovld __cnfn convert_short(char);
+short __ovld __cnfn convert_short_sat(char);
+short __ovld __cnfn convert_short_rte(uchar);
+short __ovld __cnfn convert_short_sat_rte(uchar);
+short __ovld __cnfn convert_short_rtz(uchar);
+short __ovld __cnfn convert_short_sat_rtz(uchar);
+short __ovld __cnfn convert_short_rtp(uchar);
+short __ovld __cnfn convert_short_sat_rtp(uchar);
+short __ovld __cnfn convert_short_rtn(uchar);
+short __ovld __cnfn convert_short_sat_rtn(uchar);
+short __ovld __cnfn convert_short(uchar);
+short __ovld __cnfn convert_short_sat(uchar);
+short __ovld __cnfn convert_short_rte(short);
+short __ovld __cnfn convert_short_sat_rte(short);
+short __ovld __cnfn convert_short_rtz(short);
+short __ovld __cnfn convert_short_sat_rtz(short);
+short __ovld __cnfn convert_short_rtp(short);
+short __ovld __cnfn convert_short_sat_rtp(short);
+short __ovld __cnfn convert_short_rtn(short);
+short __ovld __cnfn convert_short_sat_rtn(short);
+short __ovld __cnfn convert_short(short);
+short __ovld __cnfn convert_short_sat(short);
+short __ovld __cnfn convert_short_rte(ushort);
+short __ovld __cnfn convert_short_sat_rte(ushort);
+short __ovld __cnfn convert_short_rtz(ushort);
+short __ovld __cnfn convert_short_sat_rtz(ushort);
+short __ovld __cnfn convert_short_rtp(ushort);
+short __ovld __cnfn convert_short_sat_rtp(ushort);
+short __ovld __cnfn convert_short_rtn(ushort);
+short __ovld __cnfn convert_short_sat_rtn(ushort);
+short __ovld __cnfn convert_short(ushort);
+short __ovld __cnfn convert_short_sat(ushort);
+short __ovld __cnfn convert_short_rte(int);
+short __ovld __cnfn convert_short_sat_rte(int);
+short __ovld __cnfn convert_short_rtz(int);
+short __ovld __cnfn convert_short_sat_rtz(int);
+short __ovld __cnfn convert_short_rtp(int);
+short __ovld __cnfn convert_short_sat_rtp(int);
+short __ovld __cnfn convert_short_rtn(int);
+short __ovld __cnfn convert_short_sat_rtn(int);
+short __ovld __cnfn convert_short(int);
+short __ovld __cnfn convert_short_sat(int);
+short __ovld __cnfn convert_short_rte(uint);
+short __ovld __cnfn convert_short_sat_rte(uint);
+short __ovld __cnfn convert_short_rtz(uint);
+short __ovld __cnfn convert_short_sat_rtz(uint);
+short __ovld __cnfn convert_short_rtp(uint);
+short __ovld __cnfn convert_short_sat_rtp(uint);
+short __ovld __cnfn convert_short_rtn(uint);
+short __ovld __cnfn convert_short_sat_rtn(uint);
+short __ovld __cnfn convert_short(uint);
+short __ovld __cnfn convert_short_sat(uint);
+short __ovld __cnfn convert_short_rte(long);
+short __ovld __cnfn convert_short_sat_rte(long);
+short __ovld __cnfn convert_short_rtz(long);
+short __ovld __cnfn convert_short_sat_rtz(long);
+short __ovld __cnfn convert_short_rtp(long);
+short __ovld __cnfn convert_short_sat_rtp(long);
+short __ovld __cnfn convert_short_rtn(long);
+short __ovld __cnfn convert_short_sat_rtn(long);
+short __ovld __cnfn convert_short(long);
+short __ovld __cnfn convert_short_sat(long);
+short __ovld __cnfn convert_short_rte(ulong);
+short __ovld __cnfn convert_short_sat_rte(ulong);
+short __ovld __cnfn convert_short_rtz(ulong);
+short __ovld __cnfn convert_short_sat_rtz(ulong);
+short __ovld __cnfn convert_short_rtp(ulong);
+short __ovld __cnfn convert_short_sat_rtp(ulong);
+short __ovld __cnfn convert_short_rtn(ulong);
+short __ovld __cnfn convert_short_sat_rtn(ulong);
+short __ovld __cnfn convert_short(ulong);
+short __ovld __cnfn convert_short_sat(ulong);
+short __ovld __cnfn convert_short_rte(float);
+short __ovld __cnfn convert_short_sat_rte(float);
+short __ovld __cnfn convert_short_rtz(float);
+short __ovld __cnfn convert_short_sat_rtz(float);
+short __ovld __cnfn convert_short_rtp(float);
+short __ovld __cnfn convert_short_sat_rtp(float);
+short __ovld __cnfn convert_short_rtn(float);
+short __ovld __cnfn convert_short_sat_rtn(float);
+short __ovld __cnfn convert_short(float);
+short __ovld __cnfn convert_short_sat(float);
+ushort __ovld __cnfn convert_ushort_rte(char);
+ushort __ovld __cnfn convert_ushort_sat_rte(char);
+ushort __ovld __cnfn convert_ushort_rtz(char);
+ushort __ovld __cnfn convert_ushort_sat_rtz(char);
+ushort __ovld __cnfn convert_ushort_rtp(char);
+ushort __ovld __cnfn convert_ushort_sat_rtp(char);
+ushort __ovld __cnfn convert_ushort_rtn(char);
+ushort __ovld __cnfn convert_ushort_sat_rtn(char);
+ushort __ovld __cnfn convert_ushort(char);
+ushort __ovld __cnfn convert_ushort_sat(char);
+ushort __ovld __cnfn convert_ushort_rte(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
+ushort __ovld __cnfn convert_ushort_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_rtn(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
+ushort __ovld __cnfn convert_ushort(uchar);
+ushort __ovld __cnfn convert_ushort_sat(uchar);
+ushort __ovld __cnfn convert_ushort_rte(short);
+ushort __ovld __cnfn convert_ushort_sat_rte(short);
+ushort __ovld __cnfn convert_ushort_rtz(short);
+ushort __ovld __cnfn convert_ushort_sat_rtz(short);
+ushort __ovld __cnfn convert_ushort_rtp(short);
+ushort __ovld __cnfn convert_ushort_sat_rtp(short);
+ushort __ovld __cnfn convert_ushort_rtn(short);
+ushort __ovld __cnfn convert_ushort_sat_rtn(short);
+ushort __ovld __cnfn convert_ushort(short);
+ushort __ovld __cnfn convert_ushort_sat(short);
+ushort __ovld __cnfn convert_ushort_rte(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
+ushort __ovld __cnfn convert_ushort_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_rtn(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
+ushort __ovld __cnfn convert_ushort(ushort);
+ushort __ovld __cnfn convert_ushort_sat(ushort);
+ushort __ovld __cnfn convert_ushort_rte(int);
+ushort __ovld __cnfn convert_ushort_sat_rte(int);
+ushort __ovld __cnfn convert_ushort_rtz(int);
+ushort __ovld __cnfn convert_ushort_sat_rtz(int);
+ushort __ovld __cnfn convert_ushort_rtp(int);
+ushort __ovld __cnfn convert_ushort_sat_rtp(int);
+ushort __ovld __cnfn convert_ushort_rtn(int);
+ushort __ovld __cnfn convert_ushort_sat_rtn(int);
+ushort __ovld __cnfn convert_ushort(int);
+ushort __ovld __cnfn convert_ushort_sat(int);
+ushort __ovld __cnfn convert_ushort_rte(uint);
+ushort __ovld __cnfn convert_ushort_sat_rte(uint);
+ushort __ovld __cnfn convert_ushort_rtz(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
+ushort __ovld __cnfn convert_ushort_rtp(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
+ushort __ovld __cnfn convert_ushort_rtn(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
+ushort __ovld __cnfn convert_ushort(uint);
+ushort __ovld __cnfn convert_ushort_sat(uint);
+ushort __ovld __cnfn convert_ushort_rte(long);
+ushort __ovld __cnfn convert_ushort_sat_rte(long);
+ushort __ovld __cnfn convert_ushort_rtz(long);
+ushort __ovld __cnfn convert_ushort_sat_rtz(long);
+ushort __ovld __cnfn convert_ushort_rtp(long);
+ushort __ovld __cnfn convert_ushort_sat_rtp(long);
+ushort __ovld __cnfn convert_ushort_rtn(long);
+ushort __ovld __cnfn convert_ushort_sat_rtn(long);
+ushort __ovld __cnfn convert_ushort(long);
+ushort __ovld __cnfn convert_ushort_sat(long);
+ushort __ovld __cnfn convert_ushort_rte(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
+ushort __ovld __cnfn convert_ushort_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_rtn(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
+ushort __ovld __cnfn convert_ushort(ulong);
+ushort __ovld __cnfn convert_ushort_sat(ulong);
+ushort __ovld __cnfn convert_ushort_rte(float);
+ushort __ovld __cnfn convert_ushort_sat_rte(float);
+ushort __ovld __cnfn convert_ushort_rtz(float);
+ushort __ovld __cnfn convert_ushort_sat_rtz(float);
+ushort __ovld __cnfn convert_ushort_rtp(float);
+ushort __ovld __cnfn convert_ushort_sat_rtp(float);
+ushort __ovld __cnfn convert_ushort_rtn(float);
+ushort __ovld __cnfn convert_ushort_sat_rtn(float);
+ushort __ovld __cnfn convert_ushort(float);
+ushort __ovld __cnfn convert_ushort_sat(float);
+int __ovld __cnfn convert_int_rte(char);
+int __ovld __cnfn convert_int_sat_rte(char);
+int __ovld __cnfn convert_int_rtz(char);
+int __ovld __cnfn convert_int_sat_rtz(char);
+int __ovld __cnfn convert_int_rtp(char);
+int __ovld __cnfn convert_int_sat_rtp(char);
+int __ovld __cnfn convert_int_rtn(char);
+int __ovld __cnfn convert_int_sat_rtn(char);
+int __ovld __cnfn convert_int(char);
+int __ovld __cnfn convert_int_sat(char);
+int __ovld __cnfn convert_int_rte(uchar);
+int __ovld __cnfn convert_int_sat_rte(uchar);
+int __ovld __cnfn convert_int_rtz(uchar);
+int __ovld __cnfn convert_int_sat_rtz(uchar);
+int __ovld __cnfn convert_int_rtp(uchar);
+int __ovld __cnfn convert_int_sat_rtp(uchar);
+int __ovld __cnfn convert_int_rtn(uchar);
+int __ovld __cnfn convert_int_sat_rtn(uchar);
+int __ovld __cnfn convert_int(uchar);
+int __ovld __cnfn convert_int_sat(uchar);
+int __ovld __cnfn convert_int_rte(short);
+int __ovld __cnfn convert_int_sat_rte(short);
+int __ovld __cnfn convert_int_rtz(short);
+int __ovld __cnfn convert_int_sat_rtz(short);
+int __ovld __cnfn convert_int_rtp(short);
+int __ovld __cnfn convert_int_sat_rtp(short);
+int __ovld __cnfn convert_int_rtn(short);
+int __ovld __cnfn convert_int_sat_rtn(short);
+int __ovld __cnfn convert_int(short);
+int __ovld __cnfn convert_int_sat(short);
+int __ovld __cnfn convert_int_rte(ushort);
+int __ovld __cnfn convert_int_sat_rte(ushort);
+int __ovld __cnfn convert_int_rtz(ushort);
+int __ovld __cnfn convert_int_sat_rtz(ushort);
+int __ovld __cnfn convert_int_rtp(ushort);
+int __ovld __cnfn convert_int_sat_rtp(ushort);
+int __ovld __cnfn convert_int_rtn(ushort);
+int __ovld __cnfn convert_int_sat_rtn(ushort);
+int __ovld __cnfn convert_int(ushort);
+int __ovld __cnfn convert_int_sat(ushort);
+int __ovld __cnfn convert_int_rte(int);
+int __ovld __cnfn convert_int_sat_rte(int);
+int __ovld __cnfn convert_int_rtz(int);
+int __ovld __cnfn convert_int_sat_rtz(int);
+int __ovld __cnfn convert_int_rtp(int);
+int __ovld __cnfn convert_int_sat_rtp(int);
+int __ovld __cnfn convert_int_rtn(int);
+int __ovld __cnfn convert_int_sat_rtn(int);
+int __ovld __cnfn convert_int(int);
+int __ovld __cnfn convert_int_sat(int);
+int __ovld __cnfn convert_int_rte(uint);
+int __ovld __cnfn convert_int_sat_rte(uint);
+int __ovld __cnfn convert_int_rtz(uint);
+int __ovld __cnfn convert_int_sat_rtz(uint);
+int __ovld __cnfn convert_int_rtp(uint);
+int __ovld __cnfn convert_int_sat_rtp(uint);
+int __ovld __cnfn convert_int_rtn(uint);
+int __ovld __cnfn convert_int_sat_rtn(uint);
+int __ovld __cnfn convert_int(uint);
+int __ovld __cnfn convert_int_sat(uint);
+int __ovld __cnfn convert_int_rte(long);
+int __ovld __cnfn convert_int_sat_rte(long);
+int __ovld __cnfn convert_int_rtz(long);
+int __ovld __cnfn convert_int_sat_rtz(long);
+int __ovld __cnfn convert_int_rtp(long);
+int __ovld __cnfn convert_int_sat_rtp(long);
+int __ovld __cnfn convert_int_rtn(long);
+int __ovld __cnfn convert_int_sat_rtn(long);
+int __ovld __cnfn convert_int(long);
+int __ovld __cnfn convert_int_sat(long);
+int __ovld __cnfn convert_int_rte(ulong);
+int __ovld __cnfn convert_int_sat_rte(ulong);
+int __ovld __cnfn convert_int_rtz(ulong);
+int __ovld __cnfn convert_int_sat_rtz(ulong);
+int __ovld __cnfn convert_int_rtp(ulong);
+int __ovld __cnfn convert_int_sat_rtp(ulong);
+int __ovld __cnfn convert_int_rtn(ulong);
+int __ovld __cnfn convert_int_sat_rtn(ulong);
+int __ovld __cnfn convert_int(ulong);
+int __ovld __cnfn convert_int_sat(ulong);
+int __ovld __cnfn convert_int_rte(float);
+int __ovld __cnfn convert_int_sat_rte(float);
+int __ovld __cnfn convert_int_rtz(float);
+int __ovld __cnfn convert_int_sat_rtz(float);
+int __ovld __cnfn convert_int_rtp(float);
+int __ovld __cnfn convert_int_sat_rtp(float);
+int __ovld __cnfn convert_int_rtn(float);
+int __ovld __cnfn convert_int_sat_rtn(float);
+int __ovld __cnfn convert_int(float);
+int __ovld __cnfn convert_int_sat(float);
+uint __ovld __cnfn convert_uint_rte(char);
+uint __ovld __cnfn convert_uint_sat_rte(char);
+uint __ovld __cnfn convert_uint_rtz(char);
+uint __ovld __cnfn convert_uint_sat_rtz(char);
+uint __ovld __cnfn convert_uint_rtp(char);
+uint __ovld __cnfn convert_uint_sat_rtp(char);
+uint __ovld __cnfn convert_uint_rtn(char);
+uint __ovld __cnfn convert_uint_sat_rtn(char);
+uint __ovld __cnfn convert_uint(char);
+uint __ovld __cnfn convert_uint_sat(char);
+uint __ovld __cnfn convert_uint_rte(uchar);
+uint __ovld __cnfn convert_uint_sat_rte(uchar);
+uint __ovld __cnfn convert_uint_rtz(uchar);
+uint __ovld __cnfn convert_uint_sat_rtz(uchar);
+uint __ovld __cnfn convert_uint_rtp(uchar);
+uint __ovld __cnfn convert_uint_sat_rtp(uchar);
+uint __ovld __cnfn convert_uint_rtn(uchar);
+uint __ovld __cnfn convert_uint_sat_rtn(uchar);
+uint __ovld __cnfn convert_uint(uchar);
+uint __ovld __cnfn convert_uint_sat(uchar);
+uint __ovld __cnfn convert_uint_rte(short);
+uint __ovld __cnfn convert_uint_sat_rte(short);
+uint __ovld __cnfn convert_uint_rtz(short);
+uint __ovld __cnfn convert_uint_sat_rtz(short);
+uint __ovld __cnfn convert_uint_rtp(short);
+uint __ovld __cnfn convert_uint_sat_rtp(short);
+uint __ovld __cnfn convert_uint_rtn(short);
+uint __ovld __cnfn convert_uint_sat_rtn(short);
+uint __ovld __cnfn convert_uint(short);
+uint __ovld __cnfn convert_uint_sat(short);
+uint __ovld __cnfn convert_uint_rte(ushort);
+uint __ovld __cnfn convert_uint_sat_rte(ushort);
+uint __ovld __cnfn convert_uint_rtz(ushort);
+uint __ovld __cnfn convert_uint_sat_rtz(ushort);
+uint __ovld __cnfn convert_uint_rtp(ushort);
+uint __ovld __cnfn convert_uint_sat_rtp(ushort);
+uint __ovld __cnfn convert_uint_rtn(ushort);
+uint __ovld __cnfn convert_uint_sat_rtn(ushort);
+uint __ovld __cnfn convert_uint(ushort);
+uint __ovld __cnfn convert_uint_sat(ushort);
+uint __ovld __cnfn convert_uint_rte(int);
+uint __ovld __cnfn convert_uint_sat_rte(int);
+uint __ovld __cnfn convert_uint_rtz(int);
+uint __ovld __cnfn convert_uint_sat_rtz(int);
+uint __ovld __cnfn convert_uint_rtp(int);
+uint __ovld __cnfn convert_uint_sat_rtp(int);
+uint __ovld __cnfn convert_uint_rtn(int);
+uint __ovld __cnfn convert_uint_sat_rtn(int);
+uint __ovld __cnfn convert_uint(int);
+uint __ovld __cnfn convert_uint_sat(int);
+uint __ovld __cnfn convert_uint_rte(uint);
+uint __ovld __cnfn convert_uint_sat_rte(uint);
+uint __ovld __cnfn convert_uint_rtz(uint);
+uint __ovld __cnfn convert_uint_sat_rtz(uint);
+uint __ovld __cnfn convert_uint_rtp(uint);
+uint __ovld __cnfn convert_uint_sat_rtp(uint);
+uint __ovld __cnfn convert_uint_rtn(uint);
+uint __ovld __cnfn convert_uint_sat_rtn(uint);
+uint __ovld __cnfn convert_uint(uint);
+uint __ovld __cnfn convert_uint_sat(uint);
+uint __ovld __cnfn convert_uint_rte(long);
+uint __ovld __cnfn convert_uint_sat_rte(long);
+uint __ovld __cnfn convert_uint_rtz(long);
+uint __ovld __cnfn convert_uint_sat_rtz(long);
+uint __ovld __cnfn convert_uint_rtp(long);
+uint __ovld __cnfn convert_uint_sat_rtp(long);
+uint __ovld __cnfn convert_uint_rtn(long);
+uint __ovld __cnfn convert_uint_sat_rtn(long);
+uint __ovld __cnfn convert_uint(long);
+uint __ovld __cnfn convert_uint_sat(long);
+uint __ovld __cnfn convert_uint_rte(ulong);
+uint __ovld __cnfn convert_uint_sat_rte(ulong);
+uint __ovld __cnfn convert_uint_rtz(ulong);
+uint __ovld __cnfn convert_uint_sat_rtz(ulong);
+uint __ovld __cnfn convert_uint_rtp(ulong);
+uint __ovld __cnfn convert_uint_sat_rtp(ulong);
+uint __ovld __cnfn convert_uint_rtn(ulong);
+uint __ovld __cnfn convert_uint_sat_rtn(ulong);
+uint __ovld __cnfn convert_uint(ulong);
+uint __ovld __cnfn convert_uint_sat(ulong);
+uint __ovld __cnfn convert_uint_rte(float);
+uint __ovld __cnfn convert_uint_sat_rte(float);
+uint __ovld __cnfn convert_uint_rtz(float);
+uint __ovld __cnfn convert_uint_sat_rtz(float);
+uint __ovld __cnfn convert_uint_rtp(float);
+uint __ovld __cnfn convert_uint_sat_rtp(float);
+uint __ovld __cnfn convert_uint_rtn(float);
+uint __ovld __cnfn convert_uint_sat_rtn(float);
+uint __ovld __cnfn convert_uint(float);
+uint __ovld __cnfn convert_uint_sat(float);
+long __ovld __cnfn convert_long_rte(char);
+long __ovld __cnfn convert_long_sat_rte(char);
+long __ovld __cnfn convert_long_rtz(char);
+long __ovld __cnfn convert_long_sat_rtz(char);
+long __ovld __cnfn convert_long_rtp(char);
+long __ovld __cnfn convert_long_sat_rtp(char);
+long __ovld __cnfn convert_long_rtn(char);
+long __ovld __cnfn convert_long_sat_rtn(char);
+long __ovld __cnfn convert_long(char);
+long __ovld __cnfn convert_long_sat(char);
+long __ovld __cnfn convert_long_rte(uchar);
+long __ovld __cnfn convert_long_sat_rte(uchar);
+long __ovld __cnfn convert_long_rtz(uchar);
+long __ovld __cnfn convert_long_sat_rtz(uchar);
+long __ovld __cnfn convert_long_rtp(uchar);
+long __ovld __cnfn convert_long_sat_rtp(uchar);
+long __ovld __cnfn convert_long_rtn(uchar);
+long __ovld __cnfn convert_long_sat_rtn(uchar);
+long __ovld __cnfn convert_long(uchar);
+long __ovld __cnfn convert_long_sat(uchar);
+long __ovld __cnfn convert_long_rte(short);
+long __ovld __cnfn convert_long_sat_rte(short);
+long __ovld __cnfn convert_long_rtz(short);
+long __ovld __cnfn convert_long_sat_rtz(short);
+long __ovld __cnfn convert_long_rtp(short);
+long __ovld __cnfn convert_long_sat_rtp(short);
+long __ovld __cnfn convert_long_rtn(short);
+long __ovld __cnfn convert_long_sat_rtn(short);
+long __ovld __cnfn convert_long(short);
+long __ovld __cnfn convert_long_sat(short);
+long __ovld __cnfn convert_long_rte(ushort);
+long __ovld __cnfn convert_long_sat_rte(ushort);
+long __ovld __cnfn convert_long_rtz(ushort);
+long __ovld __cnfn convert_long_sat_rtz(ushort);
+long __ovld __cnfn convert_long_rtp(ushort);
+long __ovld __cnfn convert_long_sat_rtp(ushort);
+long __ovld __cnfn convert_long_rtn(ushort);
+long __ovld __cnfn convert_long_sat_rtn(ushort);
+long __ovld __cnfn convert_long(ushort);
+long __ovld __cnfn convert_long_sat(ushort);
+long __ovld __cnfn convert_long_rte(int);
+long __ovld __cnfn convert_long_sat_rte(int);
+long __ovld __cnfn convert_long_rtz(int);
+long __ovld __cnfn convert_long_sat_rtz(int);
+long __ovld __cnfn convert_long_rtp(int);
+long __ovld __cnfn convert_long_sat_rtp(int);
+long __ovld __cnfn convert_long_rtn(int);
+long __ovld __cnfn convert_long_sat_rtn(int);
+long __ovld __cnfn convert_long(int);
+long __ovld __cnfn convert_long_sat(int);
+long __ovld __cnfn convert_long_rte(uint);
+long __ovld __cnfn convert_long_sat_rte(uint);
+long __ovld __cnfn convert_long_rtz(uint);
+long __ovld __cnfn convert_long_sat_rtz(uint);
+long __ovld __cnfn convert_long_rtp(uint);
+long __ovld __cnfn convert_long_sat_rtp(uint);
+long __ovld __cnfn convert_long_rtn(uint);
+long __ovld __cnfn convert_long_sat_rtn(uint);
+long __ovld __cnfn convert_long(uint);
+long __ovld __cnfn convert_long_sat(uint);
+long __ovld __cnfn convert_long_rte(long);
+long __ovld __cnfn convert_long_sat_rte(long);
+long __ovld __cnfn convert_long_rtz(long);
+long __ovld __cnfn convert_long_sat_rtz(long);
+long __ovld __cnfn convert_long_rtp(long);
+long __ovld __cnfn convert_long_sat_rtp(long);
+long __ovld __cnfn convert_long_rtn(long);
+long __ovld __cnfn convert_long_sat_rtn(long);
+long __ovld __cnfn convert_long(long);
+long __ovld __cnfn convert_long_sat(long);
+long __ovld __cnfn convert_long_rte(ulong);
+long __ovld __cnfn convert_long_sat_rte(ulong);
+long __ovld __cnfn convert_long_rtz(ulong);
+long __ovld __cnfn convert_long_sat_rtz(ulong);
+long __ovld __cnfn convert_long_rtp(ulong);
+long __ovld __cnfn convert_long_sat_rtp(ulong);
+long __ovld __cnfn convert_long_rtn(ulong);
+long __ovld __cnfn convert_long_sat_rtn(ulong);
+long __ovld __cnfn convert_long(ulong);
+long __ovld __cnfn convert_long_sat(ulong);
+long __ovld __cnfn convert_long_rte(float);
+long __ovld __cnfn convert_long_sat_rte(float);
+long __ovld __cnfn convert_long_rtz(float);
+long __ovld __cnfn convert_long_sat_rtz(float);
+long __ovld __cnfn convert_long_rtp(float);
+long __ovld __cnfn convert_long_sat_rtp(float);
+long __ovld __cnfn convert_long_rtn(float);
+long __ovld __cnfn convert_long_sat_rtn(float);
+long __ovld __cnfn convert_long(float);
+long __ovld __cnfn convert_long_sat(float);
+ulong __ovld __cnfn convert_ulong_rte(char);
+ulong __ovld __cnfn convert_ulong_sat_rte(char);
+ulong __ovld __cnfn convert_ulong_rtz(char);
+ulong __ovld __cnfn convert_ulong_sat_rtz(char);
+ulong __ovld __cnfn convert_ulong_rtp(char);
+ulong __ovld __cnfn convert_ulong_sat_rtp(char);
+ulong __ovld __cnfn convert_ulong_rtn(char);
+ulong __ovld __cnfn convert_ulong_sat_rtn(char);
+ulong __ovld __cnfn convert_ulong(char);
+ulong __ovld __cnfn convert_ulong_sat(char);
+ulong __ovld __cnfn convert_ulong_rte(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
+ulong __ovld __cnfn convert_ulong_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_rtn(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
+ulong __ovld __cnfn convert_ulong(uchar);
+ulong __ovld __cnfn convert_ulong_sat(uchar);
+ulong __ovld __cnfn convert_ulong_rte(short);
+ulong __ovld __cnfn convert_ulong_sat_rte(short);
+ulong __ovld __cnfn convert_ulong_rtz(short);
+ulong __ovld __cnfn convert_ulong_sat_rtz(short);
+ulong __ovld __cnfn convert_ulong_rtp(short);
+ulong __ovld __cnfn convert_ulong_sat_rtp(short);
+ulong __ovld __cnfn convert_ulong_rtn(short);
+ulong __ovld __cnfn convert_ulong_sat_rtn(short);
+ulong __ovld __cnfn convert_ulong(short);
+ulong __ovld __cnfn convert_ulong_sat(short);
+ulong __ovld __cnfn convert_ulong_rte(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
+ulong __ovld __cnfn convert_ulong_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_rtn(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
+ulong __ovld __cnfn convert_ulong(ushort);
+ulong __ovld __cnfn convert_ulong_sat(ushort);
+ulong __ovld __cnfn convert_ulong_rte(int);
+ulong __ovld __cnfn convert_ulong_sat_rte(int);
+ulong __ovld __cnfn convert_ulong_rtz(int);
+ulong __ovld __cnfn convert_ulong_sat_rtz(int);
+ulong __ovld __cnfn convert_ulong_rtp(int);
+ulong __ovld __cnfn convert_ulong_sat_rtp(int);
+ulong __ovld __cnfn convert_ulong_rtn(int);
+ulong __ovld __cnfn convert_ulong_sat_rtn(int);
+ulong __ovld __cnfn convert_ulong(int);
+ulong __ovld __cnfn convert_ulong_sat(int);
+ulong __ovld __cnfn convert_ulong_rte(uint);
+ulong __ovld __cnfn convert_ulong_sat_rte(uint);
+ulong __ovld __cnfn convert_ulong_rtz(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
+ulong __ovld __cnfn convert_ulong_rtp(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
+ulong __ovld __cnfn convert_ulong_rtn(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
+ulong __ovld __cnfn convert_ulong(uint);
+ulong __ovld __cnfn convert_ulong_sat(uint);
+ulong __ovld __cnfn convert_ulong_rte(long);
+ulong __ovld __cnfn convert_ulong_sat_rte(long);
+ulong __ovld __cnfn convert_ulong_rtz(long);
+ulong __ovld __cnfn convert_ulong_sat_rtz(long);
+ulong __ovld __cnfn convert_ulong_rtp(long);
+ulong __ovld __cnfn convert_ulong_sat_rtp(long);
+ulong __ovld __cnfn convert_ulong_rtn(long);
+ulong __ovld __cnfn convert_ulong_sat_rtn(long);
+ulong __ovld __cnfn convert_ulong(long);
+ulong __ovld __cnfn convert_ulong_sat(long);
+ulong __ovld __cnfn convert_ulong_rte(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
+ulong __ovld __cnfn convert_ulong_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_rtn(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
+ulong __ovld __cnfn convert_ulong(ulong);
+ulong __ovld __cnfn convert_ulong_sat(ulong);
+ulong __ovld __cnfn convert_ulong_rte(float);
+ulong __ovld __cnfn convert_ulong_sat_rte(float);
+ulong __ovld __cnfn convert_ulong_rtz(float);
+ulong __ovld __cnfn convert_ulong_sat_rtz(float);
+ulong __ovld __cnfn convert_ulong_rtp(float);
+ulong __ovld __cnfn convert_ulong_sat_rtp(float);
+ulong __ovld __cnfn convert_ulong_rtn(float);
+ulong __ovld __cnfn convert_ulong_sat_rtn(float);
+ulong __ovld __cnfn convert_ulong(float);
+ulong __ovld __cnfn convert_ulong_sat(float);
+float __ovld __cnfn convert_float_rte(char);
+float __ovld __cnfn convert_float_rtz(char);
+float __ovld __cnfn convert_float_rtp(char);
+float __ovld __cnfn convert_float_rtn(char);
+float __ovld __cnfn convert_float(char);
+float __ovld __cnfn convert_float_rte(uchar);
+float __ovld __cnfn convert_float_rtz(uchar);
+float __ovld __cnfn convert_float_rtp(uchar);
+float __ovld __cnfn convert_float_rtn(uchar);
+float __ovld __cnfn convert_float(uchar);
+float __ovld __cnfn convert_float_rte(short);
+float __ovld __cnfn convert_float_rtz(short);
+float __ovld __cnfn convert_float_rtp(short);
+float __ovld __cnfn convert_float_rtn(short);
+float __ovld __cnfn convert_float(short);
+float __ovld __cnfn convert_float_rte(ushort);
+float __ovld __cnfn convert_float_rtz(ushort);
+float __ovld __cnfn convert_float_rtp(ushort);
+float __ovld __cnfn convert_float_rtn(ushort);
+float __ovld __cnfn convert_float(ushort);
+float __ovld __cnfn convert_float_rte(int);
+float __ovld __cnfn convert_float_rtz(int);
+float __ovld __cnfn convert_float_rtp(int);
+float __ovld __cnfn convert_float_rtn(int);
+float __ovld __cnfn convert_float(int);
+float __ovld __cnfn convert_float_rte(uint);
+float __ovld __cnfn convert_float_rtz(uint);
+float __ovld __cnfn convert_float_rtp(uint);
+float __ovld __cnfn convert_float_rtn(uint);
+float __ovld __cnfn convert_float(uint);
+float __ovld __cnfn convert_float_rte(long);
+float __ovld __cnfn convert_float_rtz(long);
+float __ovld __cnfn convert_float_rtp(long);
+float __ovld __cnfn convert_float_rtn(long);
+float __ovld __cnfn convert_float(long);
+float __ovld __cnfn convert_float_rte(ulong);
+float __ovld __cnfn convert_float_rtz(ulong);
+float __ovld __cnfn convert_float_rtp(ulong);
+float __ovld __cnfn convert_float_rtn(ulong);
+float __ovld __cnfn convert_float(ulong);
+float __ovld __cnfn convert_float_rte(float);
+float __ovld __cnfn convert_float_rtz(float);
+float __ovld __cnfn convert_float_rtp(float);
+float __ovld __cnfn convert_float_rtn(float);
+float __ovld __cnfn convert_float(float);
+char2 __ovld __cnfn convert_char2_rte(char2);
+char2 __ovld __cnfn convert_char2_sat_rte(char2);
+char2 __ovld __cnfn convert_char2_rtz(char2);
+char2 __ovld __cnfn convert_char2_sat_rtz(char2);
+char2 __ovld __cnfn convert_char2_rtp(char2);
+char2 __ovld __cnfn convert_char2_sat_rtp(char2);
+char2 __ovld __cnfn convert_char2_rtn(char2);
+char2 __ovld __cnfn convert_char2_sat_rtn(char2);
+char2 __ovld __cnfn convert_char2(char2);
+char2 __ovld __cnfn convert_char2_sat(char2);
+char2 __ovld __cnfn convert_char2_rte(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
+char2 __ovld __cnfn convert_char2_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_rtn(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
+char2 __ovld __cnfn convert_char2(uchar2);
+char2 __ovld __cnfn convert_char2_sat(uchar2);
+char2 __ovld __cnfn convert_char2_rte(short2);
+char2 __ovld __cnfn convert_char2_sat_rte(short2);
+char2 __ovld __cnfn convert_char2_rtz(short2);
+char2 __ovld __cnfn convert_char2_sat_rtz(short2);
+char2 __ovld __cnfn convert_char2_rtp(short2);
+char2 __ovld __cnfn convert_char2_sat_rtp(short2);
+char2 __ovld __cnfn convert_char2_rtn(short2);
+char2 __ovld __cnfn convert_char2_sat_rtn(short2);
+char2 __ovld __cnfn convert_char2(short2);
+char2 __ovld __cnfn convert_char2_sat(short2);
+char2 __ovld __cnfn convert_char2_rte(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
+char2 __ovld __cnfn convert_char2_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_rtn(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
+char2 __ovld __cnfn convert_char2(ushort2);
+char2 __ovld __cnfn convert_char2_sat(ushort2);
+char2 __ovld __cnfn convert_char2_rte(int2);
+char2 __ovld __cnfn convert_char2_sat_rte(int2);
+char2 __ovld __cnfn convert_char2_rtz(int2);
+char2 __ovld __cnfn convert_char2_sat_rtz(int2);
+char2 __ovld __cnfn convert_char2_rtp(int2);
+char2 __ovld __cnfn convert_char2_sat_rtp(int2);
+char2 __ovld __cnfn convert_char2_rtn(int2);
+char2 __ovld __cnfn convert_char2_sat_rtn(int2);
+char2 __ovld __cnfn convert_char2(int2);
+char2 __ovld __cnfn convert_char2_sat(int2);
+char2 __ovld __cnfn convert_char2_rte(uint2);
+char2 __ovld __cnfn convert_char2_sat_rte(uint2);
+char2 __ovld __cnfn convert_char2_rtz(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
+char2 __ovld __cnfn convert_char2_rtp(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
+char2 __ovld __cnfn convert_char2_rtn(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
+char2 __ovld __cnfn convert_char2(uint2);
+char2 __ovld __cnfn convert_char2_sat(uint2);
+char2 __ovld __cnfn convert_char2_rte(long2);
+char2 __ovld __cnfn convert_char2_sat_rte(long2);
+char2 __ovld __cnfn convert_char2_rtz(long2);
+char2 __ovld __cnfn convert_char2_sat_rtz(long2);
+char2 __ovld __cnfn convert_char2_rtp(long2);
+char2 __ovld __cnfn convert_char2_sat_rtp(long2);
+char2 __ovld __cnfn convert_char2_rtn(long2);
+char2 __ovld __cnfn convert_char2_sat_rtn(long2);
+char2 __ovld __cnfn convert_char2(long2);
+char2 __ovld __cnfn convert_char2_sat(long2);
+char2 __ovld __cnfn convert_char2_rte(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
+char2 __ovld __cnfn convert_char2_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_rtn(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
+char2 __ovld __cnfn convert_char2(ulong2);
+char2 __ovld __cnfn convert_char2_sat(ulong2);
+char2 __ovld __cnfn convert_char2_rte(float2);
+char2 __ovld __cnfn convert_char2_sat_rte(float2);
+char2 __ovld __cnfn convert_char2_rtz(float2);
+char2 __ovld __cnfn convert_char2_sat_rtz(float2);
+char2 __ovld __cnfn convert_char2_rtp(float2);
+char2 __ovld __cnfn convert_char2_sat_rtp(float2);
+char2 __ovld __cnfn convert_char2_rtn(float2);
+char2 __ovld __cnfn convert_char2_sat_rtn(float2);
+char2 __ovld __cnfn convert_char2(float2);
+char2 __ovld __cnfn convert_char2_sat(float2);
+uchar2 __ovld __cnfn convert_uchar2_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat(char2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat(short2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat(int2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat(long2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat(float2);
+short2 __ovld __cnfn convert_short2_rte(char2);
+short2 __ovld __cnfn convert_short2_sat_rte(char2);
+short2 __ovld __cnfn convert_short2_rtz(char2);
+short2 __ovld __cnfn convert_short2_sat_rtz(char2);
+short2 __ovld __cnfn convert_short2_rtp(char2);
+short2 __ovld __cnfn convert_short2_sat_rtp(char2);
+short2 __ovld __cnfn convert_short2_rtn(char2);
+short2 __ovld __cnfn convert_short2_sat_rtn(char2);
+short2 __ovld __cnfn convert_short2(char2);
+short2 __ovld __cnfn convert_short2_sat(char2);
+short2 __ovld __cnfn convert_short2_rte(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
+short2 __ovld __cnfn convert_short2_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_rtn(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
+short2 __ovld __cnfn convert_short2(uchar2);
+short2 __ovld __cnfn convert_short2_sat(uchar2);
+short2 __ovld __cnfn convert_short2_rte(short2);
+short2 __ovld __cnfn convert_short2_sat_rte(short2);
+short2 __ovld __cnfn convert_short2_rtz(short2);
+short2 __ovld __cnfn convert_short2_sat_rtz(short2);
+short2 __ovld __cnfn convert_short2_rtp(short2);
+short2 __ovld __cnfn convert_short2_sat_rtp(short2);
+short2 __ovld __cnfn convert_short2_rtn(short2);
+short2 __ovld __cnfn convert_short2_sat_rtn(short2);
+short2 __ovld __cnfn convert_short2(short2);
+short2 __ovld __cnfn convert_short2_sat(short2);
+short2 __ovld __cnfn convert_short2_rte(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
+short2 __ovld __cnfn convert_short2_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_rtn(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
+short2 __ovld __cnfn convert_short2(ushort2);
+short2 __ovld __cnfn convert_short2_sat(ushort2);
+short2 __ovld __cnfn convert_short2_rte(int2);
+short2 __ovld __cnfn convert_short2_sat_rte(int2);
+short2 __ovld __cnfn convert_short2_rtz(int2);
+short2 __ovld __cnfn convert_short2_sat_rtz(int2);
+short2 __ovld __cnfn convert_short2_rtp(int2);
+short2 __ovld __cnfn convert_short2_sat_rtp(int2);
+short2 __ovld __cnfn convert_short2_rtn(int2);
+short2 __ovld __cnfn convert_short2_sat_rtn(int2);
+short2 __ovld __cnfn convert_short2(int2);
+short2 __ovld __cnfn convert_short2_sat(int2);
+short2 __ovld __cnfn convert_short2_rte(uint2);
+short2 __ovld __cnfn convert_short2_sat_rte(uint2);
+short2 __ovld __cnfn convert_short2_rtz(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
+short2 __ovld __cnfn convert_short2_rtp(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
+short2 __ovld __cnfn convert_short2_rtn(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
+short2 __ovld __cnfn convert_short2(uint2);
+short2 __ovld __cnfn convert_short2_sat(uint2);
+short2 __ovld __cnfn convert_short2_rte(long2);
+short2 __ovld __cnfn convert_short2_sat_rte(long2);
+short2 __ovld __cnfn convert_short2_rtz(long2);
+short2 __ovld __cnfn convert_short2_sat_rtz(long2);
+short2 __ovld __cnfn convert_short2_rtp(long2);
+short2 __ovld __cnfn convert_short2_sat_rtp(long2);
+short2 __ovld __cnfn convert_short2_rtn(long2);
+short2 __ovld __cnfn convert_short2_sat_rtn(long2);
+short2 __ovld __cnfn convert_short2(long2);
+short2 __ovld __cnfn convert_short2_sat(long2);
+short2 __ovld __cnfn convert_short2_rte(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
+short2 __ovld __cnfn convert_short2_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_rtn(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
+short2 __ovld __cnfn convert_short2(ulong2);
+short2 __ovld __cnfn convert_short2_sat(ulong2);
+short2 __ovld __cnfn convert_short2_rte(float2);
+short2 __ovld __cnfn convert_short2_sat_rte(float2);
+short2 __ovld __cnfn convert_short2_rtz(float2);
+short2 __ovld __cnfn convert_short2_sat_rtz(float2);
+short2 __ovld __cnfn convert_short2_rtp(float2);
+short2 __ovld __cnfn convert_short2_sat_rtp(float2);
+short2 __ovld __cnfn convert_short2_rtn(float2);
+short2 __ovld __cnfn convert_short2_sat_rtn(float2);
+short2 __ovld __cnfn convert_short2(float2);
+short2 __ovld __cnfn convert_short2_sat(float2);
+ushort2 __ovld __cnfn convert_ushort2_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat(char2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat(short2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat(int2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat(long2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat(float2);
+int2 __ovld __cnfn convert_int2_rte(char2);
+int2 __ovld __cnfn convert_int2_sat_rte(char2);
+int2 __ovld __cnfn convert_int2_rtz(char2);
+int2 __ovld __cnfn convert_int2_sat_rtz(char2);
+int2 __ovld __cnfn convert_int2_rtp(char2);
+int2 __ovld __cnfn convert_int2_sat_rtp(char2);
+int2 __ovld __cnfn convert_int2_rtn(char2);
+int2 __ovld __cnfn convert_int2_sat_rtn(char2);
+int2 __ovld __cnfn convert_int2(char2);
+int2 __ovld __cnfn convert_int2_sat(char2);
+int2 __ovld __cnfn convert_int2_rte(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
+int2 __ovld __cnfn convert_int2_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_rtn(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
+int2 __ovld __cnfn convert_int2(uchar2);
+int2 __ovld __cnfn convert_int2_sat(uchar2);
+int2 __ovld __cnfn convert_int2_rte(short2);
+int2 __ovld __cnfn convert_int2_sat_rte(short2);
+int2 __ovld __cnfn convert_int2_rtz(short2);
+int2 __ovld __cnfn convert_int2_sat_rtz(short2);
+int2 __ovld __cnfn convert_int2_rtp(short2);
+int2 __ovld __cnfn convert_int2_sat_rtp(short2);
+int2 __ovld __cnfn convert_int2_rtn(short2);
+int2 __ovld __cnfn convert_int2_sat_rtn(short2);
+int2 __ovld __cnfn convert_int2(short2);
+int2 __ovld __cnfn convert_int2_sat(short2);
+int2 __ovld __cnfn convert_int2_rte(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
+int2 __ovld __cnfn convert_int2_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_rtn(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
+int2 __ovld __cnfn convert_int2(ushort2);
+int2 __ovld __cnfn convert_int2_sat(ushort2);
+int2 __ovld __cnfn convert_int2_rte(int2);
+int2 __ovld __cnfn convert_int2_sat_rte(int2);
+int2 __ovld __cnfn convert_int2_rtz(int2);
+int2 __ovld __cnfn convert_int2_sat_rtz(int2);
+int2 __ovld __cnfn convert_int2_rtp(int2);
+int2 __ovld __cnfn convert_int2_sat_rtp(int2);
+int2 __ovld __cnfn convert_int2_rtn(int2);
+int2 __ovld __cnfn convert_int2_sat_rtn(int2);
+int2 __ovld __cnfn convert_int2(int2);
+int2 __ovld __cnfn convert_int2_sat(int2);
+int2 __ovld __cnfn convert_int2_rte(uint2);
+int2 __ovld __cnfn convert_int2_sat_rte(uint2);
+int2 __ovld __cnfn convert_int2_rtz(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
+int2 __ovld __cnfn convert_int2_rtp(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
+int2 __ovld __cnfn convert_int2_rtn(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
+int2 __ovld __cnfn convert_int2(uint2);
+int2 __ovld __cnfn convert_int2_sat(uint2);
+int2 __ovld __cnfn convert_int2_rte(long2);
+int2 __ovld __cnfn convert_int2_sat_rte(long2);
+int2 __ovld __cnfn convert_int2_rtz(long2);
+int2 __ovld __cnfn convert_int2_sat_rtz(long2);
+int2 __ovld __cnfn convert_int2_rtp(long2);
+int2 __ovld __cnfn convert_int2_sat_rtp(long2);
+int2 __ovld __cnfn convert_int2_rtn(long2);
+int2 __ovld __cnfn convert_int2_sat_rtn(long2);
+int2 __ovld __cnfn convert_int2(long2);
+int2 __ovld __cnfn convert_int2_sat(long2);
+int2 __ovld __cnfn convert_int2_rte(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
+int2 __ovld __cnfn convert_int2_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_rtn(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
+int2 __ovld __cnfn convert_int2(ulong2);
+int2 __ovld __cnfn convert_int2_sat(ulong2);
+int2 __ovld __cnfn convert_int2_rte(float2);
+int2 __ovld __cnfn convert_int2_sat_rte(float2);
+int2 __ovld __cnfn convert_int2_rtz(float2);
+int2 __ovld __cnfn convert_int2_sat_rtz(float2);
+int2 __ovld __cnfn convert_int2_rtp(float2);
+int2 __ovld __cnfn convert_int2_sat_rtp(float2);
+int2 __ovld __cnfn convert_int2_rtn(float2);
+int2 __ovld __cnfn convert_int2_sat_rtn(float2);
+int2 __ovld __cnfn convert_int2(float2);
+int2 __ovld __cnfn convert_int2_sat(float2);
+uint2 __ovld __cnfn convert_uint2_rte(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
+uint2 __ovld __cnfn convert_uint2_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_rtn(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
+uint2 __ovld __cnfn convert_uint2(char2);
+uint2 __ovld __cnfn convert_uint2_sat(char2);
+uint2 __ovld __cnfn convert_uint2_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat(uchar2);
+uint2 __ovld __cnfn convert_uint2_rte(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
+uint2 __ovld __cnfn convert_uint2_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_rtn(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
+uint2 __ovld __cnfn convert_uint2(short2);
+uint2 __ovld __cnfn convert_uint2_sat(short2);
+uint2 __ovld __cnfn convert_uint2_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat(ushort2);
+uint2 __ovld __cnfn convert_uint2_rte(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
+uint2 __ovld __cnfn convert_uint2_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_rtn(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
+uint2 __ovld __cnfn convert_uint2(int2);
+uint2 __ovld __cnfn convert_uint2_sat(int2);
+uint2 __ovld __cnfn convert_uint2_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2(uint2);
+uint2 __ovld __cnfn convert_uint2_sat(uint2);
+uint2 __ovld __cnfn convert_uint2_rte(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
+uint2 __ovld __cnfn convert_uint2_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_rtn(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
+uint2 __ovld __cnfn convert_uint2(long2);
+uint2 __ovld __cnfn convert_uint2_sat(long2);
+uint2 __ovld __cnfn convert_uint2_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat(ulong2);
+uint2 __ovld __cnfn convert_uint2_rte(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
+uint2 __ovld __cnfn convert_uint2_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_rtn(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
+uint2 __ovld __cnfn convert_uint2(float2);
+uint2 __ovld __cnfn convert_uint2_sat(float2);
+long2 __ovld __cnfn convert_long2_rte(char2);
+long2 __ovld __cnfn convert_long2_sat_rte(char2);
+long2 __ovld __cnfn convert_long2_rtz(char2);
+long2 __ovld __cnfn convert_long2_sat_rtz(char2);
+long2 __ovld __cnfn convert_long2_rtp(char2);
+long2 __ovld __cnfn convert_long2_sat_rtp(char2);
+long2 __ovld __cnfn convert_long2_rtn(char2);
+long2 __ovld __cnfn convert_long2_sat_rtn(char2);
+long2 __ovld __cnfn convert_long2(char2);
+long2 __ovld __cnfn convert_long2_sat(char2);
+long2 __ovld __cnfn convert_long2_rte(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
+long2 __ovld __cnfn convert_long2_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_rtn(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
+long2 __ovld __cnfn convert_long2(uchar2);
+long2 __ovld __cnfn convert_long2_sat(uchar2);
+long2 __ovld __cnfn convert_long2_rte(short2);
+long2 __ovld __cnfn convert_long2_sat_rte(short2);
+long2 __ovld __cnfn convert_long2_rtz(short2);
+long2 __ovld __cnfn convert_long2_sat_rtz(short2);
+long2 __ovld __cnfn convert_long2_rtp(short2);
+long2 __ovld __cnfn convert_long2_sat_rtp(short2);
+long2 __ovld __cnfn convert_long2_rtn(short2);
+long2 __ovld __cnfn convert_long2_sat_rtn(short2);
+long2 __ovld __cnfn convert_long2(short2);
+long2 __ovld __cnfn convert_long2_sat(short2);
+long2 __ovld __cnfn convert_long2_rte(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
+long2 __ovld __cnfn convert_long2_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_rtn(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
+long2 __ovld __cnfn convert_long2(ushort2);
+long2 __ovld __cnfn convert_long2_sat(ushort2);
+long2 __ovld __cnfn convert_long2_rte(int2);
+long2 __ovld __cnfn convert_long2_sat_rte(int2);
+long2 __ovld __cnfn convert_long2_rtz(int2);
+long2 __ovld __cnfn convert_long2_sat_rtz(int2);
+long2 __ovld __cnfn convert_long2_rtp(int2);
+long2 __ovld __cnfn convert_long2_sat_rtp(int2);
+long2 __ovld __cnfn convert_long2_rtn(int2);
+long2 __ovld __cnfn convert_long2_sat_rtn(int2);
+long2 __ovld __cnfn convert_long2(int2);
+long2 __ovld __cnfn convert_long2_sat(int2);
+long2 __ovld __cnfn convert_long2_rte(uint2);
+long2 __ovld __cnfn convert_long2_sat_rte(uint2);
+long2 __ovld __cnfn convert_long2_rtz(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
+long2 __ovld __cnfn convert_long2_rtp(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
+long2 __ovld __cnfn convert_long2_rtn(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
+long2 __ovld __cnfn convert_long2(uint2);
+long2 __ovld __cnfn convert_long2_sat(uint2);
+long2 __ovld __cnfn convert_long2_rte(long2);
+long2 __ovld __cnfn convert_long2_sat_rte(long2);
+long2 __ovld __cnfn convert_long2_rtz(long2);
+long2 __ovld __cnfn convert_long2_sat_rtz(long2);
+long2 __ovld __cnfn convert_long2_rtp(long2);
+long2 __ovld __cnfn convert_long2_sat_rtp(long2);
+long2 __ovld __cnfn convert_long2_rtn(long2);
+long2 __ovld __cnfn convert_long2_sat_rtn(long2);
+long2 __ovld __cnfn convert_long2(long2);
+long2 __ovld __cnfn convert_long2_sat(long2);
+long2 __ovld __cnfn convert_long2_rte(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
+long2 __ovld __cnfn convert_long2_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_rtn(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
+long2 __ovld __cnfn convert_long2(ulong2);
+long2 __ovld __cnfn convert_long2_sat(ulong2);
+long2 __ovld __cnfn convert_long2_rte(float2);
+long2 __ovld __cnfn convert_long2_sat_rte(float2);
+long2 __ovld __cnfn convert_long2_rtz(float2);
+long2 __ovld __cnfn convert_long2_sat_rtz(float2);
+long2 __ovld __cnfn convert_long2_rtp(float2);
+long2 __ovld __cnfn convert_long2_sat_rtp(float2);
+long2 __ovld __cnfn convert_long2_rtn(float2);
+long2 __ovld __cnfn convert_long2_sat_rtn(float2);
+long2 __ovld __cnfn convert_long2(float2);
+long2 __ovld __cnfn convert_long2_sat(float2);
+ulong2 __ovld __cnfn convert_ulong2_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat(char2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat(short2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat(int2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat(long2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat(float2);
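+// The conversions to floating-point destinations below carry no _sat
+// variants: per the OpenCL C specification, saturation is defined only for
+// integer destination types.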
+float2 __ovld __cnfn convert_float2_rte(char2);
+float2 __ovld __cnfn convert_float2_rtz(char2);
+float2 __ovld __cnfn convert_float2_rtp(char2);
+float2 __ovld __cnfn convert_float2_rtn(char2);
+float2 __ovld __cnfn convert_float2(char2);
+float2 __ovld __cnfn convert_float2_rte(uchar2);
+float2 __ovld __cnfn convert_float2_rtz(uchar2);
+float2 __ovld __cnfn convert_float2_rtp(uchar2);
+float2 __ovld __cnfn convert_float2_rtn(uchar2);
+float2 __ovld __cnfn convert_float2(uchar2);
+float2 __ovld __cnfn convert_float2_rte(short2);
+float2 __ovld __cnfn convert_float2_rtz(short2);
+float2 __ovld __cnfn convert_float2_rtp(short2);
+float2 __ovld __cnfn convert_float2_rtn(short2);
+float2 __ovld __cnfn convert_float2(short2);
+float2 __ovld __cnfn convert_float2_rte(ushort2);
+float2 __ovld __cnfn convert_float2_rtz(ushort2);
+float2 __ovld __cnfn convert_float2_rtp(ushort2);
+float2 __ovld __cnfn convert_float2_rtn(ushort2);
+float2 __ovld __cnfn convert_float2(ushort2);
+float2 __ovld __cnfn convert_float2_rte(int2);
+float2 __ovld __cnfn convert_float2_rtz(int2);
+float2 __ovld __cnfn convert_float2_rtp(int2);
+float2 __ovld __cnfn convert_float2_rtn(int2);
+float2 __ovld __cnfn convert_float2(int2);
+float2 __ovld __cnfn convert_float2_rte(uint2);
+float2 __ovld __cnfn convert_float2_rtz(uint2);
+float2 __ovld __cnfn convert_float2_rtp(uint2);
+float2 __ovld __cnfn convert_float2_rtn(uint2);
+float2 __ovld __cnfn convert_float2(uint2);
+float2 __ovld __cnfn convert_float2_rte(long2);
+float2 __ovld __cnfn convert_float2_rtz(long2);
+float2 __ovld __cnfn convert_float2_rtp(long2);
+float2 __ovld __cnfn convert_float2_rtn(long2);
+float2 __ovld __cnfn convert_float2(long2);
+float2 __ovld __cnfn convert_float2_rte(ulong2);
+float2 __ovld __cnfn convert_float2_rtz(ulong2);
+float2 __ovld __cnfn convert_float2_rtp(ulong2);
+float2 __ovld __cnfn convert_float2_rtn(ulong2);
+float2 __ovld __cnfn convert_float2(ulong2);
+float2 __ovld __cnfn convert_float2_rte(float2);
+float2 __ovld __cnfn convert_float2_rtz(float2);
+float2 __ovld __cnfn convert_float2_rtp(float2);
+float2 __ovld __cnfn convert_float2_rtn(float2);
+float2 __ovld __cnfn convert_float2(float2);
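+// The 3-component vector conversions that follow repeat the same
+// rounding/saturation overload pattern as the 2-component set above.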
+char3 __ovld __cnfn convert_char3_rte(char3);
+char3 __ovld __cnfn convert_char3_sat_rte(char3);
+char3 __ovld __cnfn convert_char3_rtz(char3);
+char3 __ovld __cnfn convert_char3_sat_rtz(char3);
+char3 __ovld __cnfn convert_char3_rtp(char3);
+char3 __ovld __cnfn convert_char3_sat_rtp(char3);
+char3 __ovld __cnfn convert_char3_rtn(char3);
+char3 __ovld __cnfn convert_char3_sat_rtn(char3);
+char3 __ovld __cnfn convert_char3(char3);
+char3 __ovld __cnfn convert_char3_sat(char3);
+char3 __ovld __cnfn convert_char3_rte(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
+char3 __ovld __cnfn convert_char3_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_rtn(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
+char3 __ovld __cnfn convert_char3(uchar3);
+char3 __ovld __cnfn convert_char3_sat(uchar3);
+char3 __ovld __cnfn convert_char3_rte(short3);
+char3 __ovld __cnfn convert_char3_sat_rte(short3);
+char3 __ovld __cnfn convert_char3_rtz(short3);
+char3 __ovld __cnfn convert_char3_sat_rtz(short3);
+char3 __ovld __cnfn convert_char3_rtp(short3);
+char3 __ovld __cnfn convert_char3_sat_rtp(short3);
+char3 __ovld __cnfn convert_char3_rtn(short3);
+char3 __ovld __cnfn convert_char3_sat_rtn(short3);
+char3 __ovld __cnfn convert_char3(short3);
+char3 __ovld __cnfn convert_char3_sat(short3);
+char3 __ovld __cnfn convert_char3_rte(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
+char3 __ovld __cnfn convert_char3_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_rtn(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
+char3 __ovld __cnfn convert_char3(ushort3);
+char3 __ovld __cnfn convert_char3_sat(ushort3);
+char3 __ovld __cnfn convert_char3_rte(int3);
+char3 __ovld __cnfn convert_char3_sat_rte(int3);
+char3 __ovld __cnfn convert_char3_rtz(int3);
+char3 __ovld __cnfn convert_char3_sat_rtz(int3);
+char3 __ovld __cnfn convert_char3_rtp(int3);
+char3 __ovld __cnfn convert_char3_sat_rtp(int3);
+char3 __ovld __cnfn convert_char3_rtn(int3);
+char3 __ovld __cnfn convert_char3_sat_rtn(int3);
+char3 __ovld __cnfn convert_char3(int3);
+char3 __ovld __cnfn convert_char3_sat(int3);
+char3 __ovld __cnfn convert_char3_rte(uint3);
+char3 __ovld __cnfn convert_char3_sat_rte(uint3);
+char3 __ovld __cnfn convert_char3_rtz(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
+char3 __ovld __cnfn convert_char3_rtp(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
+char3 __ovld __cnfn convert_char3_rtn(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
+char3 __ovld __cnfn convert_char3(uint3);
+char3 __ovld __cnfn convert_char3_sat(uint3);
+char3 __ovld __cnfn convert_char3_rte(long3);
+char3 __ovld __cnfn convert_char3_sat_rte(long3);
+char3 __ovld __cnfn convert_char3_rtz(long3);
+char3 __ovld __cnfn convert_char3_sat_rtz(long3);
+char3 __ovld __cnfn convert_char3_rtp(long3);
+char3 __ovld __cnfn convert_char3_sat_rtp(long3);
+char3 __ovld __cnfn convert_char3_rtn(long3);
+char3 __ovld __cnfn convert_char3_sat_rtn(long3);
+char3 __ovld __cnfn convert_char3(long3);
+char3 __ovld __cnfn convert_char3_sat(long3);
+char3 __ovld __cnfn convert_char3_rte(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
+char3 __ovld __cnfn convert_char3_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_rtn(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
+char3 __ovld __cnfn convert_char3(ulong3);
+char3 __ovld __cnfn convert_char3_sat(ulong3);
+char3 __ovld __cnfn convert_char3_rte(float3);
+char3 __ovld __cnfn convert_char3_sat_rte(float3);
+char3 __ovld __cnfn convert_char3_rtz(float3);
+char3 __ovld __cnfn convert_char3_sat_rtz(float3);
+char3 __ovld __cnfn convert_char3_rtp(float3);
+char3 __ovld __cnfn convert_char3_sat_rtp(float3);
+char3 __ovld __cnfn convert_char3_rtn(float3);
+char3 __ovld __cnfn convert_char3_sat_rtn(float3);
+char3 __ovld __cnfn convert_char3(float3);
+char3 __ovld __cnfn convert_char3_sat(float3);
+uchar3 __ovld __cnfn convert_uchar3_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat(char3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat(short3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat(int3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat(long3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat(float3);
+short3 __ovld __cnfn convert_short3_rte(char3);
+short3 __ovld __cnfn convert_short3_sat_rte(char3);
+short3 __ovld __cnfn convert_short3_rtz(char3);
+short3 __ovld __cnfn convert_short3_sat_rtz(char3);
+short3 __ovld __cnfn convert_short3_rtp(char3);
+short3 __ovld __cnfn convert_short3_sat_rtp(char3);
+short3 __ovld __cnfn convert_short3_rtn(char3);
+short3 __ovld __cnfn convert_short3_sat_rtn(char3);
+short3 __ovld __cnfn convert_short3(char3);
+short3 __ovld __cnfn convert_short3_sat(char3);
+short3 __ovld __cnfn convert_short3_rte(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
+short3 __ovld __cnfn convert_short3_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_rtn(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
+short3 __ovld __cnfn convert_short3(uchar3);
+short3 __ovld __cnfn convert_short3_sat(uchar3);
+short3 __ovld __cnfn convert_short3_rte(short3);
+short3 __ovld __cnfn convert_short3_sat_rte(short3);
+short3 __ovld __cnfn convert_short3_rtz(short3);
+short3 __ovld __cnfn convert_short3_sat_rtz(short3);
+short3 __ovld __cnfn convert_short3_rtp(short3);
+short3 __ovld __cnfn convert_short3_sat_rtp(short3);
+short3 __ovld __cnfn convert_short3_rtn(short3);
+short3 __ovld __cnfn convert_short3_sat_rtn(short3);
+short3 __ovld __cnfn convert_short3(short3);
+short3 __ovld __cnfn convert_short3_sat(short3);
+short3 __ovld __cnfn convert_short3_rte(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
+short3 __ovld __cnfn convert_short3_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_rtn(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
+short3 __ovld __cnfn convert_short3(ushort3);
+short3 __ovld __cnfn convert_short3_sat(ushort3);
+short3 __ovld __cnfn convert_short3_rte(int3);
+short3 __ovld __cnfn convert_short3_sat_rte(int3);
+short3 __ovld __cnfn convert_short3_rtz(int3);
+short3 __ovld __cnfn convert_short3_sat_rtz(int3);
+short3 __ovld __cnfn convert_short3_rtp(int3);
+short3 __ovld __cnfn convert_short3_sat_rtp(int3);
+short3 __ovld __cnfn convert_short3_rtn(int3);
+short3 __ovld __cnfn convert_short3_sat_rtn(int3);
+short3 __ovld __cnfn convert_short3(int3);
+short3 __ovld __cnfn convert_short3_sat(int3);
+short3 __ovld __cnfn convert_short3_rte(uint3);
+short3 __ovld __cnfn convert_short3_sat_rte(uint3);
+short3 __ovld __cnfn convert_short3_rtz(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
+short3 __ovld __cnfn convert_short3_rtp(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
+short3 __ovld __cnfn convert_short3_rtn(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
+short3 __ovld __cnfn convert_short3(uint3);
+short3 __ovld __cnfn convert_short3_sat(uint3);
+short3 __ovld __cnfn convert_short3_rte(long3);
+short3 __ovld __cnfn convert_short3_sat_rte(long3);
+short3 __ovld __cnfn convert_short3_rtz(long3);
+short3 __ovld __cnfn convert_short3_sat_rtz(long3);
+short3 __ovld __cnfn convert_short3_rtp(long3);
+short3 __ovld __cnfn convert_short3_sat_rtp(long3);
+short3 __ovld __cnfn convert_short3_rtn(long3);
+short3 __ovld __cnfn convert_short3_sat_rtn(long3);
+short3 __ovld __cnfn convert_short3(long3);
+short3 __ovld __cnfn convert_short3_sat(long3);
+short3 __ovld __cnfn convert_short3_rte(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
+short3 __ovld __cnfn convert_short3_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_rtn(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
+short3 __ovld __cnfn convert_short3(ulong3);
+short3 __ovld __cnfn convert_short3_sat(ulong3);
+short3 __ovld __cnfn convert_short3_rte(float3);
+short3 __ovld __cnfn convert_short3_sat_rte(float3);
+short3 __ovld __cnfn convert_short3_rtz(float3);
+short3 __ovld __cnfn convert_short3_sat_rtz(float3);
+short3 __ovld __cnfn convert_short3_rtp(float3);
+short3 __ovld __cnfn convert_short3_sat_rtp(float3);
+short3 __ovld __cnfn convert_short3_rtn(float3);
+short3 __ovld __cnfn convert_short3_sat_rtn(float3);
+short3 __ovld __cnfn convert_short3(float3);
+short3 __ovld __cnfn convert_short3_sat(float3);
+ushort3 __ovld __cnfn convert_ushort3_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat(char3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat(short3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat(int3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat(long3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat(float3);
+int3 __ovld __cnfn convert_int3_rte(char3);
+int3 __ovld __cnfn convert_int3_sat_rte(char3);
+int3 __ovld __cnfn convert_int3_rtz(char3);
+int3 __ovld __cnfn convert_int3_sat_rtz(char3);
+int3 __ovld __cnfn convert_int3_rtp(char3);
+int3 __ovld __cnfn convert_int3_sat_rtp(char3);
+int3 __ovld __cnfn convert_int3_rtn(char3);
+int3 __ovld __cnfn convert_int3_sat_rtn(char3);
+int3 __ovld __cnfn convert_int3(char3);
+int3 __ovld __cnfn convert_int3_sat(char3);
+int3 __ovld __cnfn convert_int3_rte(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
+int3 __ovld __cnfn convert_int3_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_rtn(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
+int3 __ovld __cnfn convert_int3(uchar3);
+int3 __ovld __cnfn convert_int3_sat(uchar3);
+int3 __ovld __cnfn convert_int3_rte(short3);
+int3 __ovld __cnfn convert_int3_sat_rte(short3);
+int3 __ovld __cnfn convert_int3_rtz(short3);
+int3 __ovld __cnfn convert_int3_sat_rtz(short3);
+int3 __ovld __cnfn convert_int3_rtp(short3);
+int3 __ovld __cnfn convert_int3_sat_rtp(short3);
+int3 __ovld __cnfn convert_int3_rtn(short3);
+int3 __ovld __cnfn convert_int3_sat_rtn(short3);
+int3 __ovld __cnfn convert_int3(short3);
+int3 __ovld __cnfn convert_int3_sat(short3);
+int3 __ovld __cnfn convert_int3_rte(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
+int3 __ovld __cnfn convert_int3_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_rtn(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
+int3 __ovld __cnfn convert_int3(ushort3);
+int3 __ovld __cnfn convert_int3_sat(ushort3);
+int3 __ovld __cnfn convert_int3_rte(int3);
+int3 __ovld __cnfn convert_int3_sat_rte(int3);
+int3 __ovld __cnfn convert_int3_rtz(int3);
+int3 __ovld __cnfn convert_int3_sat_rtz(int3);
+int3 __ovld __cnfn convert_int3_rtp(int3);
+int3 __ovld __cnfn convert_int3_sat_rtp(int3);
+int3 __ovld __cnfn convert_int3_rtn(int3);
+int3 __ovld __cnfn convert_int3_sat_rtn(int3);
+int3 __ovld __cnfn convert_int3(int3);
+int3 __ovld __cnfn convert_int3_sat(int3);
+int3 __ovld __cnfn convert_int3_rte(uint3);
+int3 __ovld __cnfn convert_int3_sat_rte(uint3);
+int3 __ovld __cnfn convert_int3_rtz(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
+int3 __ovld __cnfn convert_int3_rtp(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
+int3 __ovld __cnfn convert_int3_rtn(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
+int3 __ovld __cnfn convert_int3(uint3);
+int3 __ovld __cnfn convert_int3_sat(uint3);
+int3 __ovld __cnfn convert_int3_rte(long3);
+int3 __ovld __cnfn convert_int3_sat_rte(long3);
+int3 __ovld __cnfn convert_int3_rtz(long3);
+int3 __ovld __cnfn convert_int3_sat_rtz(long3);
+int3 __ovld __cnfn convert_int3_rtp(long3);
+int3 __ovld __cnfn convert_int3_sat_rtp(long3);
+int3 __ovld __cnfn convert_int3_rtn(long3);
+int3 __ovld __cnfn convert_int3_sat_rtn(long3);
+int3 __ovld __cnfn convert_int3(long3);
+int3 __ovld __cnfn convert_int3_sat(long3);
+int3 __ovld __cnfn convert_int3_rte(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
+int3 __ovld __cnfn convert_int3_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_rtn(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
+int3 __ovld __cnfn convert_int3(ulong3);
+int3 __ovld __cnfn convert_int3_sat(ulong3);
+int3 __ovld __cnfn convert_int3_rte(float3);
+int3 __ovld __cnfn convert_int3_sat_rte(float3);
+int3 __ovld __cnfn convert_int3_rtz(float3);
+int3 __ovld __cnfn convert_int3_sat_rtz(float3);
+int3 __ovld __cnfn convert_int3_rtp(float3);
+int3 __ovld __cnfn convert_int3_sat_rtp(float3);
+int3 __ovld __cnfn convert_int3_rtn(float3);
+int3 __ovld __cnfn convert_int3_sat_rtn(float3);
+int3 __ovld __cnfn convert_int3(float3);
+int3 __ovld __cnfn convert_int3_sat(float3);
+uint3 __ovld __cnfn convert_uint3_rte(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
+uint3 __ovld __cnfn convert_uint3_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_rtn(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
+uint3 __ovld __cnfn convert_uint3(char3);
+uint3 __ovld __cnfn convert_uint3_sat(char3);
+uint3 __ovld __cnfn convert_uint3_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat(uchar3);
+uint3 __ovld __cnfn convert_uint3_rte(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
+uint3 __ovld __cnfn convert_uint3_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_rtn(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
+uint3 __ovld __cnfn convert_uint3(short3);
+uint3 __ovld __cnfn convert_uint3_sat(short3);
+uint3 __ovld __cnfn convert_uint3_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat(ushort3);
+uint3 __ovld __cnfn convert_uint3_rte(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
+uint3 __ovld __cnfn convert_uint3_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_rtn(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
+uint3 __ovld __cnfn convert_uint3(int3);
+uint3 __ovld __cnfn convert_uint3_sat(int3);
+uint3 __ovld __cnfn convert_uint3_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3(uint3);
+uint3 __ovld __cnfn convert_uint3_sat(uint3);
+uint3 __ovld __cnfn convert_uint3_rte(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
+uint3 __ovld __cnfn convert_uint3_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_rtn(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
+uint3 __ovld __cnfn convert_uint3(long3);
+uint3 __ovld __cnfn convert_uint3_sat(long3);
+uint3 __ovld __cnfn convert_uint3_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat(ulong3);
+uint3 __ovld __cnfn convert_uint3_rte(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
+uint3 __ovld __cnfn convert_uint3_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_rtn(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
+uint3 __ovld __cnfn convert_uint3(float3);
+uint3 __ovld __cnfn convert_uint3_sat(float3);
+long3 __ovld __cnfn convert_long3_rte(char3);
+long3 __ovld __cnfn convert_long3_sat_rte(char3);
+long3 __ovld __cnfn convert_long3_rtz(char3);
+long3 __ovld __cnfn convert_long3_sat_rtz(char3);
+long3 __ovld __cnfn convert_long3_rtp(char3);
+long3 __ovld __cnfn convert_long3_sat_rtp(char3);
+long3 __ovld __cnfn convert_long3_rtn(char3);
+long3 __ovld __cnfn convert_long3_sat_rtn(char3);
+long3 __ovld __cnfn convert_long3(char3);
+long3 __ovld __cnfn convert_long3_sat(char3);
+long3 __ovld __cnfn convert_long3_rte(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
+long3 __ovld __cnfn convert_long3_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_rtn(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
+long3 __ovld __cnfn convert_long3(uchar3);
+long3 __ovld __cnfn convert_long3_sat(uchar3);
+long3 __ovld __cnfn convert_long3_rte(short3);
+long3 __ovld __cnfn convert_long3_sat_rte(short3);
+long3 __ovld __cnfn convert_long3_rtz(short3);
+long3 __ovld __cnfn convert_long3_sat_rtz(short3);
+long3 __ovld __cnfn convert_long3_rtp(short3);
+long3 __ovld __cnfn convert_long3_sat_rtp(short3);
+long3 __ovld __cnfn convert_long3_rtn(short3);
+long3 __ovld __cnfn convert_long3_sat_rtn(short3);
+long3 __ovld __cnfn convert_long3(short3);
+long3 __ovld __cnfn convert_long3_sat(short3);
+long3 __ovld __cnfn convert_long3_rte(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
+long3 __ovld __cnfn convert_long3_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_rtn(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
+long3 __ovld __cnfn convert_long3(ushort3);
+long3 __ovld __cnfn convert_long3_sat(ushort3);
+long3 __ovld __cnfn convert_long3_rte(int3);
+long3 __ovld __cnfn convert_long3_sat_rte(int3);
+long3 __ovld __cnfn convert_long3_rtz(int3);
+long3 __ovld __cnfn convert_long3_sat_rtz(int3);
+long3 __ovld __cnfn convert_long3_rtp(int3);
+long3 __ovld __cnfn convert_long3_sat_rtp(int3);
+long3 __ovld __cnfn convert_long3_rtn(int3);
+long3 __ovld __cnfn convert_long3_sat_rtn(int3);
+long3 __ovld __cnfn convert_long3(int3);
+long3 __ovld __cnfn convert_long3_sat(int3);
+long3 __ovld __cnfn convert_long3_rte(uint3);
+long3 __ovld __cnfn convert_long3_sat_rte(uint3);
+long3 __ovld __cnfn convert_long3_rtz(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
+long3 __ovld __cnfn convert_long3_rtp(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
+long3 __ovld __cnfn convert_long3_rtn(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
+long3 __ovld __cnfn convert_long3(uint3);
+long3 __ovld __cnfn convert_long3_sat(uint3);
+long3 __ovld __cnfn convert_long3_rte(long3);
+long3 __ovld __cnfn convert_long3_sat_rte(long3);
+long3 __ovld __cnfn convert_long3_rtz(long3);
+long3 __ovld __cnfn convert_long3_sat_rtz(long3);
+long3 __ovld __cnfn convert_long3_rtp(long3);
+long3 __ovld __cnfn convert_long3_sat_rtp(long3);
+long3 __ovld __cnfn convert_long3_rtn(long3);
+long3 __ovld __cnfn convert_long3_sat_rtn(long3);
+long3 __ovld __cnfn convert_long3(long3);
+long3 __ovld __cnfn convert_long3_sat(long3);
+long3 __ovld __cnfn convert_long3_rte(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
+long3 __ovld __cnfn convert_long3_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_rtn(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
+long3 __ovld __cnfn convert_long3(ulong3);
+long3 __ovld __cnfn convert_long3_sat(ulong3);
+long3 __ovld __cnfn convert_long3_rte(float3);
+long3 __ovld __cnfn convert_long3_sat_rte(float3);
+long3 __ovld __cnfn convert_long3_rtz(float3);
+long3 __ovld __cnfn convert_long3_sat_rtz(float3);
+long3 __ovld __cnfn convert_long3_rtp(float3);
+long3 __ovld __cnfn convert_long3_sat_rtp(float3);
+long3 __ovld __cnfn convert_long3_rtn(float3);
+long3 __ovld __cnfn convert_long3_sat_rtn(float3);
+long3 __ovld __cnfn convert_long3(float3);
+long3 __ovld __cnfn convert_long3_sat(float3);
+ulong3 __ovld __cnfn convert_ulong3_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat(char3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat(short3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat(int3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat(long3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat(float3);
+float3 __ovld __cnfn convert_float3_rte(char3);
+float3 __ovld __cnfn convert_float3_rtz(char3);
+float3 __ovld __cnfn convert_float3_rtp(char3);
+float3 __ovld __cnfn convert_float3_rtn(char3);
+float3 __ovld __cnfn convert_float3(char3);
+float3 __ovld __cnfn convert_float3_rte(uchar3);
+float3 __ovld __cnfn convert_float3_rtz(uchar3);
+float3 __ovld __cnfn convert_float3_rtp(uchar3);
+float3 __ovld __cnfn convert_float3_rtn(uchar3);
+float3 __ovld __cnfn convert_float3(uchar3);
+float3 __ovld __cnfn convert_float3_rte(short3);
+float3 __ovld __cnfn convert_float3_rtz(short3);
+float3 __ovld __cnfn convert_float3_rtp(short3);
+float3 __ovld __cnfn convert_float3_rtn(short3);
+float3 __ovld __cnfn convert_float3(short3);
+float3 __ovld __cnfn convert_float3_rte(ushort3);
+float3 __ovld __cnfn convert_float3_rtz(ushort3);
+float3 __ovld __cnfn convert_float3_rtp(ushort3);
+float3 __ovld __cnfn convert_float3_rtn(ushort3);
+float3 __ovld __cnfn convert_float3(ushort3);
+float3 __ovld __cnfn convert_float3_rte(int3);
+float3 __ovld __cnfn convert_float3_rtz(int3);
+float3 __ovld __cnfn convert_float3_rtp(int3);
+float3 __ovld __cnfn convert_float3_rtn(int3);
+float3 __ovld __cnfn convert_float3(int3);
+float3 __ovld __cnfn convert_float3_rte(uint3);
+float3 __ovld __cnfn convert_float3_rtz(uint3);
+float3 __ovld __cnfn convert_float3_rtp(uint3);
+float3 __ovld __cnfn convert_float3_rtn(uint3);
+float3 __ovld __cnfn convert_float3(uint3);
+float3 __ovld __cnfn convert_float3_rte(long3);
+float3 __ovld __cnfn convert_float3_rtz(long3);
+float3 __ovld __cnfn convert_float3_rtp(long3);
+float3 __ovld __cnfn convert_float3_rtn(long3);
+float3 __ovld __cnfn convert_float3(long3);
+float3 __ovld __cnfn convert_float3_rte(ulong3);
+float3 __ovld __cnfn convert_float3_rtz(ulong3);
+float3 __ovld __cnfn convert_float3_rtp(ulong3);
+float3 __ovld __cnfn convert_float3_rtn(ulong3);
+float3 __ovld __cnfn convert_float3(ulong3);
+float3 __ovld __cnfn convert_float3_rte(float3);
+float3 __ovld __cnfn convert_float3_rtz(float3);
+float3 __ovld __cnfn convert_float3_rtp(float3);
+float3 __ovld __cnfn convert_float3_rtn(float3);
+float3 __ovld __cnfn convert_float3(float3);
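+// The block above completes the 3-element vector conversions. Throughout
+// this header the declarations follow the OpenCL C explicit-conversion
+// scheme (s6.2.3):
+//   destType convert_destType[_sat][_roundingMode](srcType)
+// _sat clamps out-of-range values to the destination range and exists only
+// for integer destination types, which is why the convert_floatN overloads
+// above carry no _sat forms. The rounding suffixes are _rte (to nearest
+// even), _rtz (toward zero), _rtp (toward +infinity) and _rtn (toward
+// -infinity); with no suffix, conversions to integer types round toward
+// zero and conversions to floating-point types round to nearest even.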
+char4 __ovld __cnfn convert_char4_rte(char4);
+char4 __ovld __cnfn convert_char4_sat_rte(char4);
+char4 __ovld __cnfn convert_char4_rtz(char4);
+char4 __ovld __cnfn convert_char4_sat_rtz(char4);
+char4 __ovld __cnfn convert_char4_rtp(char4);
+char4 __ovld __cnfn convert_char4_sat_rtp(char4);
+char4 __ovld __cnfn convert_char4_rtn(char4);
+char4 __ovld __cnfn convert_char4_sat_rtn(char4);
+char4 __ovld __cnfn convert_char4(char4);
+char4 __ovld __cnfn convert_char4_sat(char4);
+char4 __ovld __cnfn convert_char4_rte(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
+char4 __ovld __cnfn convert_char4_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_rtn(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
+char4 __ovld __cnfn convert_char4(uchar4);
+char4 __ovld __cnfn convert_char4_sat(uchar4);
+char4 __ovld __cnfn convert_char4_rte(short4);
+char4 __ovld __cnfn convert_char4_sat_rte(short4);
+char4 __ovld __cnfn convert_char4_rtz(short4);
+char4 __ovld __cnfn convert_char4_sat_rtz(short4);
+char4 __ovld __cnfn convert_char4_rtp(short4);
+char4 __ovld __cnfn convert_char4_sat_rtp(short4);
+char4 __ovld __cnfn convert_char4_rtn(short4);
+char4 __ovld __cnfn convert_char4_sat_rtn(short4);
+char4 __ovld __cnfn convert_char4(short4);
+char4 __ovld __cnfn convert_char4_sat(short4);
+char4 __ovld __cnfn convert_char4_rte(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
+char4 __ovld __cnfn convert_char4_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_rtn(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
+char4 __ovld __cnfn convert_char4(ushort4);
+char4 __ovld __cnfn convert_char4_sat(ushort4);
+char4 __ovld __cnfn convert_char4_rte(int4);
+char4 __ovld __cnfn convert_char4_sat_rte(int4);
+char4 __ovld __cnfn convert_char4_rtz(int4);
+char4 __ovld __cnfn convert_char4_sat_rtz(int4);
+char4 __ovld __cnfn convert_char4_rtp(int4);
+char4 __ovld __cnfn convert_char4_sat_rtp(int4);
+char4 __ovld __cnfn convert_char4_rtn(int4);
+char4 __ovld __cnfn convert_char4_sat_rtn(int4);
+char4 __ovld __cnfn convert_char4(int4);
+char4 __ovld __cnfn convert_char4_sat(int4);
+char4 __ovld __cnfn convert_char4_rte(uint4);
+char4 __ovld __cnfn convert_char4_sat_rte(uint4);
+char4 __ovld __cnfn convert_char4_rtz(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
+char4 __ovld __cnfn convert_char4_rtp(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
+char4 __ovld __cnfn convert_char4_rtn(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
+char4 __ovld __cnfn convert_char4(uint4);
+char4 __ovld __cnfn convert_char4_sat(uint4);
+char4 __ovld __cnfn convert_char4_rte(long4);
+char4 __ovld __cnfn convert_char4_sat_rte(long4);
+char4 __ovld __cnfn convert_char4_rtz(long4);
+char4 __ovld __cnfn convert_char4_sat_rtz(long4);
+char4 __ovld __cnfn convert_char4_rtp(long4);
+char4 __ovld __cnfn convert_char4_sat_rtp(long4);
+char4 __ovld __cnfn convert_char4_rtn(long4);
+char4 __ovld __cnfn convert_char4_sat_rtn(long4);
+char4 __ovld __cnfn convert_char4(long4);
+char4 __ovld __cnfn convert_char4_sat(long4);
+char4 __ovld __cnfn convert_char4_rte(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
+char4 __ovld __cnfn convert_char4_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_rtn(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
+char4 __ovld __cnfn convert_char4(ulong4);
+char4 __ovld __cnfn convert_char4_sat(ulong4);
+char4 __ovld __cnfn convert_char4_rte(float4);
+char4 __ovld __cnfn convert_char4_sat_rte(float4);
+char4 __ovld __cnfn convert_char4_rtz(float4);
+char4 __ovld __cnfn convert_char4_sat_rtz(float4);
+char4 __ovld __cnfn convert_char4_rtp(float4);
+char4 __ovld __cnfn convert_char4_sat_rtp(float4);
+char4 __ovld __cnfn convert_char4_rtn(float4);
+char4 __ovld __cnfn convert_char4_sat_rtn(float4);
+char4 __ovld __cnfn convert_char4(float4);
+char4 __ovld __cnfn convert_char4_sat(float4);
+uchar4 __ovld __cnfn convert_uchar4_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat(char4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat(short4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat(int4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat(long4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat(float4);
+short4 __ovld __cnfn convert_short4_rte(char4);
+short4 __ovld __cnfn convert_short4_sat_rte(char4);
+short4 __ovld __cnfn convert_short4_rtz(char4);
+short4 __ovld __cnfn convert_short4_sat_rtz(char4);
+short4 __ovld __cnfn convert_short4_rtp(char4);
+short4 __ovld __cnfn convert_short4_sat_rtp(char4);
+short4 __ovld __cnfn convert_short4_rtn(char4);
+short4 __ovld __cnfn convert_short4_sat_rtn(char4);
+short4 __ovld __cnfn convert_short4(char4);
+short4 __ovld __cnfn convert_short4_sat(char4);
+short4 __ovld __cnfn convert_short4_rte(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
+short4 __ovld __cnfn convert_short4_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_rtn(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
+short4 __ovld __cnfn convert_short4(uchar4);
+short4 __ovld __cnfn convert_short4_sat(uchar4);
+short4 __ovld __cnfn convert_short4_rte(short4);
+short4 __ovld __cnfn convert_short4_sat_rte(short4);
+short4 __ovld __cnfn convert_short4_rtz(short4);
+short4 __ovld __cnfn convert_short4_sat_rtz(short4);
+short4 __ovld __cnfn convert_short4_rtp(short4);
+short4 __ovld __cnfn convert_short4_sat_rtp(short4);
+short4 __ovld __cnfn convert_short4_rtn(short4);
+short4 __ovld __cnfn convert_short4_sat_rtn(short4);
+short4 __ovld __cnfn convert_short4(short4);
+short4 __ovld __cnfn convert_short4_sat(short4);
+short4 __ovld __cnfn convert_short4_rte(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
+short4 __ovld __cnfn convert_short4_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_rtn(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
+short4 __ovld __cnfn convert_short4(ushort4);
+short4 __ovld __cnfn convert_short4_sat(ushort4);
+short4 __ovld __cnfn convert_short4_rte(int4);
+short4 __ovld __cnfn convert_short4_sat_rte(int4);
+short4 __ovld __cnfn convert_short4_rtz(int4);
+short4 __ovld __cnfn convert_short4_sat_rtz(int4);
+short4 __ovld __cnfn convert_short4_rtp(int4);
+short4 __ovld __cnfn convert_short4_sat_rtp(int4);
+short4 __ovld __cnfn convert_short4_rtn(int4);
+short4 __ovld __cnfn convert_short4_sat_rtn(int4);
+short4 __ovld __cnfn convert_short4(int4);
+short4 __ovld __cnfn convert_short4_sat(int4);
+short4 __ovld __cnfn convert_short4_rte(uint4);
+short4 __ovld __cnfn convert_short4_sat_rte(uint4);
+short4 __ovld __cnfn convert_short4_rtz(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
+short4 __ovld __cnfn convert_short4_rtp(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
+short4 __ovld __cnfn convert_short4_rtn(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
+short4 __ovld __cnfn convert_short4(uint4);
+short4 __ovld __cnfn convert_short4_sat(uint4);
+short4 __ovld __cnfn convert_short4_rte(long4);
+short4 __ovld __cnfn convert_short4_sat_rte(long4);
+short4 __ovld __cnfn convert_short4_rtz(long4);
+short4 __ovld __cnfn convert_short4_sat_rtz(long4);
+short4 __ovld __cnfn convert_short4_rtp(long4);
+short4 __ovld __cnfn convert_short4_sat_rtp(long4);
+short4 __ovld __cnfn convert_short4_rtn(long4);
+short4 __ovld __cnfn convert_short4_sat_rtn(long4);
+short4 __ovld __cnfn convert_short4(long4);
+short4 __ovld __cnfn convert_short4_sat(long4);
+short4 __ovld __cnfn convert_short4_rte(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
+short4 __ovld __cnfn convert_short4_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_rtn(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
+short4 __ovld __cnfn convert_short4(ulong4);
+short4 __ovld __cnfn convert_short4_sat(ulong4);
+short4 __ovld __cnfn convert_short4_rte(float4);
+short4 __ovld __cnfn convert_short4_sat_rte(float4);
+short4 __ovld __cnfn convert_short4_rtz(float4);
+short4 __ovld __cnfn convert_short4_sat_rtz(float4);
+short4 __ovld __cnfn convert_short4_rtp(float4);
+short4 __ovld __cnfn convert_short4_sat_rtp(float4);
+short4 __ovld __cnfn convert_short4_rtn(float4);
+short4 __ovld __cnfn convert_short4_sat_rtn(float4);
+short4 __ovld __cnfn convert_short4(float4);
+short4 __ovld __cnfn convert_short4_sat(float4);
+ushort4 __ovld __cnfn convert_ushort4_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat(char4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat(short4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat(int4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat(long4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat(float4);
+int4 __ovld __cnfn convert_int4_rte(char4);
+int4 __ovld __cnfn convert_int4_sat_rte(char4);
+int4 __ovld __cnfn convert_int4_rtz(char4);
+int4 __ovld __cnfn convert_int4_sat_rtz(char4);
+int4 __ovld __cnfn convert_int4_rtp(char4);
+int4 __ovld __cnfn convert_int4_sat_rtp(char4);
+int4 __ovld __cnfn convert_int4_rtn(char4);
+int4 __ovld __cnfn convert_int4_sat_rtn(char4);
+int4 __ovld __cnfn convert_int4(char4);
+int4 __ovld __cnfn convert_int4_sat(char4);
+int4 __ovld __cnfn convert_int4_rte(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
+int4 __ovld __cnfn convert_int4_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_rtn(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
+int4 __ovld __cnfn convert_int4(uchar4);
+int4 __ovld __cnfn convert_int4_sat(uchar4);
+int4 __ovld __cnfn convert_int4_rte(short4);
+int4 __ovld __cnfn convert_int4_sat_rte(short4);
+int4 __ovld __cnfn convert_int4_rtz(short4);
+int4 __ovld __cnfn convert_int4_sat_rtz(short4);
+int4 __ovld __cnfn convert_int4_rtp(short4);
+int4 __ovld __cnfn convert_int4_sat_rtp(short4);
+int4 __ovld __cnfn convert_int4_rtn(short4);
+int4 __ovld __cnfn convert_int4_sat_rtn(short4);
+int4 __ovld __cnfn convert_int4(short4);
+int4 __ovld __cnfn convert_int4_sat(short4);
+int4 __ovld __cnfn convert_int4_rte(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
+int4 __ovld __cnfn convert_int4_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_rtn(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
+int4 __ovld __cnfn convert_int4(ushort4);
+int4 __ovld __cnfn convert_int4_sat(ushort4);
+int4 __ovld __cnfn convert_int4_rte(int4);
+int4 __ovld __cnfn convert_int4_sat_rte(int4);
+int4 __ovld __cnfn convert_int4_rtz(int4);
+int4 __ovld __cnfn convert_int4_sat_rtz(int4);
+int4 __ovld __cnfn convert_int4_rtp(int4);
+int4 __ovld __cnfn convert_int4_sat_rtp(int4);
+int4 __ovld __cnfn convert_int4_rtn(int4);
+int4 __ovld __cnfn convert_int4_sat_rtn(int4);
+int4 __ovld __cnfn convert_int4(int4);
+int4 __ovld __cnfn convert_int4_sat(int4);
+int4 __ovld __cnfn convert_int4_rte(uint4);
+int4 __ovld __cnfn convert_int4_sat_rte(uint4);
+int4 __ovld __cnfn convert_int4_rtz(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
+int4 __ovld __cnfn convert_int4_rtp(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
+int4 __ovld __cnfn convert_int4_rtn(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
+int4 __ovld __cnfn convert_int4(uint4);
+int4 __ovld __cnfn convert_int4_sat(uint4);
+int4 __ovld __cnfn convert_int4_rte(long4);
+int4 __ovld __cnfn convert_int4_sat_rte(long4);
+int4 __ovld __cnfn convert_int4_rtz(long4);
+int4 __ovld __cnfn convert_int4_sat_rtz(long4);
+int4 __ovld __cnfn convert_int4_rtp(long4);
+int4 __ovld __cnfn convert_int4_sat_rtp(long4);
+int4 __ovld __cnfn convert_int4_rtn(long4);
+int4 __ovld __cnfn convert_int4_sat_rtn(long4);
+int4 __ovld __cnfn convert_int4(long4);
+int4 __ovld __cnfn convert_int4_sat(long4);
+int4 __ovld __cnfn convert_int4_rte(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
+int4 __ovld __cnfn convert_int4_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_rtn(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
+int4 __ovld __cnfn convert_int4(ulong4);
+int4 __ovld __cnfn convert_int4_sat(ulong4);
+int4 __ovld __cnfn convert_int4_rte(float4);
+int4 __ovld __cnfn convert_int4_sat_rte(float4);
+int4 __ovld __cnfn convert_int4_rtz(float4);
+int4 __ovld __cnfn convert_int4_sat_rtz(float4);
+int4 __ovld __cnfn convert_int4_rtp(float4);
+int4 __ovld __cnfn convert_int4_sat_rtp(float4);
+int4 __ovld __cnfn convert_int4_rtn(float4);
+int4 __ovld __cnfn convert_int4_sat_rtn(float4);
+int4 __ovld __cnfn convert_int4(float4);
+int4 __ovld __cnfn convert_int4_sat(float4);
+uint4 __ovld __cnfn convert_uint4_rte(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
+uint4 __ovld __cnfn convert_uint4_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_rtn(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
+uint4 __ovld __cnfn convert_uint4(char4);
+uint4 __ovld __cnfn convert_uint4_sat(char4);
+uint4 __ovld __cnfn convert_uint4_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat(uchar4);
+uint4 __ovld __cnfn convert_uint4_rte(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
+uint4 __ovld __cnfn convert_uint4_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_rtn(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
+uint4 __ovld __cnfn convert_uint4(short4);
+uint4 __ovld __cnfn convert_uint4_sat(short4);
+uint4 __ovld __cnfn convert_uint4_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat(ushort4);
+uint4 __ovld __cnfn convert_uint4_rte(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
+uint4 __ovld __cnfn convert_uint4_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_rtn(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
+uint4 __ovld __cnfn convert_uint4(int4);
+uint4 __ovld __cnfn convert_uint4_sat(int4);
+uint4 __ovld __cnfn convert_uint4_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4(uint4);
+uint4 __ovld __cnfn convert_uint4_sat(uint4);
+uint4 __ovld __cnfn convert_uint4_rte(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
+uint4 __ovld __cnfn convert_uint4_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_rtn(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
+uint4 __ovld __cnfn convert_uint4(long4);
+uint4 __ovld __cnfn convert_uint4_sat(long4);
+uint4 __ovld __cnfn convert_uint4_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat(ulong4);
+uint4 __ovld __cnfn convert_uint4_rte(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
+uint4 __ovld __cnfn convert_uint4_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_rtn(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
+uint4 __ovld __cnfn convert_uint4(float4);
+uint4 __ovld __cnfn convert_uint4_sat(float4);
+long4 __ovld __cnfn convert_long4_rte(char4);
+long4 __ovld __cnfn convert_long4_sat_rte(char4);
+long4 __ovld __cnfn convert_long4_rtz(char4);
+long4 __ovld __cnfn convert_long4_sat_rtz(char4);
+long4 __ovld __cnfn convert_long4_rtp(char4);
+long4 __ovld __cnfn convert_long4_sat_rtp(char4);
+long4 __ovld __cnfn convert_long4_rtn(char4);
+long4 __ovld __cnfn convert_long4_sat_rtn(char4);
+long4 __ovld __cnfn convert_long4(char4);
+long4 __ovld __cnfn convert_long4_sat(char4);
+long4 __ovld __cnfn convert_long4_rte(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
+long4 __ovld __cnfn convert_long4_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_rtn(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
+long4 __ovld __cnfn convert_long4(uchar4);
+long4 __ovld __cnfn convert_long4_sat(uchar4);
+long4 __ovld __cnfn convert_long4_rte(short4);
+long4 __ovld __cnfn convert_long4_sat_rte(short4);
+long4 __ovld __cnfn convert_long4_rtz(short4);
+long4 __ovld __cnfn convert_long4_sat_rtz(short4);
+long4 __ovld __cnfn convert_long4_rtp(short4);
+long4 __ovld __cnfn convert_long4_sat_rtp(short4);
+long4 __ovld __cnfn convert_long4_rtn(short4);
+long4 __ovld __cnfn convert_long4_sat_rtn(short4);
+long4 __ovld __cnfn convert_long4(short4);
+long4 __ovld __cnfn convert_long4_sat(short4);
+long4 __ovld __cnfn convert_long4_rte(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
+long4 __ovld __cnfn convert_long4_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_rtn(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
+long4 __ovld __cnfn convert_long4(ushort4);
+long4 __ovld __cnfn convert_long4_sat(ushort4);
+long4 __ovld __cnfn convert_long4_rte(int4);
+long4 __ovld __cnfn convert_long4_sat_rte(int4);
+long4 __ovld __cnfn convert_long4_rtz(int4);
+long4 __ovld __cnfn convert_long4_sat_rtz(int4);
+long4 __ovld __cnfn convert_long4_rtp(int4);
+long4 __ovld __cnfn convert_long4_sat_rtp(int4);
+long4 __ovld __cnfn convert_long4_rtn(int4);
+long4 __ovld __cnfn convert_long4_sat_rtn(int4);
+long4 __ovld __cnfn convert_long4(int4);
+long4 __ovld __cnfn convert_long4_sat(int4);
+long4 __ovld __cnfn convert_long4_rte(uint4);
+long4 __ovld __cnfn convert_long4_sat_rte(uint4);
+long4 __ovld __cnfn convert_long4_rtz(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
+long4 __ovld __cnfn convert_long4_rtp(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
+long4 __ovld __cnfn convert_long4_rtn(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
+long4 __ovld __cnfn convert_long4(uint4);
+long4 __ovld __cnfn convert_long4_sat(uint4);
+long4 __ovld __cnfn convert_long4_rte(long4);
+long4 __ovld __cnfn convert_long4_sat_rte(long4);
+long4 __ovld __cnfn convert_long4_rtz(long4);
+long4 __ovld __cnfn convert_long4_sat_rtz(long4);
+long4 __ovld __cnfn convert_long4_rtp(long4);
+long4 __ovld __cnfn convert_long4_sat_rtp(long4);
+long4 __ovld __cnfn convert_long4_rtn(long4);
+long4 __ovld __cnfn convert_long4_sat_rtn(long4);
+long4 __ovld __cnfn convert_long4(long4);
+long4 __ovld __cnfn convert_long4_sat(long4);
+long4 __ovld __cnfn convert_long4_rte(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
+long4 __ovld __cnfn convert_long4_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_rtn(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
+long4 __ovld __cnfn convert_long4(ulong4);
+long4 __ovld __cnfn convert_long4_sat(ulong4);
+long4 __ovld __cnfn convert_long4_rte(float4);
+long4 __ovld __cnfn convert_long4_sat_rte(float4);
+long4 __ovld __cnfn convert_long4_rtz(float4);
+long4 __ovld __cnfn convert_long4_sat_rtz(float4);
+long4 __ovld __cnfn convert_long4_rtp(float4);
+long4 __ovld __cnfn convert_long4_sat_rtp(float4);
+long4 __ovld __cnfn convert_long4_rtn(float4);
+long4 __ovld __cnfn convert_long4_sat_rtn(float4);
+long4 __ovld __cnfn convert_long4(float4);
+long4 __ovld __cnfn convert_long4_sat(float4);
+ulong4 __ovld __cnfn convert_ulong4_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat(char4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat(short4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat(int4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat(long4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat(float4);
+float4 __ovld __cnfn convert_float4_rte(char4);
+float4 __ovld __cnfn convert_float4_rtz(char4);
+float4 __ovld __cnfn convert_float4_rtp(char4);
+float4 __ovld __cnfn convert_float4_rtn(char4);
+float4 __ovld __cnfn convert_float4(char4);
+float4 __ovld __cnfn convert_float4_rte(uchar4);
+float4 __ovld __cnfn convert_float4_rtz(uchar4);
+float4 __ovld __cnfn convert_float4_rtp(uchar4);
+float4 __ovld __cnfn convert_float4_rtn(uchar4);
+float4 __ovld __cnfn convert_float4(uchar4);
+float4 __ovld __cnfn convert_float4_rte(short4);
+float4 __ovld __cnfn convert_float4_rtz(short4);
+float4 __ovld __cnfn convert_float4_rtp(short4);
+float4 __ovld __cnfn convert_float4_rtn(short4);
+float4 __ovld __cnfn convert_float4(short4);
+float4 __ovld __cnfn convert_float4_rte(ushort4);
+float4 __ovld __cnfn convert_float4_rtz(ushort4);
+float4 __ovld __cnfn convert_float4_rtp(ushort4);
+float4 __ovld __cnfn convert_float4_rtn(ushort4);
+float4 __ovld __cnfn convert_float4(ushort4);
+float4 __ovld __cnfn convert_float4_rte(int4);
+float4 __ovld __cnfn convert_float4_rtz(int4);
+float4 __ovld __cnfn convert_float4_rtp(int4);
+float4 __ovld __cnfn convert_float4_rtn(int4);
+float4 __ovld __cnfn convert_float4(int4);
+float4 __ovld __cnfn convert_float4_rte(uint4);
+float4 __ovld __cnfn convert_float4_rtz(uint4);
+float4 __ovld __cnfn convert_float4_rtp(uint4);
+float4 __ovld __cnfn convert_float4_rtn(uint4);
+float4 __ovld __cnfn convert_float4(uint4);
+float4 __ovld __cnfn convert_float4_rte(long4);
+float4 __ovld __cnfn convert_float4_rtz(long4);
+float4 __ovld __cnfn convert_float4_rtp(long4);
+float4 __ovld __cnfn convert_float4_rtn(long4);
+float4 __ovld __cnfn convert_float4(long4);
+float4 __ovld __cnfn convert_float4_rte(ulong4);
+float4 __ovld __cnfn convert_float4_rtz(ulong4);
+float4 __ovld __cnfn convert_float4_rtp(ulong4);
+float4 __ovld __cnfn convert_float4_rtn(ulong4);
+float4 __ovld __cnfn convert_float4(ulong4);
+float4 __ovld __cnfn convert_float4_rte(float4);
+float4 __ovld __cnfn convert_float4_rtz(float4);
+float4 __ovld __cnfn convert_float4_rtp(float4);
+float4 __ovld __cnfn convert_float4_rtn(float4);
+float4 __ovld __cnfn convert_float4(float4);
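+// A minimal usage sketch (not part of the upstream header): the kernel
+// name and parameters below are hypothetical, chosen only to show how a
+// saturating, explicitly rounded conversion such as the float4 -> uchar4
+// overload declared above is typically invoked.
+//
+//   __kernel void pack_pixels(__global const float4 *src,
+//                             __global uchar4 *dst) {
+//     size_t i = get_global_id(0);
+//     // Round to nearest even, then clamp to [0, 255] instead of wrapping.
+//     dst[i] = convert_uchar4_sat_rte(src[i]);
+//   }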
+char8 __ovld __cnfn convert_char8_rte(char8);
+char8 __ovld __cnfn convert_char8_sat_rte(char8);
+char8 __ovld __cnfn convert_char8_rtz(char8);
+char8 __ovld __cnfn convert_char8_sat_rtz(char8);
+char8 __ovld __cnfn convert_char8_rtp(char8);
+char8 __ovld __cnfn convert_char8_sat_rtp(char8);
+char8 __ovld __cnfn convert_char8_rtn(char8);
+char8 __ovld __cnfn convert_char8_sat_rtn(char8);
+char8 __ovld __cnfn convert_char8(char8);
+char8 __ovld __cnfn convert_char8_sat(char8);
+char8 __ovld __cnfn convert_char8_rte(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
+char8 __ovld __cnfn convert_char8_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_rtn(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
+char8 __ovld __cnfn convert_char8(uchar8);
+char8 __ovld __cnfn convert_char8_sat(uchar8);
+char8 __ovld __cnfn convert_char8_rte(short8);
+char8 __ovld __cnfn convert_char8_sat_rte(short8);
+char8 __ovld __cnfn convert_char8_rtz(short8);
+char8 __ovld __cnfn convert_char8_sat_rtz(short8);
+char8 __ovld __cnfn convert_char8_rtp(short8);
+char8 __ovld __cnfn convert_char8_sat_rtp(short8);
+char8 __ovld __cnfn convert_char8_rtn(short8);
+char8 __ovld __cnfn convert_char8_sat_rtn(short8);
+char8 __ovld __cnfn convert_char8(short8);
+char8 __ovld __cnfn convert_char8_sat(short8);
+char8 __ovld __cnfn convert_char8_rte(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
+char8 __ovld __cnfn convert_char8_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_rtn(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
+char8 __ovld __cnfn convert_char8(ushort8);
+char8 __ovld __cnfn convert_char8_sat(ushort8);
+char8 __ovld __cnfn convert_char8_rte(int8);
+char8 __ovld __cnfn convert_char8_sat_rte(int8);
+char8 __ovld __cnfn convert_char8_rtz(int8);
+char8 __ovld __cnfn convert_char8_sat_rtz(int8);
+char8 __ovld __cnfn convert_char8_rtp(int8);
+char8 __ovld __cnfn convert_char8_sat_rtp(int8);
+char8 __ovld __cnfn convert_char8_rtn(int8);
+char8 __ovld __cnfn convert_char8_sat_rtn(int8);
+char8 __ovld __cnfn convert_char8(int8);
+char8 __ovld __cnfn convert_char8_sat(int8);
+char8 __ovld __cnfn convert_char8_rte(uint8);
+char8 __ovld __cnfn convert_char8_sat_rte(uint8);
+char8 __ovld __cnfn convert_char8_rtz(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
+char8 __ovld __cnfn convert_char8_rtp(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
+char8 __ovld __cnfn convert_char8_rtn(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
+char8 __ovld __cnfn convert_char8(uint8);
+char8 __ovld __cnfn convert_char8_sat(uint8);
+char8 __ovld __cnfn convert_char8_rte(long8);
+char8 __ovld __cnfn convert_char8_sat_rte(long8);
+char8 __ovld __cnfn convert_char8_rtz(long8);
+char8 __ovld __cnfn convert_char8_sat_rtz(long8);
+char8 __ovld __cnfn convert_char8_rtp(long8);
+char8 __ovld __cnfn convert_char8_sat_rtp(long8);
+char8 __ovld __cnfn convert_char8_rtn(long8);
+char8 __ovld __cnfn convert_char8_sat_rtn(long8);
+char8 __ovld __cnfn convert_char8(long8);
+char8 __ovld __cnfn convert_char8_sat(long8);
+char8 __ovld __cnfn convert_char8_rte(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
+char8 __ovld __cnfn convert_char8_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_rtn(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
+char8 __ovld __cnfn convert_char8(ulong8);
+char8 __ovld __cnfn convert_char8_sat(ulong8);
+char8 __ovld __cnfn convert_char8_rte(float8);
+char8 __ovld __cnfn convert_char8_sat_rte(float8);
+char8 __ovld __cnfn convert_char8_rtz(float8);
+char8 __ovld __cnfn convert_char8_sat_rtz(float8);
+char8 __ovld __cnfn convert_char8_rtp(float8);
+char8 __ovld __cnfn convert_char8_sat_rtp(float8);
+char8 __ovld __cnfn convert_char8_rtn(float8);
+char8 __ovld __cnfn convert_char8_sat_rtn(float8);
+char8 __ovld __cnfn convert_char8(float8);
+char8 __ovld __cnfn convert_char8_sat(float8);
+uchar8 __ovld __cnfn convert_uchar8_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat(char8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat(short8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat(int8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat(long8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat(float8);
+short8 __ovld __cnfn convert_short8_rte(char8);
+short8 __ovld __cnfn convert_short8_sat_rte(char8);
+short8 __ovld __cnfn convert_short8_rtz(char8);
+short8 __ovld __cnfn convert_short8_sat_rtz(char8);
+short8 __ovld __cnfn convert_short8_rtp(char8);
+short8 __ovld __cnfn convert_short8_sat_rtp(char8);
+short8 __ovld __cnfn convert_short8_rtn(char8);
+short8 __ovld __cnfn convert_short8_sat_rtn(char8);
+short8 __ovld __cnfn convert_short8(char8);
+short8 __ovld __cnfn convert_short8_sat(char8);
+short8 __ovld __cnfn convert_short8_rte(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
+short8 __ovld __cnfn convert_short8_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_rtn(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
+short8 __ovld __cnfn convert_short8(uchar8);
+short8 __ovld __cnfn convert_short8_sat(uchar8);
+short8 __ovld __cnfn convert_short8_rte(short8);
+short8 __ovld __cnfn convert_short8_sat_rte(short8);
+short8 __ovld __cnfn convert_short8_rtz(short8);
+short8 __ovld __cnfn convert_short8_sat_rtz(short8);
+short8 __ovld __cnfn convert_short8_rtp(short8);
+short8 __ovld __cnfn convert_short8_sat_rtp(short8);
+short8 __ovld __cnfn convert_short8_rtn(short8);
+short8 __ovld __cnfn convert_short8_sat_rtn(short8);
+short8 __ovld __cnfn convert_short8(short8);
+short8 __ovld __cnfn convert_short8_sat(short8);
+short8 __ovld __cnfn convert_short8_rte(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
+short8 __ovld __cnfn convert_short8_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_rtn(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
+short8 __ovld __cnfn convert_short8(ushort8);
+short8 __ovld __cnfn convert_short8_sat(ushort8);
+short8 __ovld __cnfn convert_short8_rte(int8);
+short8 __ovld __cnfn convert_short8_sat_rte(int8);
+short8 __ovld __cnfn convert_short8_rtz(int8);
+short8 __ovld __cnfn convert_short8_sat_rtz(int8);
+short8 __ovld __cnfn convert_short8_rtp(int8);
+short8 __ovld __cnfn convert_short8_sat_rtp(int8);
+short8 __ovld __cnfn convert_short8_rtn(int8);
+short8 __ovld __cnfn convert_short8_sat_rtn(int8);
+short8 __ovld __cnfn convert_short8(int8);
+short8 __ovld __cnfn convert_short8_sat(int8);
+short8 __ovld __cnfn convert_short8_rte(uint8);
+short8 __ovld __cnfn convert_short8_sat_rte(uint8);
+short8 __ovld __cnfn convert_short8_rtz(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
+short8 __ovld __cnfn convert_short8_rtp(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
+short8 __ovld __cnfn convert_short8_rtn(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
+short8 __ovld __cnfn convert_short8(uint8);
+short8 __ovld __cnfn convert_short8_sat(uint8);
+short8 __ovld __cnfn convert_short8_rte(long8);
+short8 __ovld __cnfn convert_short8_sat_rte(long8);
+short8 __ovld __cnfn convert_short8_rtz(long8);
+short8 __ovld __cnfn convert_short8_sat_rtz(long8);
+short8 __ovld __cnfn convert_short8_rtp(long8);
+short8 __ovld __cnfn convert_short8_sat_rtp(long8);
+short8 __ovld __cnfn convert_short8_rtn(long8);
+short8 __ovld __cnfn convert_short8_sat_rtn(long8);
+short8 __ovld __cnfn convert_short8(long8);
+short8 __ovld __cnfn convert_short8_sat(long8);
+short8 __ovld __cnfn convert_short8_rte(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
+short8 __ovld __cnfn convert_short8_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_rtn(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
+short8 __ovld __cnfn convert_short8(ulong8);
+short8 __ovld __cnfn convert_short8_sat(ulong8);
+short8 __ovld __cnfn convert_short8_rte(float8);
+short8 __ovld __cnfn convert_short8_sat_rte(float8);
+short8 __ovld __cnfn convert_short8_rtz(float8);
+short8 __ovld __cnfn convert_short8_sat_rtz(float8);
+short8 __ovld __cnfn convert_short8_rtp(float8);
+short8 __ovld __cnfn convert_short8_sat_rtp(float8);
+short8 __ovld __cnfn convert_short8_rtn(float8);
+short8 __ovld __cnfn convert_short8_sat_rtn(float8);
+short8 __ovld __cnfn convert_short8(float8);
+short8 __ovld __cnfn convert_short8_sat(float8);
+ushort8 __ovld __cnfn convert_ushort8_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat(char8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat(short8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat(int8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat(long8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat(float8);
+int8 __ovld __cnfn convert_int8_rte(char8);
+int8 __ovld __cnfn convert_int8_sat_rte(char8);
+int8 __ovld __cnfn convert_int8_rtz(char8);
+int8 __ovld __cnfn convert_int8_sat_rtz(char8);
+int8 __ovld __cnfn convert_int8_rtp(char8);
+int8 __ovld __cnfn convert_int8_sat_rtp(char8);
+int8 __ovld __cnfn convert_int8_rtn(char8);
+int8 __ovld __cnfn convert_int8_sat_rtn(char8);
+int8 __ovld __cnfn convert_int8(char8);
+int8 __ovld __cnfn convert_int8_sat(char8);
+int8 __ovld __cnfn convert_int8_rte(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
+int8 __ovld __cnfn convert_int8_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_rtn(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
+int8 __ovld __cnfn convert_int8(uchar8);
+int8 __ovld __cnfn convert_int8_sat(uchar8);
+int8 __ovld __cnfn convert_int8_rte(short8);
+int8 __ovld __cnfn convert_int8_sat_rte(short8);
+int8 __ovld __cnfn convert_int8_rtz(short8);
+int8 __ovld __cnfn convert_int8_sat_rtz(short8);
+int8 __ovld __cnfn convert_int8_rtp(short8);
+int8 __ovld __cnfn convert_int8_sat_rtp(short8);
+int8 __ovld __cnfn convert_int8_rtn(short8);
+int8 __ovld __cnfn convert_int8_sat_rtn(short8);
+int8 __ovld __cnfn convert_int8(short8);
+int8 __ovld __cnfn convert_int8_sat(short8);
+int8 __ovld __cnfn convert_int8_rte(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
+int8 __ovld __cnfn convert_int8_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_rtn(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
+int8 __ovld __cnfn convert_int8(ushort8);
+int8 __ovld __cnfn convert_int8_sat(ushort8);
+int8 __ovld __cnfn convert_int8_rte(int8);
+int8 __ovld __cnfn convert_int8_sat_rte(int8);
+int8 __ovld __cnfn convert_int8_rtz(int8);
+int8 __ovld __cnfn convert_int8_sat_rtz(int8);
+int8 __ovld __cnfn convert_int8_rtp(int8);
+int8 __ovld __cnfn convert_int8_sat_rtp(int8);
+int8 __ovld __cnfn convert_int8_rtn(int8);
+int8 __ovld __cnfn convert_int8_sat_rtn(int8);
+int8 __ovld __cnfn convert_int8(int8);
+int8 __ovld __cnfn convert_int8_sat(int8);
+int8 __ovld __cnfn convert_int8_rte(uint8);
+int8 __ovld __cnfn convert_int8_sat_rte(uint8);
+int8 __ovld __cnfn convert_int8_rtz(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
+int8 __ovld __cnfn convert_int8_rtp(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
+int8 __ovld __cnfn convert_int8_rtn(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
+int8 __ovld __cnfn convert_int8(uint8);
+int8 __ovld __cnfn convert_int8_sat(uint8);
+int8 __ovld __cnfn convert_int8_rte(long8);
+int8 __ovld __cnfn convert_int8_sat_rte(long8);
+int8 __ovld __cnfn convert_int8_rtz(long8);
+int8 __ovld __cnfn convert_int8_sat_rtz(long8);
+int8 __ovld __cnfn convert_int8_rtp(long8);
+int8 __ovld __cnfn convert_int8_sat_rtp(long8);
+int8 __ovld __cnfn convert_int8_rtn(long8);
+int8 __ovld __cnfn convert_int8_sat_rtn(long8);
+int8 __ovld __cnfn convert_int8(long8);
+int8 __ovld __cnfn convert_int8_sat(long8);
+int8 __ovld __cnfn convert_int8_rte(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
+int8 __ovld __cnfn convert_int8_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_rtn(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
+int8 __ovld __cnfn convert_int8(ulong8);
+int8 __ovld __cnfn convert_int8_sat(ulong8);
+int8 __ovld __cnfn convert_int8_rte(float8);
+int8 __ovld __cnfn convert_int8_sat_rte(float8);
+int8 __ovld __cnfn convert_int8_rtz(float8);
+int8 __ovld __cnfn convert_int8_sat_rtz(float8);
+int8 __ovld __cnfn convert_int8_rtp(float8);
+int8 __ovld __cnfn convert_int8_sat_rtp(float8);
+int8 __ovld __cnfn convert_int8_rtn(float8);
+int8 __ovld __cnfn convert_int8_sat_rtn(float8);
+int8 __ovld __cnfn convert_int8(float8);
+int8 __ovld __cnfn convert_int8_sat(float8);
+uint8 __ovld __cnfn convert_uint8_rte(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
+uint8 __ovld __cnfn convert_uint8_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_rtn(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
+uint8 __ovld __cnfn convert_uint8(char8);
+uint8 __ovld __cnfn convert_uint8_sat(char8);
+uint8 __ovld __cnfn convert_uint8_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat(uchar8);
+uint8 __ovld __cnfn convert_uint8_rte(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
+uint8 __ovld __cnfn convert_uint8_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_rtn(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
+uint8 __ovld __cnfn convert_uint8(short8);
+uint8 __ovld __cnfn convert_uint8_sat(short8);
+uint8 __ovld __cnfn convert_uint8_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat(ushort8);
+uint8 __ovld __cnfn convert_uint8_rte(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
+uint8 __ovld __cnfn convert_uint8_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_rtn(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
+uint8 __ovld __cnfn convert_uint8(int8);
+uint8 __ovld __cnfn convert_uint8_sat(int8);
+uint8 __ovld __cnfn convert_uint8_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8(uint8);
+uint8 __ovld __cnfn convert_uint8_sat(uint8);
+uint8 __ovld __cnfn convert_uint8_rte(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
+uint8 __ovld __cnfn convert_uint8_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_rtn(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
+uint8 __ovld __cnfn convert_uint8(long8);
+uint8 __ovld __cnfn convert_uint8_sat(long8);
+uint8 __ovld __cnfn convert_uint8_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat(ulong8);
+uint8 __ovld __cnfn convert_uint8_rte(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
+uint8 __ovld __cnfn convert_uint8_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_rtn(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
+uint8 __ovld __cnfn convert_uint8(float8);
+uint8 __ovld __cnfn convert_uint8_sat(float8);
+long8 __ovld __cnfn convert_long8_rte(char8);
+long8 __ovld __cnfn convert_long8_sat_rte(char8);
+long8 __ovld __cnfn convert_long8_rtz(char8);
+long8 __ovld __cnfn convert_long8_sat_rtz(char8);
+long8 __ovld __cnfn convert_long8_rtp(char8);
+long8 __ovld __cnfn convert_long8_sat_rtp(char8);
+long8 __ovld __cnfn convert_long8_rtn(char8);
+long8 __ovld __cnfn convert_long8_sat_rtn(char8);
+long8 __ovld __cnfn convert_long8(char8);
+long8 __ovld __cnfn convert_long8_sat(char8);
+long8 __ovld __cnfn convert_long8_rte(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
+long8 __ovld __cnfn convert_long8_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_rtn(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
+long8 __ovld __cnfn convert_long8(uchar8);
+long8 __ovld __cnfn convert_long8_sat(uchar8);
+long8 __ovld __cnfn convert_long8_rte(short8);
+long8 __ovld __cnfn convert_long8_sat_rte(short8);
+long8 __ovld __cnfn convert_long8_rtz(short8);
+long8 __ovld __cnfn convert_long8_sat_rtz(short8);
+long8 __ovld __cnfn convert_long8_rtp(short8);
+long8 __ovld __cnfn convert_long8_sat_rtp(short8);
+long8 __ovld __cnfn convert_long8_rtn(short8);
+long8 __ovld __cnfn convert_long8_sat_rtn(short8);
+long8 __ovld __cnfn convert_long8(short8);
+long8 __ovld __cnfn convert_long8_sat(short8);
+long8 __ovld __cnfn convert_long8_rte(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
+long8 __ovld __cnfn convert_long8_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_rtn(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
+long8 __ovld __cnfn convert_long8(ushort8);
+long8 __ovld __cnfn convert_long8_sat(ushort8);
+long8 __ovld __cnfn convert_long8_rte(int8);
+long8 __ovld __cnfn convert_long8_sat_rte(int8);
+long8 __ovld __cnfn convert_long8_rtz(int8);
+long8 __ovld __cnfn convert_long8_sat_rtz(int8);
+long8 __ovld __cnfn convert_long8_rtp(int8);
+long8 __ovld __cnfn convert_long8_sat_rtp(int8);
+long8 __ovld __cnfn convert_long8_rtn(int8);
+long8 __ovld __cnfn convert_long8_sat_rtn(int8);
+long8 __ovld __cnfn convert_long8(int8);
+long8 __ovld __cnfn convert_long8_sat(int8);
+long8 __ovld __cnfn convert_long8_rte(uint8);
+long8 __ovld __cnfn convert_long8_sat_rte(uint8);
+long8 __ovld __cnfn convert_long8_rtz(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
+long8 __ovld __cnfn convert_long8_rtp(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
+long8 __ovld __cnfn convert_long8_rtn(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
+long8 __ovld __cnfn convert_long8(uint8);
+long8 __ovld __cnfn convert_long8_sat(uint8);
+long8 __ovld __cnfn convert_long8_rte(long8);
+long8 __ovld __cnfn convert_long8_sat_rte(long8);
+long8 __ovld __cnfn convert_long8_rtz(long8);
+long8 __ovld __cnfn convert_long8_sat_rtz(long8);
+long8 __ovld __cnfn convert_long8_rtp(long8);
+long8 __ovld __cnfn convert_long8_sat_rtp(long8);
+long8 __ovld __cnfn convert_long8_rtn(long8);
+long8 __ovld __cnfn convert_long8_sat_rtn(long8);
+long8 __ovld __cnfn convert_long8(long8);
+long8 __ovld __cnfn convert_long8_sat(long8);
+long8 __ovld __cnfn convert_long8_rte(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
+long8 __ovld __cnfn convert_long8_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_rtn(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
+long8 __ovld __cnfn convert_long8(ulong8);
+long8 __ovld __cnfn convert_long8_sat(ulong8);
+long8 __ovld __cnfn convert_long8_rte(float8);
+long8 __ovld __cnfn convert_long8_sat_rte(float8);
+long8 __ovld __cnfn convert_long8_rtz(float8);
+long8 __ovld __cnfn convert_long8_sat_rtz(float8);
+long8 __ovld __cnfn convert_long8_rtp(float8);
+long8 __ovld __cnfn convert_long8_sat_rtp(float8);
+long8 __ovld __cnfn convert_long8_rtn(float8);
+long8 __ovld __cnfn convert_long8_sat_rtn(float8);
+long8 __ovld __cnfn convert_long8(float8);
+long8 __ovld __cnfn convert_long8_sat(float8);
+ulong8 __ovld __cnfn convert_ulong8_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat(char8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat(short8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat(int8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat(long8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat(float8);
+float8 __ovld __cnfn convert_float8_rte(char8);
+float8 __ovld __cnfn convert_float8_rtz(char8);
+float8 __ovld __cnfn convert_float8_rtp(char8);
+float8 __ovld __cnfn convert_float8_rtn(char8);
+float8 __ovld __cnfn convert_float8(char8);
+float8 __ovld __cnfn convert_float8_rte(uchar8);
+float8 __ovld __cnfn convert_float8_rtz(uchar8);
+float8 __ovld __cnfn convert_float8_rtp(uchar8);
+float8 __ovld __cnfn convert_float8_rtn(uchar8);
+float8 __ovld __cnfn convert_float8(uchar8);
+float8 __ovld __cnfn convert_float8_rte(short8);
+float8 __ovld __cnfn convert_float8_rtz(short8);
+float8 __ovld __cnfn convert_float8_rtp(short8);
+float8 __ovld __cnfn convert_float8_rtn(short8);
+float8 __ovld __cnfn convert_float8(short8);
+float8 __ovld __cnfn convert_float8_rte(ushort8);
+float8 __ovld __cnfn convert_float8_rtz(ushort8);
+float8 __ovld __cnfn convert_float8_rtp(ushort8);
+float8 __ovld __cnfn convert_float8_rtn(ushort8);
+float8 __ovld __cnfn convert_float8(ushort8);
+float8 __ovld __cnfn convert_float8_rte(int8);
+float8 __ovld __cnfn convert_float8_rtz(int8);
+float8 __ovld __cnfn convert_float8_rtp(int8);
+float8 __ovld __cnfn convert_float8_rtn(int8);
+float8 __ovld __cnfn convert_float8(int8);
+float8 __ovld __cnfn convert_float8_rte(uint8);
+float8 __ovld __cnfn convert_float8_rtz(uint8);
+float8 __ovld __cnfn convert_float8_rtp(uint8);
+float8 __ovld __cnfn convert_float8_rtn(uint8);
+float8 __ovld __cnfn convert_float8(uint8);
+float8 __ovld __cnfn convert_float8_rte(long8);
+float8 __ovld __cnfn convert_float8_rtz(long8);
+float8 __ovld __cnfn convert_float8_rtp(long8);
+float8 __ovld __cnfn convert_float8_rtn(long8);
+float8 __ovld __cnfn convert_float8(long8);
+float8 __ovld __cnfn convert_float8_rte(ulong8);
+float8 __ovld __cnfn convert_float8_rtz(ulong8);
+float8 __ovld __cnfn convert_float8_rtp(ulong8);
+float8 __ovld __cnfn convert_float8_rtn(ulong8);
+float8 __ovld __cnfn convert_float8(ulong8);
+float8 __ovld __cnfn convert_float8_rte(float8);
+float8 __ovld __cnfn convert_float8_rtz(float8);
+float8 __ovld __cnfn convert_float8_rtp(float8);
+float8 __ovld __cnfn convert_float8_rtn(float8);
+float8 __ovld __cnfn convert_float8(float8);
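+
+// The explicit conversions above and below all follow one naming scheme,
+// convert_<destType><N>[_sat][_<rounding>](<srcType><N>): the optional _sat
+// suffix clamps out-of-range values to the destination type's limits (and is
+// only declared for integer destinations, which is why the float8 block above
+// has no _sat variants), while the optional rounding suffix selects _rte
+// (round to nearest even), _rtz (toward zero), _rtp (toward +infinity) or
+// _rtn (toward -infinity). A brief usage sketch; the variable f below is
+// illustrative and not part of this header:
+//   float8 f = (float8)(0.5f);
+//   short8 s = convert_short8_sat_rtz(f); // clamp to short range, round to 0
+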
+char16 __ovld __cnfn convert_char16_rte(char16);
+char16 __ovld __cnfn convert_char16_sat_rte(char16);
+char16 __ovld __cnfn convert_char16_rtz(char16);
+char16 __ovld __cnfn convert_char16_sat_rtz(char16);
+char16 __ovld __cnfn convert_char16_rtp(char16);
+char16 __ovld __cnfn convert_char16_sat_rtp(char16);
+char16 __ovld __cnfn convert_char16_rtn(char16);
+char16 __ovld __cnfn convert_char16_sat_rtn(char16);
+char16 __ovld __cnfn convert_char16(char16);
+char16 __ovld __cnfn convert_char16_sat(char16);
+char16 __ovld __cnfn convert_char16_rte(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
+char16 __ovld __cnfn convert_char16_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_rtn(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
+char16 __ovld __cnfn convert_char16(uchar16);
+char16 __ovld __cnfn convert_char16_sat(uchar16);
+char16 __ovld __cnfn convert_char16_rte(short16);
+char16 __ovld __cnfn convert_char16_sat_rte(short16);
+char16 __ovld __cnfn convert_char16_rtz(short16);
+char16 __ovld __cnfn convert_char16_sat_rtz(short16);
+char16 __ovld __cnfn convert_char16_rtp(short16);
+char16 __ovld __cnfn convert_char16_sat_rtp(short16);
+char16 __ovld __cnfn convert_char16_rtn(short16);
+char16 __ovld __cnfn convert_char16_sat_rtn(short16);
+char16 __ovld __cnfn convert_char16(short16);
+char16 __ovld __cnfn convert_char16_sat(short16);
+char16 __ovld __cnfn convert_char16_rte(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
+char16 __ovld __cnfn convert_char16_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_rtn(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
+char16 __ovld __cnfn convert_char16(ushort16);
+char16 __ovld __cnfn convert_char16_sat(ushort16);
+char16 __ovld __cnfn convert_char16_rte(int16);
+char16 __ovld __cnfn convert_char16_sat_rte(int16);
+char16 __ovld __cnfn convert_char16_rtz(int16);
+char16 __ovld __cnfn convert_char16_sat_rtz(int16);
+char16 __ovld __cnfn convert_char16_rtp(int16);
+char16 __ovld __cnfn convert_char16_sat_rtp(int16);
+char16 __ovld __cnfn convert_char16_rtn(int16);
+char16 __ovld __cnfn convert_char16_sat_rtn(int16);
+char16 __ovld __cnfn convert_char16(int16);
+char16 __ovld __cnfn convert_char16_sat(int16);
+char16 __ovld __cnfn convert_char16_rte(uint16);
+char16 __ovld __cnfn convert_char16_sat_rte(uint16);
+char16 __ovld __cnfn convert_char16_rtz(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
+char16 __ovld __cnfn convert_char16_rtp(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
+char16 __ovld __cnfn convert_char16_rtn(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
+char16 __ovld __cnfn convert_char16(uint16);
+char16 __ovld __cnfn convert_char16_sat(uint16);
+char16 __ovld __cnfn convert_char16_rte(long16);
+char16 __ovld __cnfn convert_char16_sat_rte(long16);
+char16 __ovld __cnfn convert_char16_rtz(long16);
+char16 __ovld __cnfn convert_char16_sat_rtz(long16);
+char16 __ovld __cnfn convert_char16_rtp(long16);
+char16 __ovld __cnfn convert_char16_sat_rtp(long16);
+char16 __ovld __cnfn convert_char16_rtn(long16);
+char16 __ovld __cnfn convert_char16_sat_rtn(long16);
+char16 __ovld __cnfn convert_char16(long16);
+char16 __ovld __cnfn convert_char16_sat(long16);
+char16 __ovld __cnfn convert_char16_rte(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
+char16 __ovld __cnfn convert_char16_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_rtn(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
+char16 __ovld __cnfn convert_char16(ulong16);
+char16 __ovld __cnfn convert_char16_sat(ulong16);
+char16 __ovld __cnfn convert_char16_rte(float16);
+char16 __ovld __cnfn convert_char16_sat_rte(float16);
+char16 __ovld __cnfn convert_char16_rtz(float16);
+char16 __ovld __cnfn convert_char16_sat_rtz(float16);
+char16 __ovld __cnfn convert_char16_rtp(float16);
+char16 __ovld __cnfn convert_char16_sat_rtp(float16);
+char16 __ovld __cnfn convert_char16_rtn(float16);
+char16 __ovld __cnfn convert_char16_sat_rtn(float16);
+char16 __ovld __cnfn convert_char16(float16);
+char16 __ovld __cnfn convert_char16_sat(float16);
+uchar16 __ovld __cnfn convert_uchar16_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat(char16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat(short16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat(int16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat(long16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat(float16);
+short16 __ovld __cnfn convert_short16_rte(char16);
+short16 __ovld __cnfn convert_short16_sat_rte(char16);
+short16 __ovld __cnfn convert_short16_rtz(char16);
+short16 __ovld __cnfn convert_short16_sat_rtz(char16);
+short16 __ovld __cnfn convert_short16_rtp(char16);
+short16 __ovld __cnfn convert_short16_sat_rtp(char16);
+short16 __ovld __cnfn convert_short16_rtn(char16);
+short16 __ovld __cnfn convert_short16_sat_rtn(char16);
+short16 __ovld __cnfn convert_short16(char16);
+short16 __ovld __cnfn convert_short16_sat(char16);
+short16 __ovld __cnfn convert_short16_rte(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
+short16 __ovld __cnfn convert_short16_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_rtn(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
+short16 __ovld __cnfn convert_short16(uchar16);
+short16 __ovld __cnfn convert_short16_sat(uchar16);
+short16 __ovld __cnfn convert_short16_rte(short16);
+short16 __ovld __cnfn convert_short16_sat_rte(short16);
+short16 __ovld __cnfn convert_short16_rtz(short16);
+short16 __ovld __cnfn convert_short16_sat_rtz(short16);
+short16 __ovld __cnfn convert_short16_rtp(short16);
+short16 __ovld __cnfn convert_short16_sat_rtp(short16);
+short16 __ovld __cnfn convert_short16_rtn(short16);
+short16 __ovld __cnfn convert_short16_sat_rtn(short16);
+short16 __ovld __cnfn convert_short16(short16);
+short16 __ovld __cnfn convert_short16_sat(short16);
+short16 __ovld __cnfn convert_short16_rte(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
+short16 __ovld __cnfn convert_short16_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_rtn(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
+short16 __ovld __cnfn convert_short16(ushort16);
+short16 __ovld __cnfn convert_short16_sat(ushort16);
+short16 __ovld __cnfn convert_short16_rte(int16);
+short16 __ovld __cnfn convert_short16_sat_rte(int16);
+short16 __ovld __cnfn convert_short16_rtz(int16);
+short16 __ovld __cnfn convert_short16_sat_rtz(int16);
+short16 __ovld __cnfn convert_short16_rtp(int16);
+short16 __ovld __cnfn convert_short16_sat_rtp(int16);
+short16 __ovld __cnfn convert_short16_rtn(int16);
+short16 __ovld __cnfn convert_short16_sat_rtn(int16);
+short16 __ovld __cnfn convert_short16(int16);
+short16 __ovld __cnfn convert_short16_sat(int16);
+short16 __ovld __cnfn convert_short16_rte(uint16);
+short16 __ovld __cnfn convert_short16_sat_rte(uint16);
+short16 __ovld __cnfn convert_short16_rtz(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
+short16 __ovld __cnfn convert_short16_rtp(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
+short16 __ovld __cnfn convert_short16_rtn(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
+short16 __ovld __cnfn convert_short16(uint16);
+short16 __ovld __cnfn convert_short16_sat(uint16);
+short16 __ovld __cnfn convert_short16_rte(long16);
+short16 __ovld __cnfn convert_short16_sat_rte(long16);
+short16 __ovld __cnfn convert_short16_rtz(long16);
+short16 __ovld __cnfn convert_short16_sat_rtz(long16);
+short16 __ovld __cnfn convert_short16_rtp(long16);
+short16 __ovld __cnfn convert_short16_sat_rtp(long16);
+short16 __ovld __cnfn convert_short16_rtn(long16);
+short16 __ovld __cnfn convert_short16_sat_rtn(long16);
+short16 __ovld __cnfn convert_short16(long16);
+short16 __ovld __cnfn convert_short16_sat(long16);
+short16 __ovld __cnfn convert_short16_rte(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
+short16 __ovld __cnfn convert_short16_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_rtn(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
+short16 __ovld __cnfn convert_short16(ulong16);
+short16 __ovld __cnfn convert_short16_sat(ulong16);
+short16 __ovld __cnfn convert_short16_rte(float16);
+short16 __ovld __cnfn convert_short16_sat_rte(float16);
+short16 __ovld __cnfn convert_short16_rtz(float16);
+short16 __ovld __cnfn convert_short16_sat_rtz(float16);
+short16 __ovld __cnfn convert_short16_rtp(float16);
+short16 __ovld __cnfn convert_short16_sat_rtp(float16);
+short16 __ovld __cnfn convert_short16_rtn(float16);
+short16 __ovld __cnfn convert_short16_sat_rtn(float16);
+short16 __ovld __cnfn convert_short16(float16);
+short16 __ovld __cnfn convert_short16_sat(float16);
+ushort16 __ovld __cnfn convert_ushort16_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat(char16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat(short16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat(int16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat(long16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat(float16);
+int16 __ovld __cnfn convert_int16_rte(char16);
+int16 __ovld __cnfn convert_int16_sat_rte(char16);
+int16 __ovld __cnfn convert_int16_rtz(char16);
+int16 __ovld __cnfn convert_int16_sat_rtz(char16);
+int16 __ovld __cnfn convert_int16_rtp(char16);
+int16 __ovld __cnfn convert_int16_sat_rtp(char16);
+int16 __ovld __cnfn convert_int16_rtn(char16);
+int16 __ovld __cnfn convert_int16_sat_rtn(char16);
+int16 __ovld __cnfn convert_int16(char16);
+int16 __ovld __cnfn convert_int16_sat(char16);
+int16 __ovld __cnfn convert_int16_rte(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
+int16 __ovld __cnfn convert_int16_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_rtn(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
+int16 __ovld __cnfn convert_int16(uchar16);
+int16 __ovld __cnfn convert_int16_sat(uchar16);
+int16 __ovld __cnfn convert_int16_rte(short16);
+int16 __ovld __cnfn convert_int16_sat_rte(short16);
+int16 __ovld __cnfn convert_int16_rtz(short16);
+int16 __ovld __cnfn convert_int16_sat_rtz(short16);
+int16 __ovld __cnfn convert_int16_rtp(short16);
+int16 __ovld __cnfn convert_int16_sat_rtp(short16);
+int16 __ovld __cnfn convert_int16_rtn(short16);
+int16 __ovld __cnfn convert_int16_sat_rtn(short16);
+int16 __ovld __cnfn convert_int16(short16);
+int16 __ovld __cnfn convert_int16_sat(short16);
+int16 __ovld __cnfn convert_int16_rte(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
+int16 __ovld __cnfn convert_int16_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_rtn(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
+int16 __ovld __cnfn convert_int16(ushort16);
+int16 __ovld __cnfn convert_int16_sat(ushort16);
+int16 __ovld __cnfn convert_int16_rte(int16);
+int16 __ovld __cnfn convert_int16_sat_rte(int16);
+int16 __ovld __cnfn convert_int16_rtz(int16);
+int16 __ovld __cnfn convert_int16_sat_rtz(int16);
+int16 __ovld __cnfn convert_int16_rtp(int16);
+int16 __ovld __cnfn convert_int16_sat_rtp(int16);
+int16 __ovld __cnfn convert_int16_rtn(int16);
+int16 __ovld __cnfn convert_int16_sat_rtn(int16);
+int16 __ovld __cnfn convert_int16(int16);
+int16 __ovld __cnfn convert_int16_sat(int16);
+int16 __ovld __cnfn convert_int16_rte(uint16);
+int16 __ovld __cnfn convert_int16_sat_rte(uint16);
+int16 __ovld __cnfn convert_int16_rtz(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
+int16 __ovld __cnfn convert_int16_rtp(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
+int16 __ovld __cnfn convert_int16_rtn(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
+int16 __ovld __cnfn convert_int16(uint16);
+int16 __ovld __cnfn convert_int16_sat(uint16);
+int16 __ovld __cnfn convert_int16_rte(long16);
+int16 __ovld __cnfn convert_int16_sat_rte(long16);
+int16 __ovld __cnfn convert_int16_rtz(long16);
+int16 __ovld __cnfn convert_int16_sat_rtz(long16);
+int16 __ovld __cnfn convert_int16_rtp(long16);
+int16 __ovld __cnfn convert_int16_sat_rtp(long16);
+int16 __ovld __cnfn convert_int16_rtn(long16);
+int16 __ovld __cnfn convert_int16_sat_rtn(long16);
+int16 __ovld __cnfn convert_int16(long16);
+int16 __ovld __cnfn convert_int16_sat(long16);
+int16 __ovld __cnfn convert_int16_rte(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
+int16 __ovld __cnfn convert_int16_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_rtn(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
+int16 __ovld __cnfn convert_int16(ulong16);
+int16 __ovld __cnfn convert_int16_sat(ulong16);
+int16 __ovld __cnfn convert_int16_rte(float16);
+int16 __ovld __cnfn convert_int16_sat_rte(float16);
+int16 __ovld __cnfn convert_int16_rtz(float16);
+int16 __ovld __cnfn convert_int16_sat_rtz(float16);
+int16 __ovld __cnfn convert_int16_rtp(float16);
+int16 __ovld __cnfn convert_int16_sat_rtp(float16);
+int16 __ovld __cnfn convert_int16_rtn(float16);
+int16 __ovld __cnfn convert_int16_sat_rtn(float16);
+int16 __ovld __cnfn convert_int16(float16);
+int16 __ovld __cnfn convert_int16_sat(float16);
+uint16 __ovld __cnfn convert_uint16_rte(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
+uint16 __ovld __cnfn convert_uint16_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_rtn(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
+uint16 __ovld __cnfn convert_uint16(char16);
+uint16 __ovld __cnfn convert_uint16_sat(char16);
+uint16 __ovld __cnfn convert_uint16_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat(uchar16);
+uint16 __ovld __cnfn convert_uint16_rte(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
+uint16 __ovld __cnfn convert_uint16_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_rtn(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
+uint16 __ovld __cnfn convert_uint16(short16);
+uint16 __ovld __cnfn convert_uint16_sat(short16);
+uint16 __ovld __cnfn convert_uint16_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat(ushort16);
+uint16 __ovld __cnfn convert_uint16_rte(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
+uint16 __ovld __cnfn convert_uint16_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_rtn(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
+uint16 __ovld __cnfn convert_uint16(int16);
+uint16 __ovld __cnfn convert_uint16_sat(int16);
+uint16 __ovld __cnfn convert_uint16_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16(uint16);
+uint16 __ovld __cnfn convert_uint16_sat(uint16);
+uint16 __ovld __cnfn convert_uint16_rte(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
+uint16 __ovld __cnfn convert_uint16_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_rtn(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
+uint16 __ovld __cnfn convert_uint16(long16);
+uint16 __ovld __cnfn convert_uint16_sat(long16);
+uint16 __ovld __cnfn convert_uint16_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat(ulong16);
+uint16 __ovld __cnfn convert_uint16_rte(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
+uint16 __ovld __cnfn convert_uint16_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_rtn(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
+uint16 __ovld __cnfn convert_uint16(float16);
+uint16 __ovld __cnfn convert_uint16_sat(float16);
+long16 __ovld __cnfn convert_long16_rte(char16);
+long16 __ovld __cnfn convert_long16_sat_rte(char16);
+long16 __ovld __cnfn convert_long16_rtz(char16);
+long16 __ovld __cnfn convert_long16_sat_rtz(char16);
+long16 __ovld __cnfn convert_long16_rtp(char16);
+long16 __ovld __cnfn convert_long16_sat_rtp(char16);
+long16 __ovld __cnfn convert_long16_rtn(char16);
+long16 __ovld __cnfn convert_long16_sat_rtn(char16);
+long16 __ovld __cnfn convert_long16(char16);
+long16 __ovld __cnfn convert_long16_sat(char16);
+long16 __ovld __cnfn convert_long16_rte(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
+long16 __ovld __cnfn convert_long16_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_rtn(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
+long16 __ovld __cnfn convert_long16(uchar16);
+long16 __ovld __cnfn convert_long16_sat(uchar16);
+long16 __ovld __cnfn convert_long16_rte(short16);
+long16 __ovld __cnfn convert_long16_sat_rte(short16);
+long16 __ovld __cnfn convert_long16_rtz(short16);
+long16 __ovld __cnfn convert_long16_sat_rtz(short16);
+long16 __ovld __cnfn convert_long16_rtp(short16);
+long16 __ovld __cnfn convert_long16_sat_rtp(short16);
+long16 __ovld __cnfn convert_long16_rtn(short16);
+long16 __ovld __cnfn convert_long16_sat_rtn(short16);
+long16 __ovld __cnfn convert_long16(short16);
+long16 __ovld __cnfn convert_long16_sat(short16);
+long16 __ovld __cnfn convert_long16_rte(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
+long16 __ovld __cnfn convert_long16_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_rtn(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
+long16 __ovld __cnfn convert_long16(ushort16);
+long16 __ovld __cnfn convert_long16_sat(ushort16);
+long16 __ovld __cnfn convert_long16_rte(int16);
+long16 __ovld __cnfn convert_long16_sat_rte(int16);
+long16 __ovld __cnfn convert_long16_rtz(int16);
+long16 __ovld __cnfn convert_long16_sat_rtz(int16);
+long16 __ovld __cnfn convert_long16_rtp(int16);
+long16 __ovld __cnfn convert_long16_sat_rtp(int16);
+long16 __ovld __cnfn convert_long16_rtn(int16);
+long16 __ovld __cnfn convert_long16_sat_rtn(int16);
+long16 __ovld __cnfn convert_long16(int16);
+long16 __ovld __cnfn convert_long16_sat(int16);
+long16 __ovld __cnfn convert_long16_rte(uint16);
+long16 __ovld __cnfn convert_long16_sat_rte(uint16);
+long16 __ovld __cnfn convert_long16_rtz(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
+long16 __ovld __cnfn convert_long16_rtp(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
+long16 __ovld __cnfn convert_long16_rtn(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
+long16 __ovld __cnfn convert_long16(uint16);
+long16 __ovld __cnfn convert_long16_sat(uint16);
+long16 __ovld __cnfn convert_long16_rte(long16);
+long16 __ovld __cnfn convert_long16_sat_rte(long16);
+long16 __ovld __cnfn convert_long16_rtz(long16);
+long16 __ovld __cnfn convert_long16_sat_rtz(long16);
+long16 __ovld __cnfn convert_long16_rtp(long16);
+long16 __ovld __cnfn convert_long16_sat_rtp(long16);
+long16 __ovld __cnfn convert_long16_rtn(long16);
+long16 __ovld __cnfn convert_long16_sat_rtn(long16);
+long16 __ovld __cnfn convert_long16(long16);
+long16 __ovld __cnfn convert_long16_sat(long16);
+long16 __ovld __cnfn convert_long16_rte(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
+long16 __ovld __cnfn convert_long16_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_rtn(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
+long16 __ovld __cnfn convert_long16(ulong16);
+long16 __ovld __cnfn convert_long16_sat(ulong16);
+long16 __ovld __cnfn convert_long16_rte(float16);
+long16 __ovld __cnfn convert_long16_sat_rte(float16);
+long16 __ovld __cnfn convert_long16_rtz(float16);
+long16 __ovld __cnfn convert_long16_sat_rtz(float16);
+long16 __ovld __cnfn convert_long16_rtp(float16);
+long16 __ovld __cnfn convert_long16_sat_rtp(float16);
+long16 __ovld __cnfn convert_long16_rtn(float16);
+long16 __ovld __cnfn convert_long16_sat_rtn(float16);
+long16 __ovld __cnfn convert_long16(float16);
+long16 __ovld __cnfn convert_long16_sat(float16);
+ulong16 __ovld __cnfn convert_ulong16_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat(char16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat(short16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat(int16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat(long16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat(float16);
+float16 __ovld __cnfn convert_float16_rte(char16);
+float16 __ovld __cnfn convert_float16_rtz(char16);
+float16 __ovld __cnfn convert_float16_rtp(char16);
+float16 __ovld __cnfn convert_float16_rtn(char16);
+float16 __ovld __cnfn convert_float16(char16);
+float16 __ovld __cnfn convert_float16_rte(uchar16);
+float16 __ovld __cnfn convert_float16_rtz(uchar16);
+float16 __ovld __cnfn convert_float16_rtp(uchar16);
+float16 __ovld __cnfn convert_float16_rtn(uchar16);
+float16 __ovld __cnfn convert_float16(uchar16);
+float16 __ovld __cnfn convert_float16_rte(short16);
+float16 __ovld __cnfn convert_float16_rtz(short16);
+float16 __ovld __cnfn convert_float16_rtp(short16);
+float16 __ovld __cnfn convert_float16_rtn(short16);
+float16 __ovld __cnfn convert_float16(short16);
+float16 __ovld __cnfn convert_float16_rte(ushort16);
+float16 __ovld __cnfn convert_float16_rtz(ushort16);
+float16 __ovld __cnfn convert_float16_rtp(ushort16);
+float16 __ovld __cnfn convert_float16_rtn(ushort16);
+float16 __ovld __cnfn convert_float16(ushort16);
+float16 __ovld __cnfn convert_float16_rte(int16);
+float16 __ovld __cnfn convert_float16_rtz(int16);
+float16 __ovld __cnfn convert_float16_rtp(int16);
+float16 __ovld __cnfn convert_float16_rtn(int16);
+float16 __ovld __cnfn convert_float16(int16);
+float16 __ovld __cnfn convert_float16_rte(uint16);
+float16 __ovld __cnfn convert_float16_rtz(uint16);
+float16 __ovld __cnfn convert_float16_rtp(uint16);
+float16 __ovld __cnfn convert_float16_rtn(uint16);
+float16 __ovld __cnfn convert_float16(uint16);
+float16 __ovld __cnfn convert_float16_rte(long16);
+float16 __ovld __cnfn convert_float16_rtz(long16);
+float16 __ovld __cnfn convert_float16_rtp(long16);
+float16 __ovld __cnfn convert_float16_rtn(long16);
+float16 __ovld __cnfn convert_float16(long16);
+float16 __ovld __cnfn convert_float16_rte(ulong16);
+float16 __ovld __cnfn convert_float16_rtz(ulong16);
+float16 __ovld __cnfn convert_float16_rtp(ulong16);
+float16 __ovld __cnfn convert_float16_rtn(ulong16);
+float16 __ovld __cnfn convert_float16(ulong16);
+float16 __ovld __cnfn convert_float16_rte(float16);
+float16 __ovld __cnfn convert_float16_rtz(float16);
+float16 __ovld __cnfn convert_float16_rtp(float16);
+float16 __ovld __cnfn convert_float16_rtn(float16);
+float16 __ovld __cnfn convert_float16(float16);
+
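+// The declarations above and below follow the OpenCL C s6.2.3 naming scheme
+// convert_<destType><N>[_sat][_<rounding>]: the optional _sat modifier clamps
+// out-of-range results to the destination range (integer destinations only),
+// and the optional rounding suffix selects round-to-nearest-even (_rte),
+// toward zero (_rtz), toward positive infinity (_rtp), or toward negative
+// infinity (_rtn). With no suffix, conversions to integer types round toward
+// zero and conversions to floating-point types round to nearest even.
+// A minimal illustrative kernel (the kernel and argument names are
+// hypothetical, not part of this header):
+//
+//   __kernel void pack(__global const float4 *in, __global uchar4 *out) {
+//     size_t i = get_global_id(0);
+//     // Clamp to [0, 255] and round to nearest even while narrowing.
+//     out[i] = convert_uchar4_sat_rte(in[i] * 255.0f);
+//   }
+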
+// Conversions with a double data type parameter or return value.
+
+#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+char __ovld __cnfn convert_char(double);
+char __ovld __cnfn convert_char_rte(double);
+char __ovld __cnfn convert_char_rtn(double);
+char __ovld __cnfn convert_char_rtp(double);
+char __ovld __cnfn convert_char_rtz(double);
+char __ovld __cnfn convert_char_sat(double);
+char __ovld __cnfn convert_char_sat_rte(double);
+char __ovld __cnfn convert_char_sat_rtn(double);
+char __ovld __cnfn convert_char_sat_rtp(double);
+char __ovld __cnfn convert_char_sat_rtz(double);
+char2 __ovld __cnfn convert_char2(double2);
+char2 __ovld __cnfn convert_char2_rte(double2);
+char2 __ovld __cnfn convert_char2_rtn(double2);
+char2 __ovld __cnfn convert_char2_rtp(double2);
+char2 __ovld __cnfn convert_char2_rtz(double2);
+char2 __ovld __cnfn convert_char2_sat(double2);
+char2 __ovld __cnfn convert_char2_sat_rte(double2);
+char2 __ovld __cnfn convert_char2_sat_rtn(double2);
+char2 __ovld __cnfn convert_char2_sat_rtp(double2);
+char2 __ovld __cnfn convert_char2_sat_rtz(double2);
+char3 __ovld __cnfn convert_char3(double3);
+char3 __ovld __cnfn convert_char3_rte(double3);
+char3 __ovld __cnfn convert_char3_rtn(double3);
+char3 __ovld __cnfn convert_char3_rtp(double3);
+char3 __ovld __cnfn convert_char3_rtz(double3);
+char3 __ovld __cnfn convert_char3_sat(double3);
+char3 __ovld __cnfn convert_char3_sat_rte(double3);
+char3 __ovld __cnfn convert_char3_sat_rtn(double3);
+char3 __ovld __cnfn convert_char3_sat_rtp(double3);
+char3 __ovld __cnfn convert_char3_sat_rtz(double3);
+char4 __ovld __cnfn convert_char4(double4);
+char4 __ovld __cnfn convert_char4_rte(double4);
+char4 __ovld __cnfn convert_char4_rtn(double4);
+char4 __ovld __cnfn convert_char4_rtp(double4);
+char4 __ovld __cnfn convert_char4_rtz(double4);
+char4 __ovld __cnfn convert_char4_sat(double4);
+char4 __ovld __cnfn convert_char4_sat_rte(double4);
+char4 __ovld __cnfn convert_char4_sat_rtn(double4);
+char4 __ovld __cnfn convert_char4_sat_rtp(double4);
+char4 __ovld __cnfn convert_char4_sat_rtz(double4);
+char8 __ovld __cnfn convert_char8(double8);
+char8 __ovld __cnfn convert_char8_rte(double8);
+char8 __ovld __cnfn convert_char8_rtn(double8);
+char8 __ovld __cnfn convert_char8_rtp(double8);
+char8 __ovld __cnfn convert_char8_rtz(double8);
+char8 __ovld __cnfn convert_char8_sat(double8);
+char8 __ovld __cnfn convert_char8_sat_rte(double8);
+char8 __ovld __cnfn convert_char8_sat_rtn(double8);
+char8 __ovld __cnfn convert_char8_sat_rtp(double8);
+char8 __ovld __cnfn convert_char8_sat_rtz(double8);
+char16 __ovld __cnfn convert_char16(double16);
+char16 __ovld __cnfn convert_char16_rte(double16);
+char16 __ovld __cnfn convert_char16_rtn(double16);
+char16 __ovld __cnfn convert_char16_rtp(double16);
+char16 __ovld __cnfn convert_char16_rtz(double16);
+char16 __ovld __cnfn convert_char16_sat(double16);
+char16 __ovld __cnfn convert_char16_sat_rte(double16);
+char16 __ovld __cnfn convert_char16_sat_rtn(double16);
+char16 __ovld __cnfn convert_char16_sat_rtp(double16);
+char16 __ovld __cnfn convert_char16_sat_rtz(double16);
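+// Saturation note: for inputs outside the destination range the _sat forms
+// clamp (e.g. convert_char_sat(300.0) == CHAR_MAX, i.e. 127), whereas the
+// non-saturating forms are undefined for unrepresentable values, per the
+// out-of-range behavior rules in s6.2.3.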
+
+uchar __ovld __cnfn convert_uchar(double);
+uchar __ovld __cnfn convert_uchar_rte(double);
+uchar __ovld __cnfn convert_uchar_rtn(double);
+uchar __ovld __cnfn convert_uchar_rtp(double);
+uchar __ovld __cnfn convert_uchar_rtz(double);
+uchar __ovld __cnfn convert_uchar_sat(double);
+uchar __ovld __cnfn convert_uchar_sat_rte(double);
+uchar __ovld __cnfn convert_uchar_sat_rtn(double);
+uchar __ovld __cnfn convert_uchar_sat_rtp(double);
+uchar __ovld __cnfn convert_uchar_sat_rtz(double);
+uchar2 __ovld __cnfn convert_uchar2(double2);
+uchar2 __ovld __cnfn convert_uchar2_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
+uchar3 __ovld __cnfn convert_uchar3(double3);
+uchar3 __ovld __cnfn convert_uchar3_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
+uchar4 __ovld __cnfn convert_uchar4(double4);
+uchar4 __ovld __cnfn convert_uchar4_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
+uchar8 __ovld __cnfn convert_uchar8(double8);
+uchar8 __ovld __cnfn convert_uchar8_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
+uchar16 __ovld __cnfn convert_uchar16(double16);
+uchar16 __ovld __cnfn convert_uchar16_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
+
+short __ovld __cnfn convert_short(double);
+short __ovld __cnfn convert_short_rte(double);
+short __ovld __cnfn convert_short_rtn(double);
+short __ovld __cnfn convert_short_rtp(double);
+short __ovld __cnfn convert_short_rtz(double);
+short __ovld __cnfn convert_short_sat(double);
+short __ovld __cnfn convert_short_sat_rte(double);
+short __ovld __cnfn convert_short_sat_rtn(double);
+short __ovld __cnfn convert_short_sat_rtp(double);
+short __ovld __cnfn convert_short_sat_rtz(double);
+short2 __ovld __cnfn convert_short2(double2);
+short2 __ovld __cnfn convert_short2_rte(double2);
+short2 __ovld __cnfn convert_short2_rtn(double2);
+short2 __ovld __cnfn convert_short2_rtp(double2);
+short2 __ovld __cnfn convert_short2_rtz(double2);
+short2 __ovld __cnfn convert_short2_sat(double2);
+short2 __ovld __cnfn convert_short2_sat_rte(double2);
+short2 __ovld __cnfn convert_short2_sat_rtn(double2);
+short2 __ovld __cnfn convert_short2_sat_rtp(double2);
+short2 __ovld __cnfn convert_short2_sat_rtz(double2);
+short3 __ovld __cnfn convert_short3(double3);
+short3 __ovld __cnfn convert_short3_rte(double3);
+short3 __ovld __cnfn convert_short3_rtn(double3);
+short3 __ovld __cnfn convert_short3_rtp(double3);
+short3 __ovld __cnfn convert_short3_rtz(double3);
+short3 __ovld __cnfn convert_short3_sat(double3);
+short3 __ovld __cnfn convert_short3_sat_rte(double3);
+short3 __ovld __cnfn convert_short3_sat_rtn(double3);
+short3 __ovld __cnfn convert_short3_sat_rtp(double3);
+short3 __ovld __cnfn convert_short3_sat_rtz(double3);
+short4 __ovld __cnfn convert_short4(double4);
+short4 __ovld __cnfn convert_short4_rte(double4);
+short4 __ovld __cnfn convert_short4_rtn(double4);
+short4 __ovld __cnfn convert_short4_rtp(double4);
+short4 __ovld __cnfn convert_short4_rtz(double4);
+short4 __ovld __cnfn convert_short4_sat(double4);
+short4 __ovld __cnfn convert_short4_sat_rte(double4);
+short4 __ovld __cnfn convert_short4_sat_rtn(double4);
+short4 __ovld __cnfn convert_short4_sat_rtp(double4);
+short4 __ovld __cnfn convert_short4_sat_rtz(double4);
+short8 __ovld __cnfn convert_short8(double8);
+short8 __ovld __cnfn convert_short8_rte(double8);
+short8 __ovld __cnfn convert_short8_rtn(double8);
+short8 __ovld __cnfn convert_short8_rtp(double8);
+short8 __ovld __cnfn convert_short8_rtz(double8);
+short8 __ovld __cnfn convert_short8_sat(double8);
+short8 __ovld __cnfn convert_short8_sat_rte(double8);
+short8 __ovld __cnfn convert_short8_sat_rtn(double8);
+short8 __ovld __cnfn convert_short8_sat_rtp(double8);
+short8 __ovld __cnfn convert_short8_sat_rtz(double8);
+short16 __ovld __cnfn convert_short16(double16);
+short16 __ovld __cnfn convert_short16_rte(double16);
+short16 __ovld __cnfn convert_short16_rtn(double16);
+short16 __ovld __cnfn convert_short16_rtp(double16);
+short16 __ovld __cnfn convert_short16_rtz(double16);
+short16 __ovld __cnfn convert_short16_sat(double16);
+short16 __ovld __cnfn convert_short16_sat_rte(double16);
+short16 __ovld __cnfn convert_short16_sat_rtn(double16);
+short16 __ovld __cnfn convert_short16_sat_rtp(double16);
+short16 __ovld __cnfn convert_short16_sat_rtz(double16);
+
+ushort __ovld __cnfn convert_ushort(double);
+ushort __ovld __cnfn convert_ushort_rte(double);
+ushort __ovld __cnfn convert_ushort_rtn(double);
+ushort __ovld __cnfn convert_ushort_rtp(double);
+ushort __ovld __cnfn convert_ushort_rtz(double);
+ushort __ovld __cnfn convert_ushort_sat(double);
+ushort __ovld __cnfn convert_ushort_sat_rte(double);
+ushort __ovld __cnfn convert_ushort_sat_rtn(double);
+ushort __ovld __cnfn convert_ushort_sat_rtp(double);
+ushort __ovld __cnfn convert_ushort_sat_rtz(double);
+ushort2 __ovld __cnfn convert_ushort2(double2);
+ushort2 __ovld __cnfn convert_ushort2_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
+ushort3 __ovld __cnfn convert_ushort3(double3);
+ushort3 __ovld __cnfn convert_ushort3_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
+ushort4 __ovld __cnfn convert_ushort4(double4);
+ushort4 __ovld __cnfn convert_ushort4_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
+ushort8 __ovld __cnfn convert_ushort8(double8);
+ushort8 __ovld __cnfn convert_ushort8_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
+ushort16 __ovld __cnfn convert_ushort16(double16);
+ushort16 __ovld __cnfn convert_ushort16_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
+
+int __ovld __cnfn convert_int(double);
+int __ovld __cnfn convert_int_rte(double);
+int __ovld __cnfn convert_int_rtn(double);
+int __ovld __cnfn convert_int_rtp(double);
+int __ovld __cnfn convert_int_rtz(double);
+int __ovld __cnfn convert_int_sat(double);
+int __ovld __cnfn convert_int_sat_rte(double);
+int __ovld __cnfn convert_int_sat_rtn(double);
+int __ovld __cnfn convert_int_sat_rtp(double);
+int __ovld __cnfn convert_int_sat_rtz(double);
+int2 __ovld __cnfn convert_int2(double2);
+int2 __ovld __cnfn convert_int2_rte(double2);
+int2 __ovld __cnfn convert_int2_rtn(double2);
+int2 __ovld __cnfn convert_int2_rtp(double2);
+int2 __ovld __cnfn convert_int2_rtz(double2);
+int2 __ovld __cnfn convert_int2_sat(double2);
+int2 __ovld __cnfn convert_int2_sat_rte(double2);
+int2 __ovld __cnfn convert_int2_sat_rtn(double2);
+int2 __ovld __cnfn convert_int2_sat_rtp(double2);
+int2 __ovld __cnfn convert_int2_sat_rtz(double2);
+int3 __ovld __cnfn convert_int3(double3);
+int3 __ovld __cnfn convert_int3_rte(double3);
+int3 __ovld __cnfn convert_int3_rtn(double3);
+int3 __ovld __cnfn convert_int3_rtp(double3);
+int3 __ovld __cnfn convert_int3_rtz(double3);
+int3 __ovld __cnfn convert_int3_sat(double3);
+int3 __ovld __cnfn convert_int3_sat_rte(double3);
+int3 __ovld __cnfn convert_int3_sat_rtn(double3);
+int3 __ovld __cnfn convert_int3_sat_rtp(double3);
+int3 __ovld __cnfn convert_int3_sat_rtz(double3);
+int4 __ovld __cnfn convert_int4(double4);
+int4 __ovld __cnfn convert_int4_rte(double4);
+int4 __ovld __cnfn convert_int4_rtn(double4);
+int4 __ovld __cnfn convert_int4_rtp(double4);
+int4 __ovld __cnfn convert_int4_rtz(double4);
+int4 __ovld __cnfn convert_int4_sat(double4);
+int4 __ovld __cnfn convert_int4_sat_rte(double4);
+int4 __ovld __cnfn convert_int4_sat_rtn(double4);
+int4 __ovld __cnfn convert_int4_sat_rtp(double4);
+int4 __ovld __cnfn convert_int4_sat_rtz(double4);
+int8 __ovld __cnfn convert_int8(double8);
+int8 __ovld __cnfn convert_int8_rte(double8);
+int8 __ovld __cnfn convert_int8_rtn(double8);
+int8 __ovld __cnfn convert_int8_rtp(double8);
+int8 __ovld __cnfn convert_int8_rtz(double8);
+int8 __ovld __cnfn convert_int8_sat(double8);
+int8 __ovld __cnfn convert_int8_sat_rte(double8);
+int8 __ovld __cnfn convert_int8_sat_rtn(double8);
+int8 __ovld __cnfn convert_int8_sat_rtp(double8);
+int8 __ovld __cnfn convert_int8_sat_rtz(double8);
+int16 __ovld __cnfn convert_int16(double16);
+int16 __ovld __cnfn convert_int16_rte(double16);
+int16 __ovld __cnfn convert_int16_rtn(double16);
+int16 __ovld __cnfn convert_int16_rtp(double16);
+int16 __ovld __cnfn convert_int16_rtz(double16);
+int16 __ovld __cnfn convert_int16_sat(double16);
+int16 __ovld __cnfn convert_int16_sat_rte(double16);
+int16 __ovld __cnfn convert_int16_sat_rtn(double16);
+int16 __ovld __cnfn convert_int16_sat_rtp(double16);
+int16 __ovld __cnfn convert_int16_sat_rtz(double16);
+
+uint __ovld __cnfn convert_uint(double);
+uint __ovld __cnfn convert_uint_rte(double);
+uint __ovld __cnfn convert_uint_rtn(double);
+uint __ovld __cnfn convert_uint_rtp(double);
+uint __ovld __cnfn convert_uint_rtz(double);
+uint __ovld __cnfn convert_uint_sat(double);
+uint __ovld __cnfn convert_uint_sat_rte(double);
+uint __ovld __cnfn convert_uint_sat_rtn(double);
+uint __ovld __cnfn convert_uint_sat_rtp(double);
+uint __ovld __cnfn convert_uint_sat_rtz(double);
+uint2 __ovld __cnfn convert_uint2(double2);
+uint2 __ovld __cnfn convert_uint2_rte(double2);
+uint2 __ovld __cnfn convert_uint2_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_rtz(double2);
+uint2 __ovld __cnfn convert_uint2_sat(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
+uint3 __ovld __cnfn convert_uint3(double3);
+uint3 __ovld __cnfn convert_uint3_rte(double3);
+uint3 __ovld __cnfn convert_uint3_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_rtz(double3);
+uint3 __ovld __cnfn convert_uint3_sat(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
+uint4 __ovld __cnfn convert_uint4(double4);
+uint4 __ovld __cnfn convert_uint4_rte(double4);
+uint4 __ovld __cnfn convert_uint4_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_rtz(double4);
+uint4 __ovld __cnfn convert_uint4_sat(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
+uint8 __ovld __cnfn convert_uint8(double8);
+uint8 __ovld __cnfn convert_uint8_rte(double8);
+uint8 __ovld __cnfn convert_uint8_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_rtz(double8);
+uint8 __ovld __cnfn convert_uint8_sat(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
+uint16 __ovld __cnfn convert_uint16(double16);
+uint16 __ovld __cnfn convert_uint16_rte(double16);
+uint16 __ovld __cnfn convert_uint16_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_rtz(double16);
+uint16 __ovld __cnfn convert_uint16_sat(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
+
+long __ovld __cnfn convert_long(double);
+long __ovld __cnfn convert_long_rte(double);
+long __ovld __cnfn convert_long_rtn(double);
+long __ovld __cnfn convert_long_rtp(double);
+long __ovld __cnfn convert_long_rtz(double);
+long __ovld __cnfn convert_long_sat(double);
+long __ovld __cnfn convert_long_sat_rte(double);
+long __ovld __cnfn convert_long_sat_rtn(double);
+long __ovld __cnfn convert_long_sat_rtp(double);
+long __ovld __cnfn convert_long_sat_rtz(double);
+long2 __ovld __cnfn convert_long2(double2);
+long2 __ovld __cnfn convert_long2_rte(double2);
+long2 __ovld __cnfn convert_long2_rtn(double2);
+long2 __ovld __cnfn convert_long2_rtp(double2);
+long2 __ovld __cnfn convert_long2_rtz(double2);
+long2 __ovld __cnfn convert_long2_sat(double2);
+long2 __ovld __cnfn convert_long2_sat_rte(double2);
+long2 __ovld __cnfn convert_long2_sat_rtn(double2);
+long2 __ovld __cnfn convert_long2_sat_rtp(double2);
+long2 __ovld __cnfn convert_long2_sat_rtz(double2);
+long3 __ovld __cnfn convert_long3(double3);
+long3 __ovld __cnfn convert_long3_rte(double3);
+long3 __ovld __cnfn convert_long3_rtn(double3);
+long3 __ovld __cnfn convert_long3_rtp(double3);
+long3 __ovld __cnfn convert_long3_rtz(double3);
+long3 __ovld __cnfn convert_long3_sat(double3);
+long3 __ovld __cnfn convert_long3_sat_rte(double3);
+long3 __ovld __cnfn convert_long3_sat_rtn(double3);
+long3 __ovld __cnfn convert_long3_sat_rtp(double3);
+long3 __ovld __cnfn convert_long3_sat_rtz(double3);
+long4 __ovld __cnfn convert_long4(double4);
+long4 __ovld __cnfn convert_long4_rte(double4);
+long4 __ovld __cnfn convert_long4_rtn(double4);
+long4 __ovld __cnfn convert_long4_rtp(double4);
+long4 __ovld __cnfn convert_long4_rtz(double4);
+long4 __ovld __cnfn convert_long4_sat(double4);
+long4 __ovld __cnfn convert_long4_sat_rte(double4);
+long4 __ovld __cnfn convert_long4_sat_rtn(double4);
+long4 __ovld __cnfn convert_long4_sat_rtp(double4);
+long4 __ovld __cnfn convert_long4_sat_rtz(double4);
+long8 __ovld __cnfn convert_long8(double8);
+long8 __ovld __cnfn convert_long8_rte(double8);
+long8 __ovld __cnfn convert_long8_rtn(double8);
+long8 __ovld __cnfn convert_long8_rtp(double8);
+long8 __ovld __cnfn convert_long8_rtz(double8);
+long8 __ovld __cnfn convert_long8_sat(double8);
+long8 __ovld __cnfn convert_long8_sat_rte(double8);
+long8 __ovld __cnfn convert_long8_sat_rtn(double8);
+long8 __ovld __cnfn convert_long8_sat_rtp(double8);
+long8 __ovld __cnfn convert_long8_sat_rtz(double8);
+long16 __ovld __cnfn convert_long16(double16);
+long16 __ovld __cnfn convert_long16_rte(double16);
+long16 __ovld __cnfn convert_long16_rtn(double16);
+long16 __ovld __cnfn convert_long16_rtp(double16);
+long16 __ovld __cnfn convert_long16_rtz(double16);
+long16 __ovld __cnfn convert_long16_sat(double16);
+long16 __ovld __cnfn convert_long16_sat_rte(double16);
+long16 __ovld __cnfn convert_long16_sat_rtn(double16);
+long16 __ovld __cnfn convert_long16_sat_rtp(double16);
+long16 __ovld __cnfn convert_long16_sat_rtz(double16);
+
+ulong __ovld __cnfn convert_ulong(double);
+ulong __ovld __cnfn convert_ulong_rte(double);
+ulong __ovld __cnfn convert_ulong_rtn(double);
+ulong __ovld __cnfn convert_ulong_rtp(double);
+ulong __ovld __cnfn convert_ulong_rtz(double);
+ulong __ovld __cnfn convert_ulong_sat(double);
+ulong __ovld __cnfn convert_ulong_sat_rte(double);
+ulong __ovld __cnfn convert_ulong_sat_rtn(double);
+ulong __ovld __cnfn convert_ulong_sat_rtp(double);
+ulong __ovld __cnfn convert_ulong_sat_rtz(double);
+ulong2 __ovld __cnfn convert_ulong2(double2);
+ulong2 __ovld __cnfn convert_ulong2_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
+ulong3 __ovld __cnfn convert_ulong3(double3);
+ulong3 __ovld __cnfn convert_ulong3_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
+ulong4 __ovld __cnfn convert_ulong4(double4);
+ulong4 __ovld __cnfn convert_ulong4_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
+ulong8 __ovld __cnfn convert_ulong8(double8);
+ulong8 __ovld __cnfn convert_ulong8_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
+ulong16 __ovld __cnfn convert_ulong16(double16);
+ulong16 __ovld __cnfn convert_ulong16_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
+
+float __ovld __cnfn convert_float(double);
+float __ovld __cnfn convert_float_rte(double);
+float __ovld __cnfn convert_float_rtn(double);
+float __ovld __cnfn convert_float_rtp(double);
+float __ovld __cnfn convert_float_rtz(double);
+float2 __ovld __cnfn convert_float2(double2);
+float2 __ovld __cnfn convert_float2_rte(double2);
+float2 __ovld __cnfn convert_float2_rtn(double2);
+float2 __ovld __cnfn convert_float2_rtp(double2);
+float2 __ovld __cnfn convert_float2_rtz(double2);
+float3 __ovld __cnfn convert_float3(double3);
+float3 __ovld __cnfn convert_float3_rte(double3);
+float3 __ovld __cnfn convert_float3_rtn(double3);
+float3 __ovld __cnfn convert_float3_rtp(double3);
+float3 __ovld __cnfn convert_float3_rtz(double3);
+float4 __ovld __cnfn convert_float4(double4);
+float4 __ovld __cnfn convert_float4_rte(double4);
+float4 __ovld __cnfn convert_float4_rtn(double4);
+float4 __ovld __cnfn convert_float4_rtp(double4);
+float4 __ovld __cnfn convert_float4_rtz(double4);
+float8 __ovld __cnfn convert_float8(double8);
+float8 __ovld __cnfn convert_float8_rte(double8);
+float8 __ovld __cnfn convert_float8_rtn(double8);
+float8 __ovld __cnfn convert_float8_rtp(double8);
+float8 __ovld __cnfn convert_float8_rtz(double8);
+float16 __ovld __cnfn convert_float16(double16);
+float16 __ovld __cnfn convert_float16_rte(double16);
+float16 __ovld __cnfn convert_float16_rtn(double16);
+float16 __ovld __cnfn convert_float16_rtp(double16);
+float16 __ovld __cnfn convert_float16_rtz(double16);
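+// Note that no _sat variants are declared for floating-point destination
+// types: per s6.2.3 the saturated modifier applies only to conversions with
+// an integer destination type.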
+
+double __ovld __cnfn convert_double(char);
+double __ovld __cnfn convert_double(double);
+double __ovld __cnfn convert_double(float);
+double __ovld __cnfn convert_double(int);
+double __ovld __cnfn convert_double(long);
+double __ovld __cnfn convert_double(short);
+double __ovld __cnfn convert_double(uchar);
+double __ovld __cnfn convert_double(uint);
+double __ovld __cnfn convert_double(ulong);
+double __ovld __cnfn convert_double(ushort);
+double __ovld __cnfn convert_double_rte(char);
+double __ovld __cnfn convert_double_rte(double);
+double __ovld __cnfn convert_double_rte(float);
+double __ovld __cnfn convert_double_rte(int);
+double __ovld __cnfn convert_double_rte(long);
+double __ovld __cnfn convert_double_rte(short);
+double __ovld __cnfn convert_double_rte(uchar);
+double __ovld __cnfn convert_double_rte(uint);
+double __ovld __cnfn convert_double_rte(ulong);
+double __ovld __cnfn convert_double_rte(ushort);
+double __ovld __cnfn convert_double_rtn(char);
+double __ovld __cnfn convert_double_rtn(double);
+double __ovld __cnfn convert_double_rtn(float);
+double __ovld __cnfn convert_double_rtn(int);
+double __ovld __cnfn convert_double_rtn(long);
+double __ovld __cnfn convert_double_rtn(short);
+double __ovld __cnfn convert_double_rtn(uchar);
+double __ovld __cnfn convert_double_rtn(uint);
+double __ovld __cnfn convert_double_rtn(ulong);
+double __ovld __cnfn convert_double_rtn(ushort);
+double __ovld __cnfn convert_double_rtp(char);
+double __ovld __cnfn convert_double_rtp(double);
+double __ovld __cnfn convert_double_rtp(float);
+double __ovld __cnfn convert_double_rtp(int);
+double __ovld __cnfn convert_double_rtp(long);
+double __ovld __cnfn convert_double_rtp(short);
+double __ovld __cnfn convert_double_rtp(uchar);
+double __ovld __cnfn convert_double_rtp(uint);
+double __ovld __cnfn convert_double_rtp(ulong);
+double __ovld __cnfn convert_double_rtp(ushort);
+double __ovld __cnfn convert_double_rtz(char);
+double __ovld __cnfn convert_double_rtz(double);
+double __ovld __cnfn convert_double_rtz(float);
+double __ovld __cnfn convert_double_rtz(int);
+double __ovld __cnfn convert_double_rtz(long);
+double __ovld __cnfn convert_double_rtz(short);
+double __ovld __cnfn convert_double_rtz(uchar);
+double __ovld __cnfn convert_double_rtz(uint);
+double __ovld __cnfn convert_double_rtz(ulong);
+double __ovld __cnfn convert_double_rtz(ushort);
+double2 __ovld __cnfn convert_double2(char2);
+double2 __ovld __cnfn convert_double2(double2);
+double2 __ovld __cnfn convert_double2(float2);
+double2 __ovld __cnfn convert_double2(int2);
+double2 __ovld __cnfn convert_double2(long2);
+double2 __ovld __cnfn convert_double2(short2);
+double2 __ovld __cnfn convert_double2(uchar2);
+double2 __ovld __cnfn convert_double2(uint2);
+double2 __ovld __cnfn convert_double2(ulong2);
+double2 __ovld __cnfn convert_double2(ushort2);
+double2 __ovld __cnfn convert_double2_rte(char2);
+double2 __ovld __cnfn convert_double2_rte(double2);
+double2 __ovld __cnfn convert_double2_rte(float2);
+double2 __ovld __cnfn convert_double2_rte(int2);
+double2 __ovld __cnfn convert_double2_rte(long2);
+double2 __ovld __cnfn convert_double2_rte(short2);
+double2 __ovld __cnfn convert_double2_rte(uchar2);
+double2 __ovld __cnfn convert_double2_rte(uint2);
+double2 __ovld __cnfn convert_double2_rte(ulong2);
+double2 __ovld __cnfn convert_double2_rte(ushort2);
+double2 __ovld __cnfn convert_double2_rtn(char2);
+double2 __ovld __cnfn convert_double2_rtn(double2);
+double2 __ovld __cnfn convert_double2_rtn(float2);
+double2 __ovld __cnfn convert_double2_rtn(int2);
+double2 __ovld __cnfn convert_double2_rtn(long2);
+double2 __ovld __cnfn convert_double2_rtn(short2);
+double2 __ovld __cnfn convert_double2_rtn(uchar2);
+double2 __ovld __cnfn convert_double2_rtn(uint2);
+double2 __ovld __cnfn convert_double2_rtn(ulong2);
+double2 __ovld __cnfn convert_double2_rtn(ushort2);
+double2 __ovld __cnfn convert_double2_rtp(char2);
+double2 __ovld __cnfn convert_double2_rtp(double2);
+double2 __ovld __cnfn convert_double2_rtp(float2);
+double2 __ovld __cnfn convert_double2_rtp(int2);
+double2 __ovld __cnfn convert_double2_rtp(long2);
+double2 __ovld __cnfn convert_double2_rtp(short2);
+double2 __ovld __cnfn convert_double2_rtp(uchar2);
+double2 __ovld __cnfn convert_double2_rtp(uint2);
+double2 __ovld __cnfn convert_double2_rtp(ulong2);
+double2 __ovld __cnfn convert_double2_rtp(ushort2);
+double2 __ovld __cnfn convert_double2_rtz(char2);
+double2 __ovld __cnfn convert_double2_rtz(double2);
+double2 __ovld __cnfn convert_double2_rtz(float2);
+double2 __ovld __cnfn convert_double2_rtz(int2);
+double2 __ovld __cnfn convert_double2_rtz(long2);
+double2 __ovld __cnfn convert_double2_rtz(short2);
+double2 __ovld __cnfn convert_double2_rtz(uchar2);
+double2 __ovld __cnfn convert_double2_rtz(uint2);
+double2 __ovld __cnfn convert_double2_rtz(ulong2);
+double2 __ovld __cnfn convert_double2_rtz(ushort2);
+double3 __ovld __cnfn convert_double3(char3);
+double3 __ovld __cnfn convert_double3(double3);
+double3 __ovld __cnfn convert_double3(float3);
+double3 __ovld __cnfn convert_double3(int3);
+double3 __ovld __cnfn convert_double3(long3);
+double3 __ovld __cnfn convert_double3(short3);
+double3 __ovld __cnfn convert_double3(uchar3);
+double3 __ovld __cnfn convert_double3(uint3);
+double3 __ovld __cnfn convert_double3(ulong3);
+double3 __ovld __cnfn convert_double3(ushort3);
+double3 __ovld __cnfn convert_double3_rte(char3);
+double3 __ovld __cnfn convert_double3_rte(double3);
+double3 __ovld __cnfn convert_double3_rte(float3);
+double3 __ovld __cnfn convert_double3_rte(int3);
+double3 __ovld __cnfn convert_double3_rte(long3);
+double3 __ovld __cnfn convert_double3_rte(short3);
+double3 __ovld __cnfn convert_double3_rte(uchar3);
+double3 __ovld __cnfn convert_double3_rte(uint3);
+double3 __ovld __cnfn convert_double3_rte(ulong3);
+double3 __ovld __cnfn convert_double3_rte(ushort3);
+double3 __ovld __cnfn convert_double3_rtn(char3);
+double3 __ovld __cnfn convert_double3_rtn(double3);
+double3 __ovld __cnfn convert_double3_rtn(float3);
+double3 __ovld __cnfn convert_double3_rtn(int3);
+double3 __ovld __cnfn convert_double3_rtn(long3);
+double3 __ovld __cnfn convert_double3_rtn(short3);
+double3 __ovld __cnfn convert_double3_rtn(uchar3);
+double3 __ovld __cnfn convert_double3_rtn(uint3);
+double3 __ovld __cnfn convert_double3_rtn(ulong3);
+double3 __ovld __cnfn convert_double3_rtn(ushort3);
+double3 __ovld __cnfn convert_double3_rtp(char3);
+double3 __ovld __cnfn convert_double3_rtp(double3);
+double3 __ovld __cnfn convert_double3_rtp(float3);
+double3 __ovld __cnfn convert_double3_rtp(int3);
+double3 __ovld __cnfn convert_double3_rtp(long3);
+double3 __ovld __cnfn convert_double3_rtp(short3);
+double3 __ovld __cnfn convert_double3_rtp(uchar3);
+double3 __ovld __cnfn convert_double3_rtp(uint3);
+double3 __ovld __cnfn convert_double3_rtp(ulong3);
+double3 __ovld __cnfn convert_double3_rtp(ushort3);
+double3 __ovld __cnfn convert_double3_rtz(char3);
+double3 __ovld __cnfn convert_double3_rtz(double3);
+double3 __ovld __cnfn convert_double3_rtz(float3);
+double3 __ovld __cnfn convert_double3_rtz(int3);
+double3 __ovld __cnfn convert_double3_rtz(long3);
+double3 __ovld __cnfn convert_double3_rtz(short3);
+double3 __ovld __cnfn convert_double3_rtz(uchar3);
+double3 __ovld __cnfn convert_double3_rtz(uint3);
+double3 __ovld __cnfn convert_double3_rtz(ulong3);
+double3 __ovld __cnfn convert_double3_rtz(ushort3);
+double4 __ovld __cnfn convert_double4(char4);
+double4 __ovld __cnfn convert_double4(double4);
+double4 __ovld __cnfn convert_double4(float4);
+double4 __ovld __cnfn convert_double4(int4);
+double4 __ovld __cnfn convert_double4(long4);
+double4 __ovld __cnfn convert_double4(short4);
+double4 __ovld __cnfn convert_double4(uchar4);
+double4 __ovld __cnfn convert_double4(uint4);
+double4 __ovld __cnfn convert_double4(ulong4);
+double4 __ovld __cnfn convert_double4(ushort4);
+double4 __ovld __cnfn convert_double4_rte(char4);
+double4 __ovld __cnfn convert_double4_rte(double4);
+double4 __ovld __cnfn convert_double4_rte(float4);
+double4 __ovld __cnfn convert_double4_rte(int4);
+double4 __ovld __cnfn convert_double4_rte(long4);
+double4 __ovld __cnfn convert_double4_rte(short4);
+double4 __ovld __cnfn convert_double4_rte(uchar4);
+double4 __ovld __cnfn convert_double4_rte(uint4);
+double4 __ovld __cnfn convert_double4_rte(ulong4);
+double4 __ovld __cnfn convert_double4_rte(ushort4);
+double4 __ovld __cnfn convert_double4_rtn(char4);
+double4 __ovld __cnfn convert_double4_rtn(double4);
+double4 __ovld __cnfn convert_double4_rtn(float4);
+double4 __ovld __cnfn convert_double4_rtn(int4);
+double4 __ovld __cnfn convert_double4_rtn(long4);
+double4 __ovld __cnfn convert_double4_rtn(short4);
+double4 __ovld __cnfn convert_double4_rtn(uchar4);
+double4 __ovld __cnfn convert_double4_rtn(uint4);
+double4 __ovld __cnfn convert_double4_rtn(ulong4);
+double4 __ovld __cnfn convert_double4_rtn(ushort4);
+double4 __ovld __cnfn convert_double4_rtp(char4);
+double4 __ovld __cnfn convert_double4_rtp(double4);
+double4 __ovld __cnfn convert_double4_rtp(float4);
+double4 __ovld __cnfn convert_double4_rtp(int4);
+double4 __ovld __cnfn convert_double4_rtp(long4);
+double4 __ovld __cnfn convert_double4_rtp(short4);
+double4 __ovld __cnfn convert_double4_rtp(uchar4);
+double4 __ovld __cnfn convert_double4_rtp(uint4);
+double4 __ovld __cnfn convert_double4_rtp(ulong4);
+double4 __ovld __cnfn convert_double4_rtp(ushort4);
+double4 __ovld __cnfn convert_double4_rtz(char4);
+double4 __ovld __cnfn convert_double4_rtz(double4);
+double4 __ovld __cnfn convert_double4_rtz(float4);
+double4 __ovld __cnfn convert_double4_rtz(int4);
+double4 __ovld __cnfn convert_double4_rtz(long4);
+double4 __ovld __cnfn convert_double4_rtz(short4);
+double4 __ovld __cnfn convert_double4_rtz(uchar4);
+double4 __ovld __cnfn convert_double4_rtz(uint4);
+double4 __ovld __cnfn convert_double4_rtz(ulong4);
+double4 __ovld __cnfn convert_double4_rtz(ushort4);
+double8 __ovld __cnfn convert_double8(char8);
+double8 __ovld __cnfn convert_double8(double8);
+double8 __ovld __cnfn convert_double8(float8);
+double8 __ovld __cnfn convert_double8(int8);
+double8 __ovld __cnfn convert_double8(long8);
+double8 __ovld __cnfn convert_double8(short8);
+double8 __ovld __cnfn convert_double8(uchar8);
+double8 __ovld __cnfn convert_double8(uint8);
+double8 __ovld __cnfn convert_double8(ulong8);
+double8 __ovld __cnfn convert_double8(ushort8);
+double8 __ovld __cnfn convert_double8_rte(char8);
+double8 __ovld __cnfn convert_double8_rte(double8);
+double8 __ovld __cnfn convert_double8_rte(float8);
+double8 __ovld __cnfn convert_double8_rte(int8);
+double8 __ovld __cnfn convert_double8_rte(long8);
+double8 __ovld __cnfn convert_double8_rte(short8);
+double8 __ovld __cnfn convert_double8_rte(uchar8);
+double8 __ovld __cnfn convert_double8_rte(uint8);
+double8 __ovld __cnfn convert_double8_rte(ulong8);
+double8 __ovld __cnfn convert_double8_rte(ushort8);
+double8 __ovld __cnfn convert_double8_rtn(char8);
+double8 __ovld __cnfn convert_double8_rtn(double8);
+double8 __ovld __cnfn convert_double8_rtn(float8);
+double8 __ovld __cnfn convert_double8_rtn(int8);
+double8 __ovld __cnfn convert_double8_rtn(long8);
+double8 __ovld __cnfn convert_double8_rtn(short8);
+double8 __ovld __cnfn convert_double8_rtn(uchar8);
+double8 __ovld __cnfn convert_double8_rtn(uint8);
+double8 __ovld __cnfn convert_double8_rtn(ulong8);
+double8 __ovld __cnfn convert_double8_rtn(ushort8);
+double8 __ovld __cnfn convert_double8_rtp(char8);
+double8 __ovld __cnfn convert_double8_rtp(double8);
+double8 __ovld __cnfn convert_double8_rtp(float8);
+double8 __ovld __cnfn convert_double8_rtp(int8);
+double8 __ovld __cnfn convert_double8_rtp(long8);
+double8 __ovld __cnfn convert_double8_rtp(short8);
+double8 __ovld __cnfn convert_double8_rtp(uchar8);
+double8 __ovld __cnfn convert_double8_rtp(uint8);
+double8 __ovld __cnfn convert_double8_rtp(ulong8);
+double8 __ovld __cnfn convert_double8_rtp(ushort8);
+double8 __ovld __cnfn convert_double8_rtz(char8);
+double8 __ovld __cnfn convert_double8_rtz(double8);
+double8 __ovld __cnfn convert_double8_rtz(float8);
+double8 __ovld __cnfn convert_double8_rtz(int8);
+double8 __ovld __cnfn convert_double8_rtz(long8);
+double8 __ovld __cnfn convert_double8_rtz(short8);
+double8 __ovld __cnfn convert_double8_rtz(uchar8);
+double8 __ovld __cnfn convert_double8_rtz(uint8);
+double8 __ovld __cnfn convert_double8_rtz(ulong8);
+double8 __ovld __cnfn convert_double8_rtz(ushort8);
+double16 __ovld __cnfn convert_double16(char16);
+double16 __ovld __cnfn convert_double16(double16);
+double16 __ovld __cnfn convert_double16(float16);
+double16 __ovld __cnfn convert_double16(int16);
+double16 __ovld __cnfn convert_double16(long16);
+double16 __ovld __cnfn convert_double16(short16);
+double16 __ovld __cnfn convert_double16(uchar16);
+double16 __ovld __cnfn convert_double16(uint16);
+double16 __ovld __cnfn convert_double16(ulong16);
+double16 __ovld __cnfn convert_double16(ushort16);
+double16 __ovld __cnfn convert_double16_rte(char16);
+double16 __ovld __cnfn convert_double16_rte(double16);
+double16 __ovld __cnfn convert_double16_rte(float16);
+double16 __ovld __cnfn convert_double16_rte(int16);
+double16 __ovld __cnfn convert_double16_rte(long16);
+double16 __ovld __cnfn convert_double16_rte(short16);
+double16 __ovld __cnfn convert_double16_rte(uchar16);
+double16 __ovld __cnfn convert_double16_rte(uint16);
+double16 __ovld __cnfn convert_double16_rte(ulong16);
+double16 __ovld __cnfn convert_double16_rte(ushort16);
+double16 __ovld __cnfn convert_double16_rtn(char16);
+double16 __ovld __cnfn convert_double16_rtn(double16);
+double16 __ovld __cnfn convert_double16_rtn(float16);
+double16 __ovld __cnfn convert_double16_rtn(int16);
+double16 __ovld __cnfn convert_double16_rtn(long16);
+double16 __ovld __cnfn convert_double16_rtn(short16);
+double16 __ovld __cnfn convert_double16_rtn(uchar16);
+double16 __ovld __cnfn convert_double16_rtn(uint16);
+double16 __ovld __cnfn convert_double16_rtn(ulong16);
+double16 __ovld __cnfn convert_double16_rtn(ushort16);
+double16 __ovld __cnfn convert_double16_rtp(char16);
+double16 __ovld __cnfn convert_double16_rtp(double16);
+double16 __ovld __cnfn convert_double16_rtp(float16);
+double16 __ovld __cnfn convert_double16_rtp(int16);
+double16 __ovld __cnfn convert_double16_rtp(long16);
+double16 __ovld __cnfn convert_double16_rtp(short16);
+double16 __ovld __cnfn convert_double16_rtp(uchar16);
+double16 __ovld __cnfn convert_double16_rtp(uint16);
+double16 __ovld __cnfn convert_double16_rtp(ulong16);
+double16 __ovld __cnfn convert_double16_rtp(ushort16);
+double16 __ovld __cnfn convert_double16_rtz(char16);
+double16 __ovld __cnfn convert_double16_rtz(double16);
+double16 __ovld __cnfn convert_double16_rtz(float16);
+double16 __ovld __cnfn convert_double16_rtz(int16);
+double16 __ovld __cnfn convert_double16_rtz(long16);
+double16 __ovld __cnfn convert_double16_rtz(short16);
+double16 __ovld __cnfn convert_double16_rtz(uchar16);
+double16 __ovld __cnfn convert_double16_rtz(uint16);
+double16 __ovld __cnfn convert_double16_rtz(ulong16);
+double16 __ovld __cnfn convert_double16_rtz(ushort16);
+#endif //cl_khr_fp64
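+
+// A minimal usage sketch for the cl_khr_fp64 section above (the kernel and
+// argument names are hypothetical, not part of this header):
+//
+//   #ifdef cl_khr_fp64
+//   __kernel void narrow(__global const double *in, __global float *out) {
+//     size_t i = get_global_id(0);
+//     out[i] = convert_float_rte(in[i]); // round to nearest even on narrowing
+//   }
+//   #endif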
+
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+// Conversions from half types to non-double types.
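+// A sketch of the half conversions below, assuming the device supports
+// cl_khr_fp16 (values shown are illustrative):
+//
+//   half h = 300.0h;
+//   uchar u = convert_uchar_sat(h);      // u == 255 (UCHAR_MAX)
+//   ushort s = convert_ushort_rtz(4.5h); // s == 4, rounding toward zero
+//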
+uchar __ovld __cnfn convert_uchar(half);
+uchar __ovld __cnfn convert_uchar_rte(half);
+uchar __ovld __cnfn convert_uchar_rtp(half);
+uchar __ovld __cnfn convert_uchar_rtn(half);
+uchar __ovld __cnfn convert_uchar_rtz(half);
+uchar __ovld __cnfn convert_uchar_sat(half);
+uchar __ovld __cnfn convert_uchar_sat_rte(half);
+uchar __ovld __cnfn convert_uchar_sat_rtp(half);
+uchar __ovld __cnfn convert_uchar_sat_rtn(half);
+uchar __ovld __cnfn convert_uchar_sat_rtz(half);
+uchar2 __ovld __cnfn convert_uchar2(half2);
+uchar2 __ovld __cnfn convert_uchar2_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
+uchar3 __ovld __cnfn convert_uchar3(half3);
+uchar3 __ovld __cnfn convert_uchar3_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
+uchar4 __ovld __cnfn convert_uchar4(half4);
+uchar4 __ovld __cnfn convert_uchar4_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
+uchar8 __ovld __cnfn convert_uchar8(half8);
+uchar8 __ovld __cnfn convert_uchar8_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
+uchar16 __ovld __cnfn convert_uchar16(half16);
+uchar16 __ovld __cnfn convert_uchar16_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
+ushort __ovld __cnfn convert_ushort(half);
+ushort __ovld __cnfn convert_ushort_rte(half);
+ushort __ovld __cnfn convert_ushort_rtp(half);
+ushort __ovld __cnfn convert_ushort_rtn(half);
+ushort __ovld __cnfn convert_ushort_rtz(half);
+ushort __ovld __cnfn convert_ushort_sat(half);
+ushort __ovld __cnfn convert_ushort_sat_rte(half);
+ushort __ovld __cnfn convert_ushort_sat_rtp(half);
+ushort __ovld __cnfn convert_ushort_sat_rtn(half);
+ushort __ovld __cnfn convert_ushort_sat_rtz(half);
+ushort2 __ovld __cnfn convert_ushort2(half2);
+ushort2 __ovld __cnfn convert_ushort2_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
+ushort3 __ovld __cnfn convert_ushort3(half3);
+ushort3 __ovld __cnfn convert_ushort3_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
+ushort4 __ovld __cnfn convert_ushort4(half4);
+ushort4 __ovld __cnfn convert_ushort4_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
+ushort8 __ovld __cnfn convert_ushort8(half8);
+ushort8 __ovld __cnfn convert_ushort8_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
+ushort16 __ovld __cnfn convert_ushort16(half16);
+ushort16 __ovld __cnfn convert_ushort16_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
+uint __ovld __cnfn convert_uint(half);
+uint __ovld __cnfn convert_uint_rte(half);
+uint __ovld __cnfn convert_uint_rtp(half);
+uint __ovld __cnfn convert_uint_rtn(half);
+uint __ovld __cnfn convert_uint_rtz(half);
+uint __ovld __cnfn convert_uint_sat(half);
+uint __ovld __cnfn convert_uint_sat_rte(half);
+uint __ovld __cnfn convert_uint_sat_rtp(half);
+uint __ovld __cnfn convert_uint_sat_rtn(half);
+uint __ovld __cnfn convert_uint_sat_rtz(half);
+uint2 __ovld __cnfn convert_uint2(half2);
+uint2 __ovld __cnfn convert_uint2_rte(half2);
+uint2 __ovld __cnfn convert_uint2_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_rtz(half2);
+uint2 __ovld __cnfn convert_uint2_sat(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
+uint3 __ovld __cnfn convert_uint3(half3);
+uint3 __ovld __cnfn convert_uint3_rte(half3);
+uint3 __ovld __cnfn convert_uint3_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_rtz(half3);
+uint3 __ovld __cnfn convert_uint3_sat(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
+uint4 __ovld __cnfn convert_uint4(half4);
+uint4 __ovld __cnfn convert_uint4_rte(half4);
+uint4 __ovld __cnfn convert_uint4_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_rtz(half4);
+uint4 __ovld __cnfn convert_uint4_sat(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
+uint8 __ovld __cnfn convert_uint8(half8);
+uint8 __ovld __cnfn convert_uint8_rte(half8);
+uint8 __ovld __cnfn convert_uint8_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_rtz(half8);
+uint8 __ovld __cnfn convert_uint8_sat(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
+uint16 __ovld __cnfn convert_uint16(half16);
+uint16 __ovld __cnfn convert_uint16_rte(half16);
+uint16 __ovld __cnfn convert_uint16_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_rtz(half16);
+uint16 __ovld __cnfn convert_uint16_sat(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
+ulong __ovld __cnfn convert_ulong(half);
+ulong __ovld __cnfn convert_ulong_rte(half);
+ulong __ovld __cnfn convert_ulong_rtp(half);
+ulong __ovld __cnfn convert_ulong_rtn(half);
+ulong __ovld __cnfn convert_ulong_rtz(half);
+ulong __ovld __cnfn convert_ulong_sat(half);
+ulong __ovld __cnfn convert_ulong_sat_rte(half);
+ulong __ovld __cnfn convert_ulong_sat_rtp(half);
+ulong __ovld __cnfn convert_ulong_sat_rtn(half);
+ulong __ovld __cnfn convert_ulong_sat_rtz(half);
+ulong2 __ovld __cnfn convert_ulong2(half2);
+ulong2 __ovld __cnfn convert_ulong2_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
+ulong3 __ovld __cnfn convert_ulong3(half3);
+ulong3 __ovld __cnfn convert_ulong3_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
+ulong4 __ovld __cnfn convert_ulong4(half4);
+ulong4 __ovld __cnfn convert_ulong4_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
+ulong8 __ovld __cnfn convert_ulong8(half8);
+ulong8 __ovld __cnfn convert_ulong8_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
+ulong16 __ovld __cnfn convert_ulong16(half16);
+ulong16 __ovld __cnfn convert_ulong16_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
+char __ovld __cnfn convert_char(half);
+char __ovld __cnfn convert_char_rte(half);
+char __ovld __cnfn convert_char_rtp(half);
+char __ovld __cnfn convert_char_rtn(half);
+char __ovld __cnfn convert_char_rtz(half);
+char __ovld __cnfn convert_char_sat(half);
+char __ovld __cnfn convert_char_sat_rte(half);
+char __ovld __cnfn convert_char_sat_rtp(half);
+char __ovld __cnfn convert_char_sat_rtn(half);
+char __ovld __cnfn convert_char_sat_rtz(half);
+char2 __ovld __cnfn convert_char2(half2);
+char2 __ovld __cnfn convert_char2_rte(half2);
+char2 __ovld __cnfn convert_char2_rtp(half2);
+char2 __ovld __cnfn convert_char2_rtn(half2);
+char2 __ovld __cnfn convert_char2_rtz(half2);
+char2 __ovld __cnfn convert_char2_sat(half2);
+char2 __ovld __cnfn convert_char2_sat_rte(half2);
+char2 __ovld __cnfn convert_char2_sat_rtp(half2);
+char2 __ovld __cnfn convert_char2_sat_rtn(half2);
+char2 __ovld __cnfn convert_char2_sat_rtz(half2);
+char3 __ovld __cnfn convert_char3(half3);
+char3 __ovld __cnfn convert_char3_rte(half3);
+char3 __ovld __cnfn convert_char3_rtp(half3);
+char3 __ovld __cnfn convert_char3_rtn(half3);
+char3 __ovld __cnfn convert_char3_rtz(half3);
+char3 __ovld __cnfn convert_char3_sat(half3);
+char3 __ovld __cnfn convert_char3_sat_rte(half3);
+char3 __ovld __cnfn convert_char3_sat_rtp(half3);
+char3 __ovld __cnfn convert_char3_sat_rtn(half3);
+char3 __ovld __cnfn convert_char3_sat_rtz(half3);
+char4 __ovld __cnfn convert_char4(half4);
+char4 __ovld __cnfn convert_char4_rte(half4);
+char4 __ovld __cnfn convert_char4_rtp(half4);
+char4 __ovld __cnfn convert_char4_rtn(half4);
+char4 __ovld __cnfn convert_char4_rtz(half4);
+char4 __ovld __cnfn convert_char4_sat(half4);
+char4 __ovld __cnfn convert_char4_sat_rte(half4);
+char4 __ovld __cnfn convert_char4_sat_rtp(half4);
+char4 __ovld __cnfn convert_char4_sat_rtn(half4);
+char4 __ovld __cnfn convert_char4_sat_rtz(half4);
+char8 __ovld __cnfn convert_char8(half8);
+char8 __ovld __cnfn convert_char8_rte(half8);
+char8 __ovld __cnfn convert_char8_rtp(half8);
+char8 __ovld __cnfn convert_char8_rtn(half8);
+char8 __ovld __cnfn convert_char8_rtz(half8);
+char8 __ovld __cnfn convert_char8_sat(half8);
+char8 __ovld __cnfn convert_char8_sat_rte(half8);
+char8 __ovld __cnfn convert_char8_sat_rtp(half8);
+char8 __ovld __cnfn convert_char8_sat_rtn(half8);
+char8 __ovld __cnfn convert_char8_sat_rtz(half8);
+char16 __ovld __cnfn convert_char16(half16);
+char16 __ovld __cnfn convert_char16_rte(half16);
+char16 __ovld __cnfn convert_char16_rtp(half16);
+char16 __ovld __cnfn convert_char16_rtn(half16);
+char16 __ovld __cnfn convert_char16_rtz(half16);
+char16 __ovld __cnfn convert_char16_sat(half16);
+char16 __ovld __cnfn convert_char16_sat_rte(half16);
+char16 __ovld __cnfn convert_char16_sat_rtp(half16);
+char16 __ovld __cnfn convert_char16_sat_rtn(half16);
+char16 __ovld __cnfn convert_char16_sat_rtz(half16);
+short __ovld __cnfn convert_short(half);
+short __ovld __cnfn convert_short_rte(half);
+short __ovld __cnfn convert_short_rtp(half);
+short __ovld __cnfn convert_short_rtn(half);
+short __ovld __cnfn convert_short_rtz(half);
+short __ovld __cnfn convert_short_sat(half);
+short __ovld __cnfn convert_short_sat_rte(half);
+short __ovld __cnfn convert_short_sat_rtp(half);
+short __ovld __cnfn convert_short_sat_rtn(half);
+short __ovld __cnfn convert_short_sat_rtz(half);
+short2 __ovld __cnfn convert_short2(half2);
+short2 __ovld __cnfn convert_short2_rte(half2);
+short2 __ovld __cnfn convert_short2_rtp(half2);
+short2 __ovld __cnfn convert_short2_rtn(half2);
+short2 __ovld __cnfn convert_short2_rtz(half2);
+short2 __ovld __cnfn convert_short2_sat(half2);
+short2 __ovld __cnfn convert_short2_sat_rte(half2);
+short2 __ovld __cnfn convert_short2_sat_rtp(half2);
+short2 __ovld __cnfn convert_short2_sat_rtn(half2);
+short2 __ovld __cnfn convert_short2_sat_rtz(half2);
+short3 __ovld __cnfn convert_short3(half3);
+short3 __ovld __cnfn convert_short3_rte(half3);
+short3 __ovld __cnfn convert_short3_rtp(half3);
+short3 __ovld __cnfn convert_short3_rtn(half3);
+short3 __ovld __cnfn convert_short3_rtz(half3);
+short3 __ovld __cnfn convert_short3_sat(half3);
+short3 __ovld __cnfn convert_short3_sat_rte(half3);
+short3 __ovld __cnfn convert_short3_sat_rtp(half3);
+short3 __ovld __cnfn convert_short3_sat_rtn(half3);
+short3 __ovld __cnfn convert_short3_sat_rtz(half3);
+short4 __ovld __cnfn convert_short4(half4);
+short4 __ovld __cnfn convert_short4_rte(half4);
+short4 __ovld __cnfn convert_short4_rtp(half4);
+short4 __ovld __cnfn convert_short4_rtn(half4);
+short4 __ovld __cnfn convert_short4_rtz(half4);
+short4 __ovld __cnfn convert_short4_sat(half4);
+short4 __ovld __cnfn convert_short4_sat_rte(half4);
+short4 __ovld __cnfn convert_short4_sat_rtp(half4);
+short4 __ovld __cnfn convert_short4_sat_rtn(half4);
+short4 __ovld __cnfn convert_short4_sat_rtz(half4);
+short8 __ovld __cnfn convert_short8(half8);
+short8 __ovld __cnfn convert_short8_rte(half8);
+short8 __ovld __cnfn convert_short8_rtp(half8);
+short8 __ovld __cnfn convert_short8_rtn(half8);
+short8 __ovld __cnfn convert_short8_rtz(half8);
+short8 __ovld __cnfn convert_short8_sat(half8);
+short8 __ovld __cnfn convert_short8_sat_rte(half8);
+short8 __ovld __cnfn convert_short8_sat_rtp(half8);
+short8 __ovld __cnfn convert_short8_sat_rtn(half8);
+short8 __ovld __cnfn convert_short8_sat_rtz(half8);
+short16 __ovld __cnfn convert_short16(half16);
+short16 __ovld __cnfn convert_short16_rte(half16);
+short16 __ovld __cnfn convert_short16_rtp(half16);
+short16 __ovld __cnfn convert_short16_rtn(half16);
+short16 __ovld __cnfn convert_short16_rtz(half16);
+short16 __ovld __cnfn convert_short16_sat(half16);
+short16 __ovld __cnfn convert_short16_sat_rte(half16);
+short16 __ovld __cnfn convert_short16_sat_rtp(half16);
+short16 __ovld __cnfn convert_short16_sat_rtn(half16);
+short16 __ovld __cnfn convert_short16_sat_rtz(half16);
+int __ovld __cnfn convert_int(half);
+int __ovld __cnfn convert_int_rte(half);
+int __ovld __cnfn convert_int_rtp(half);
+int __ovld __cnfn convert_int_rtn(half);
+int __ovld __cnfn convert_int_rtz(half);
+int __ovld __cnfn convert_int_sat(half);
+int __ovld __cnfn convert_int_sat_rte(half);
+int __ovld __cnfn convert_int_sat_rtp(half);
+int __ovld __cnfn convert_int_sat_rtn(half);
+int __ovld __cnfn convert_int_sat_rtz(half);
+int2 __ovld __cnfn convert_int2(half2);
+int2 __ovld __cnfn convert_int2_rte(half2);
+int2 __ovld __cnfn convert_int2_rtp(half2);
+int2 __ovld __cnfn convert_int2_rtn(half2);
+int2 __ovld __cnfn convert_int2_rtz(half2);
+int2 __ovld __cnfn convert_int2_sat(half2);
+int2 __ovld __cnfn convert_int2_sat_rte(half2);
+int2 __ovld __cnfn convert_int2_sat_rtp(half2);
+int2 __ovld __cnfn convert_int2_sat_rtn(half2);
+int2 __ovld __cnfn convert_int2_sat_rtz(half2);
+int3 __ovld __cnfn convert_int3(half3);
+int3 __ovld __cnfn convert_int3_rte(half3);
+int3 __ovld __cnfn convert_int3_rtp(half3);
+int3 __ovld __cnfn convert_int3_rtn(half3);
+int3 __ovld __cnfn convert_int3_rtz(half3);
+int3 __ovld __cnfn convert_int3_sat(half3);
+int3 __ovld __cnfn convert_int3_sat_rte(half3);
+int3 __ovld __cnfn convert_int3_sat_rtp(half3);
+int3 __ovld __cnfn convert_int3_sat_rtn(half3);
+int3 __ovld __cnfn convert_int3_sat_rtz(half3);
+int4 __ovld __cnfn convert_int4(half4);
+int4 __ovld __cnfn convert_int4_rte(half4);
+int4 __ovld __cnfn convert_int4_rtp(half4);
+int4 __ovld __cnfn convert_int4_rtn(half4);
+int4 __ovld __cnfn convert_int4_rtz(half4);
+int4 __ovld __cnfn convert_int4_sat(half4);
+int4 __ovld __cnfn convert_int4_sat_rte(half4);
+int4 __ovld __cnfn convert_int4_sat_rtp(half4);
+int4 __ovld __cnfn convert_int4_sat_rtn(half4);
+int4 __ovld __cnfn convert_int4_sat_rtz(half4);
+int8 __ovld __cnfn convert_int8(half8);
+int8 __ovld __cnfn convert_int8_rte(half8);
+int8 __ovld __cnfn convert_int8_rtp(half8);
+int8 __ovld __cnfn convert_int8_rtn(half8);
+int8 __ovld __cnfn convert_int8_rtz(half8);
+int8 __ovld __cnfn convert_int8_sat(half8);
+int8 __ovld __cnfn convert_int8_sat_rte(half8);
+int8 __ovld __cnfn convert_int8_sat_rtp(half8);
+int8 __ovld __cnfn convert_int8_sat_rtn(half8);
+int8 __ovld __cnfn convert_int8_sat_rtz(half8);
+int16 __ovld __cnfn convert_int16(half16);
+int16 __ovld __cnfn convert_int16_rte(half16);
+int16 __ovld __cnfn convert_int16_rtp(half16);
+int16 __ovld __cnfn convert_int16_rtn(half16);
+int16 __ovld __cnfn convert_int16_rtz(half16);
+int16 __ovld __cnfn convert_int16_sat(half16);
+int16 __ovld __cnfn convert_int16_sat_rte(half16);
+int16 __ovld __cnfn convert_int16_sat_rtp(half16);
+int16 __ovld __cnfn convert_int16_sat_rtn(half16);
+int16 __ovld __cnfn convert_int16_sat_rtz(half16);
+long __ovld __cnfn convert_long(half);
+long __ovld __cnfn convert_long_rte(half);
+long __ovld __cnfn convert_long_rtp(half);
+long __ovld __cnfn convert_long_rtn(half);
+long __ovld __cnfn convert_long_rtz(half);
+long __ovld __cnfn convert_long_sat(half);
+long __ovld __cnfn convert_long_sat_rte(half);
+long __ovld __cnfn convert_long_sat_rtp(half);
+long __ovld __cnfn convert_long_sat_rtn(half);
+long __ovld __cnfn convert_long_sat_rtz(half);
+long2 __ovld __cnfn convert_long2(half2);
+long2 __ovld __cnfn convert_long2_rte(half2);
+long2 __ovld __cnfn convert_long2_rtp(half2);
+long2 __ovld __cnfn convert_long2_rtn(half2);
+long2 __ovld __cnfn convert_long2_rtz(half2);
+long2 __ovld __cnfn convert_long2_sat(half2);
+long2 __ovld __cnfn convert_long2_sat_rte(half2);
+long2 __ovld __cnfn convert_long2_sat_rtp(half2);
+long2 __ovld __cnfn convert_long2_sat_rtn(half2);
+long2 __ovld __cnfn convert_long2_sat_rtz(half2);
+long3 __ovld __cnfn convert_long3(half3);
+long3 __ovld __cnfn convert_long3_rte(half3);
+long3 __ovld __cnfn convert_long3_rtp(half3);
+long3 __ovld __cnfn convert_long3_rtn(half3);
+long3 __ovld __cnfn convert_long3_rtz(half3);
+long3 __ovld __cnfn convert_long3_sat(half3);
+long3 __ovld __cnfn convert_long3_sat_rte(half3);
+long3 __ovld __cnfn convert_long3_sat_rtp(half3);
+long3 __ovld __cnfn convert_long3_sat_rtn(half3);
+long3 __ovld __cnfn convert_long3_sat_rtz(half3);
+long4 __ovld __cnfn convert_long4(half4);
+long4 __ovld __cnfn convert_long4_rte(half4);
+long4 __ovld __cnfn convert_long4_rtp(half4);
+long4 __ovld __cnfn convert_long4_rtn(half4);
+long4 __ovld __cnfn convert_long4_rtz(half4);
+long4 __ovld __cnfn convert_long4_sat(half4);
+long4 __ovld __cnfn convert_long4_sat_rte(half4);
+long4 __ovld __cnfn convert_long4_sat_rtp(half4);
+long4 __ovld __cnfn convert_long4_sat_rtn(half4);
+long4 __ovld __cnfn convert_long4_sat_rtz(half4);
+long8 __ovld __cnfn convert_long8(half8);
+long8 __ovld __cnfn convert_long8_rte(half8);
+long8 __ovld __cnfn convert_long8_rtp(half8);
+long8 __ovld __cnfn convert_long8_rtn(half8);
+long8 __ovld __cnfn convert_long8_rtz(half8);
+long8 __ovld __cnfn convert_long8_sat(half8);
+long8 __ovld __cnfn convert_long8_sat_rte(half8);
+long8 __ovld __cnfn convert_long8_sat_rtp(half8);
+long8 __ovld __cnfn convert_long8_sat_rtn(half8);
+long8 __ovld __cnfn convert_long8_sat_rtz(half8);
+long16 __ovld __cnfn convert_long16(half16);
+long16 __ovld __cnfn convert_long16_rte(half16);
+long16 __ovld __cnfn convert_long16_rtp(half16);
+long16 __ovld __cnfn convert_long16_rtn(half16);
+long16 __ovld __cnfn convert_long16_rtz(half16);
+long16 __ovld __cnfn convert_long16_sat(half16);
+long16 __ovld __cnfn convert_long16_sat_rte(half16);
+long16 __ovld __cnfn convert_long16_sat_rtp(half16);
+long16 __ovld __cnfn convert_long16_sat_rtn(half16);
+long16 __ovld __cnfn convert_long16_sat_rtz(half16);
+float __ovld __cnfn convert_float(half);
+float __ovld __cnfn convert_float_rte(half);
+float __ovld __cnfn convert_float_rtp(half);
+float __ovld __cnfn convert_float_rtn(half);
+float __ovld __cnfn convert_float_rtz(half);
+float2 __ovld __cnfn convert_float2(half2);
+float2 __ovld __cnfn convert_float2_rte(half2);
+float2 __ovld __cnfn convert_float2_rtp(half2);
+float2 __ovld __cnfn convert_float2_rtn(half2);
+float2 __ovld __cnfn convert_float2_rtz(half2);
+float3 __ovld __cnfn convert_float3(half3);
+float3 __ovld __cnfn convert_float3_rte(half3);
+float3 __ovld __cnfn convert_float3_rtp(half3);
+float3 __ovld __cnfn convert_float3_rtn(half3);
+float3 __ovld __cnfn convert_float3_rtz(half3);
+float4 __ovld __cnfn convert_float4(half4);
+float4 __ovld __cnfn convert_float4_rte(half4);
+float4 __ovld __cnfn convert_float4_rtp(half4);
+float4 __ovld __cnfn convert_float4_rtn(half4);
+float4 __ovld __cnfn convert_float4_rtz(half4);
+float8 __ovld __cnfn convert_float8(half8);
+float8 __ovld __cnfn convert_float8_rte(half8);
+float8 __ovld __cnfn convert_float8_rtp(half8);
+float8 __ovld __cnfn convert_float8_rtn(half8);
+float8 __ovld __cnfn convert_float8_rtz(half8);
+float16 __ovld __cnfn convert_float16(half16);
+float16 __ovld __cnfn convert_float16_rte(half16);
+float16 __ovld __cnfn convert_float16_rtp(half16);
+float16 __ovld __cnfn convert_float16_rtn(half16);
+float16 __ovld __cnfn convert_float16_rtz(half16);
+
+// Convert non-double types to half types.
+half __ovld __cnfn convert_half(uchar);
+half __ovld __cnfn convert_half(ushort);
+half __ovld __cnfn convert_half(uint);
+half __ovld __cnfn convert_half(ulong);
+half __ovld __cnfn convert_half(char);
+half __ovld __cnfn convert_half(short);
+half __ovld __cnfn convert_half(int);
+half __ovld __cnfn convert_half(long);
+half __ovld __cnfn convert_half(float);
+half __ovld __cnfn convert_half(half);
+half __ovld __cnfn convert_half_rte(uchar);
+half __ovld __cnfn convert_half_rte(ushort);
+half __ovld __cnfn convert_half_rte(uint);
+half __ovld __cnfn convert_half_rte(ulong);
+half __ovld __cnfn convert_half_rte(char);
+half __ovld __cnfn convert_half_rte(short);
+half __ovld __cnfn convert_half_rte(int);
+half __ovld __cnfn convert_half_rte(long);
+half __ovld __cnfn convert_half_rte(float);
+half __ovld __cnfn convert_half_rte(half);
+half __ovld __cnfn convert_half_rtp(uchar);
+half __ovld __cnfn convert_half_rtp(ushort);
+half __ovld __cnfn convert_half_rtp(uint);
+half __ovld __cnfn convert_half_rtp(ulong);
+half __ovld __cnfn convert_half_rtp(char);
+half __ovld __cnfn convert_half_rtp(short);
+half __ovld __cnfn convert_half_rtp(int);
+half __ovld __cnfn convert_half_rtp(long);
+half __ovld __cnfn convert_half_rtp(float);
+half __ovld __cnfn convert_half_rtp(half);
+half __ovld __cnfn convert_half_rtn(uchar);
+half __ovld __cnfn convert_half_rtn(ushort);
+half __ovld __cnfn convert_half_rtn(uint);
+half __ovld __cnfn convert_half_rtn(ulong);
+half __ovld __cnfn convert_half_rtn(char);
+half __ovld __cnfn convert_half_rtn(short);
+half __ovld __cnfn convert_half_rtn(int);
+half __ovld __cnfn convert_half_rtn(long);
+half __ovld __cnfn convert_half_rtn(float);
+half __ovld __cnfn convert_half_rtn(half);
+half __ovld __cnfn convert_half_rtz(uchar);
+half __ovld __cnfn convert_half_rtz(ushort);
+half __ovld __cnfn convert_half_rtz(uint);
+half __ovld __cnfn convert_half_rtz(ulong);
+half __ovld __cnfn convert_half_rtz(char);
+half __ovld __cnfn convert_half_rtz(short);
+half __ovld __cnfn convert_half_rtz(int);
+half __ovld __cnfn convert_half_rtz(long);
+half __ovld __cnfn convert_half_rtz(float);
+half __ovld __cnfn convert_half_rtz(half);
+half2 __ovld __cnfn convert_half2(char2);
+half2 __ovld __cnfn convert_half2(uchar2);
+half2 __ovld __cnfn convert_half2(short2);
+half2 __ovld __cnfn convert_half2(ushort2);
+half2 __ovld __cnfn convert_half2(int2);
+half2 __ovld __cnfn convert_half2(uint2);
+half2 __ovld __cnfn convert_half2(long2);
+half2 __ovld __cnfn convert_half2(ulong2);
+half2 __ovld __cnfn convert_half2(float2);
+half2 __ovld __cnfn convert_half2(half2);
+half2 __ovld __cnfn convert_half2_rte(char2);
+half2 __ovld __cnfn convert_half2_rte(uchar2);
+half2 __ovld __cnfn convert_half2_rte(short2);
+half2 __ovld __cnfn convert_half2_rte(ushort2);
+half2 __ovld __cnfn convert_half2_rte(int2);
+half2 __ovld __cnfn convert_half2_rte(uint2);
+half2 __ovld __cnfn convert_half2_rte(long2);
+half2 __ovld __cnfn convert_half2_rte(ulong2);
+half2 __ovld __cnfn convert_half2_rte(float2);
+half2 __ovld __cnfn convert_half2_rte(half2);
+half2 __ovld __cnfn convert_half2_rtp(char2);
+half2 __ovld __cnfn convert_half2_rtp(uchar2);
+half2 __ovld __cnfn convert_half2_rtp(short2);
+half2 __ovld __cnfn convert_half2_rtp(ushort2);
+half2 __ovld __cnfn convert_half2_rtp(int2);
+half2 __ovld __cnfn convert_half2_rtp(uint2);
+half2 __ovld __cnfn convert_half2_rtp(long2);
+half2 __ovld __cnfn convert_half2_rtp(ulong2);
+half2 __ovld __cnfn convert_half2_rtp(float2);
+half2 __ovld __cnfn convert_half2_rtp(half2);
+half2 __ovld __cnfn convert_half2_rtn(char2);
+half2 __ovld __cnfn convert_half2_rtn(uchar2);
+half2 __ovld __cnfn convert_half2_rtn(short2);
+half2 __ovld __cnfn convert_half2_rtn(ushort2);
+half2 __ovld __cnfn convert_half2_rtn(int2);
+half2 __ovld __cnfn convert_half2_rtn(uint2);
+half2 __ovld __cnfn convert_half2_rtn(long2);
+half2 __ovld __cnfn convert_half2_rtn(ulong2);
+half2 __ovld __cnfn convert_half2_rtn(float2);
+half2 __ovld __cnfn convert_half2_rtn(half2);
+half2 __ovld __cnfn convert_half2_rtz(char2);
+half2 __ovld __cnfn convert_half2_rtz(uchar2);
+half2 __ovld __cnfn convert_half2_rtz(short2);
+half2 __ovld __cnfn convert_half2_rtz(ushort2);
+half2 __ovld __cnfn convert_half2_rtz(int2);
+half2 __ovld __cnfn convert_half2_rtz(uint2);
+half2 __ovld __cnfn convert_half2_rtz(long2);
+half2 __ovld __cnfn convert_half2_rtz(ulong2);
+half2 __ovld __cnfn convert_half2_rtz(float2);
+half2 __ovld __cnfn convert_half2_rtz(half2);
+half3 __ovld __cnfn convert_half3(char3);
+half3 __ovld __cnfn convert_half3(uchar3);
+half3 __ovld __cnfn convert_half3(short3);
+half3 __ovld __cnfn convert_half3(ushort3);
+half3 __ovld __cnfn convert_half3(int3);
+half3 __ovld __cnfn convert_half3(uint3);
+half3 __ovld __cnfn convert_half3(long3);
+half3 __ovld __cnfn convert_half3(ulong3);
+half3 __ovld __cnfn convert_half3(float3);
+half3 __ovld __cnfn convert_half3(half3);
+half3 __ovld __cnfn convert_half3_rte(char3);
+half3 __ovld __cnfn convert_half3_rte(uchar3);
+half3 __ovld __cnfn convert_half3_rte(short3);
+half3 __ovld __cnfn convert_half3_rte(ushort3);
+half3 __ovld __cnfn convert_half3_rte(int3);
+half3 __ovld __cnfn convert_half3_rte(uint3);
+half3 __ovld __cnfn convert_half3_rte(long3);
+half3 __ovld __cnfn convert_half3_rte(ulong3);
+half3 __ovld __cnfn convert_half3_rte(float3);
+half3 __ovld __cnfn convert_half3_rte(half3);
+half3 __ovld __cnfn convert_half3_rtp(char3);
+half3 __ovld __cnfn convert_half3_rtp(uchar3);
+half3 __ovld __cnfn convert_half3_rtp(short3);
+half3 __ovld __cnfn convert_half3_rtp(ushort3);
+half3 __ovld __cnfn convert_half3_rtp(int3);
+half3 __ovld __cnfn convert_half3_rtp(uint3);
+half3 __ovld __cnfn convert_half3_rtp(long3);
+half3 __ovld __cnfn convert_half3_rtp(ulong3);
+half3 __ovld __cnfn convert_half3_rtp(float3);
+half3 __ovld __cnfn convert_half3_rtp(half3);
+half3 __ovld __cnfn convert_half3_rtn(char3);
+half3 __ovld __cnfn convert_half3_rtn(uchar3);
+half3 __ovld __cnfn convert_half3_rtn(short3);
+half3 __ovld __cnfn convert_half3_rtn(ushort3);
+half3 __ovld __cnfn convert_half3_rtn(int3);
+half3 __ovld __cnfn convert_half3_rtn(uint3);
+half3 __ovld __cnfn convert_half3_rtn(long3);
+half3 __ovld __cnfn convert_half3_rtn(ulong3);
+half3 __ovld __cnfn convert_half3_rtn(float3);
+half3 __ovld __cnfn convert_half3_rtn(half3);
+half3 __ovld __cnfn convert_half3_rtz(char3);
+half3 __ovld __cnfn convert_half3_rtz(uchar3);
+half3 __ovld __cnfn convert_half3_rtz(short3);
+half3 __ovld __cnfn convert_half3_rtz(ushort3);
+half3 __ovld __cnfn convert_half3_rtz(int3);
+half3 __ovld __cnfn convert_half3_rtz(uint3);
+half3 __ovld __cnfn convert_half3_rtz(long3);
+half3 __ovld __cnfn convert_half3_rtz(ulong3);
+half3 __ovld __cnfn convert_half3_rtz(float3);
+half3 __ovld __cnfn convert_half3_rtz(half3);
+half4 __ovld __cnfn convert_half4(char4);
+half4 __ovld __cnfn convert_half4(uchar4);
+half4 __ovld __cnfn convert_half4(short4);
+half4 __ovld __cnfn convert_half4(ushort4);
+half4 __ovld __cnfn convert_half4(int4);
+half4 __ovld __cnfn convert_half4(uint4);
+half4 __ovld __cnfn convert_half4(long4);
+half4 __ovld __cnfn convert_half4(ulong4);
+half4 __ovld __cnfn convert_half4(float4);
+half4 __ovld __cnfn convert_half4(half4);
+half4 __ovld __cnfn convert_half4_rte(char4);
+half4 __ovld __cnfn convert_half4_rte(uchar4);
+half4 __ovld __cnfn convert_half4_rte(short4);
+half4 __ovld __cnfn convert_half4_rte(ushort4);
+half4 __ovld __cnfn convert_half4_rte(int4);
+half4 __ovld __cnfn convert_half4_rte(uint4);
+half4 __ovld __cnfn convert_half4_rte(long4);
+half4 __ovld __cnfn convert_half4_rte(ulong4);
+half4 __ovld __cnfn convert_half4_rte(float4);
+half4 __ovld __cnfn convert_half4_rte(half4);
+half4 __ovld __cnfn convert_half4_rtp(char4);
+half4 __ovld __cnfn convert_half4_rtp(uchar4);
+half4 __ovld __cnfn convert_half4_rtp(short4);
+half4 __ovld __cnfn convert_half4_rtp(ushort4);
+half4 __ovld __cnfn convert_half4_rtp(int4);
+half4 __ovld __cnfn convert_half4_rtp(uint4);
+half4 __ovld __cnfn convert_half4_rtp(long4);
+half4 __ovld __cnfn convert_half4_rtp(ulong4);
+half4 __ovld __cnfn convert_half4_rtp(float4);
+half4 __ovld __cnfn convert_half4_rtp(half4);
+half4 __ovld __cnfn convert_half4_rtn(char4);
+half4 __ovld __cnfn convert_half4_rtn(uchar4);
+half4 __ovld __cnfn convert_half4_rtn(short4);
+half4 __ovld __cnfn convert_half4_rtn(ushort4);
+half4 __ovld __cnfn convert_half4_rtn(int4);
+half4 __ovld __cnfn convert_half4_rtn(uint4);
+half4 __ovld __cnfn convert_half4_rtn(long4);
+half4 __ovld __cnfn convert_half4_rtn(ulong4);
+half4 __ovld __cnfn convert_half4_rtn(float4);
+half4 __ovld __cnfn convert_half4_rtn(half4);
+half4 __ovld __cnfn convert_half4_rtz(char4);
+half4 __ovld __cnfn convert_half4_rtz(uchar4);
+half4 __ovld __cnfn convert_half4_rtz(short4);
+half4 __ovld __cnfn convert_half4_rtz(ushort4);
+half4 __ovld __cnfn convert_half4_rtz(int4);
+half4 __ovld __cnfn convert_half4_rtz(uint4);
+half4 __ovld __cnfn convert_half4_rtz(long4);
+half4 __ovld __cnfn convert_half4_rtz(ulong4);
+half4 __ovld __cnfn convert_half4_rtz(float4);
+half4 __ovld __cnfn convert_half4_rtz(half4);
+half8 __ovld __cnfn convert_half8(char8);
+half8 __ovld __cnfn convert_half8(uchar8);
+half8 __ovld __cnfn convert_half8(short8);
+half8 __ovld __cnfn convert_half8(ushort8);
+half8 __ovld __cnfn convert_half8(int8);
+half8 __ovld __cnfn convert_half8(uint8);
+half8 __ovld __cnfn convert_half8(long8);
+half8 __ovld __cnfn convert_half8(ulong8);
+half8 __ovld __cnfn convert_half8(float8);
+half8 __ovld __cnfn convert_half8(half8);
+half8 __ovld __cnfn convert_half8_rte(char8);
+half8 __ovld __cnfn convert_half8_rte(uchar8);
+half8 __ovld __cnfn convert_half8_rte(short8);
+half8 __ovld __cnfn convert_half8_rte(ushort8);
+half8 __ovld __cnfn convert_half8_rte(int8);
+half8 __ovld __cnfn convert_half8_rte(uint8);
+half8 __ovld __cnfn convert_half8_rte(long8);
+half8 __ovld __cnfn convert_half8_rte(ulong8);
+half8 __ovld __cnfn convert_half8_rte(float8);
+half8 __ovld __cnfn convert_half8_rte(half8);
+half8 __ovld __cnfn convert_half8_rtp(char8);
+half8 __ovld __cnfn convert_half8_rtp(uchar8);
+half8 __ovld __cnfn convert_half8_rtp(short8);
+half8 __ovld __cnfn convert_half8_rtp(ushort8);
+half8 __ovld __cnfn convert_half8_rtp(int8);
+half8 __ovld __cnfn convert_half8_rtp(uint8);
+half8 __ovld __cnfn convert_half8_rtp(long8);
+half8 __ovld __cnfn convert_half8_rtp(ulong8);
+half8 __ovld __cnfn convert_half8_rtp(float8);
+half8 __ovld __cnfn convert_half8_rtp(half8);
+half8 __ovld __cnfn convert_half8_rtn(char8);
+half8 __ovld __cnfn convert_half8_rtn(uchar8);
+half8 __ovld __cnfn convert_half8_rtn(short8);
+half8 __ovld __cnfn convert_half8_rtn(ushort8);
+half8 __ovld __cnfn convert_half8_rtn(int8);
+half8 __ovld __cnfn convert_half8_rtn(uint8);
+half8 __ovld __cnfn convert_half8_rtn(long8);
+half8 __ovld __cnfn convert_half8_rtn(ulong8);
+half8 __ovld __cnfn convert_half8_rtn(float8);
+half8 __ovld __cnfn convert_half8_rtn(half8);
+half8 __ovld __cnfn convert_half8_rtz(char8);
+half8 __ovld __cnfn convert_half8_rtz(uchar8);
+half8 __ovld __cnfn convert_half8_rtz(short8);
+half8 __ovld __cnfn convert_half8_rtz(ushort8);
+half8 __ovld __cnfn convert_half8_rtz(int8);
+half8 __ovld __cnfn convert_half8_rtz(uint8);
+half8 __ovld __cnfn convert_half8_rtz(long8);
+half8 __ovld __cnfn convert_half8_rtz(ulong8);
+half8 __ovld __cnfn convert_half8_rtz(float8);
+half8 __ovld __cnfn convert_half8_rtz(half8);
+half16 __ovld __cnfn convert_half16(char16);
+half16 __ovld __cnfn convert_half16(uchar16);
+half16 __ovld __cnfn convert_half16(short16);
+half16 __ovld __cnfn convert_half16(ushort16);
+half16 __ovld __cnfn convert_half16(int16);
+half16 __ovld __cnfn convert_half16(uint16);
+half16 __ovld __cnfn convert_half16(long16);
+half16 __ovld __cnfn convert_half16(ulong16);
+half16 __ovld __cnfn convert_half16(float16);
+half16 __ovld __cnfn convert_half16(half16);
+half16 __ovld __cnfn convert_half16_rte(char16);
+half16 __ovld __cnfn convert_half16_rte(uchar16);
+half16 __ovld __cnfn convert_half16_rte(short16);
+half16 __ovld __cnfn convert_half16_rte(ushort16);
+half16 __ovld __cnfn convert_half16_rte(int16);
+half16 __ovld __cnfn convert_half16_rte(uint16);
+half16 __ovld __cnfn convert_half16_rte(long16);
+half16 __ovld __cnfn convert_half16_rte(ulong16);
+half16 __ovld __cnfn convert_half16_rte(float16);
+half16 __ovld __cnfn convert_half16_rte(half16);
+half16 __ovld __cnfn convert_half16_rtp(char16);
+half16 __ovld __cnfn convert_half16_rtp(uchar16);
+half16 __ovld __cnfn convert_half16_rtp(short16);
+half16 __ovld __cnfn convert_half16_rtp(ushort16);
+half16 __ovld __cnfn convert_half16_rtp(int16);
+half16 __ovld __cnfn convert_half16_rtp(uint16);
+half16 __ovld __cnfn convert_half16_rtp(long16);
+half16 __ovld __cnfn convert_half16_rtp(ulong16);
+half16 __ovld __cnfn convert_half16_rtp(float16);
+half16 __ovld __cnfn convert_half16_rtp(half16);
+half16 __ovld __cnfn convert_half16_rtn(char16);
+half16 __ovld __cnfn convert_half16_rtn(uchar16);
+half16 __ovld __cnfn convert_half16_rtn(short16);
+half16 __ovld __cnfn convert_half16_rtn(ushort16);
+half16 __ovld __cnfn convert_half16_rtn(int16);
+half16 __ovld __cnfn convert_half16_rtn(uint16);
+half16 __ovld __cnfn convert_half16_rtn(long16);
+half16 __ovld __cnfn convert_half16_rtn(ulong16);
+half16 __ovld __cnfn convert_half16_rtn(float16);
+half16 __ovld __cnfn convert_half16_rtn(half16);
+half16 __ovld __cnfn convert_half16_rtz(char16);
+half16 __ovld __cnfn convert_half16_rtz(uchar16);
+half16 __ovld __cnfn convert_half16_rtz(short16);
+half16 __ovld __cnfn convert_half16_rtz(ushort16);
+half16 __ovld __cnfn convert_half16_rtz(int16);
+half16 __ovld __cnfn convert_half16_rtz(uint16);
+half16 __ovld __cnfn convert_half16_rtz(long16);
+half16 __ovld __cnfn convert_half16_rtz(ulong16);
+half16 __ovld __cnfn convert_half16_rtz(float16);
+half16 __ovld __cnfn convert_half16_rtz(half16);
+
+// Convert half types to double types.
+#ifdef cl_khr_fp64
+double __ovld __cnfn convert_double(half);
+double __ovld __cnfn convert_double_rte(half);
+double __ovld __cnfn convert_double_rtp(half);
+double __ovld __cnfn convert_double_rtn(half);
+double __ovld __cnfn convert_double_rtz(half);
+double2 __ovld __cnfn convert_double2(half2);
+double2 __ovld __cnfn convert_double2_rte(half2);
+double2 __ovld __cnfn convert_double2_rtp(half2);
+double2 __ovld __cnfn convert_double2_rtn(half2);
+double2 __ovld __cnfn convert_double2_rtz(half2);
+double3 __ovld __cnfn convert_double3(half3);
+double3 __ovld __cnfn convert_double3_rte(half3);
+double3 __ovld __cnfn convert_double3_rtp(half3);
+double3 __ovld __cnfn convert_double3_rtn(half3);
+double3 __ovld __cnfn convert_double3_rtz(half3);
+double4 __ovld __cnfn convert_double4(half4);
+double4 __ovld __cnfn convert_double4_rte(half4);
+double4 __ovld __cnfn convert_double4_rtp(half4);
+double4 __ovld __cnfn convert_double4_rtn(half4);
+double4 __ovld __cnfn convert_double4_rtz(half4);
+double8 __ovld __cnfn convert_double8(half8);
+double8 __ovld __cnfn convert_double8_rte(half8);
+double8 __ovld __cnfn convert_double8_rtp(half8);
+double8 __ovld __cnfn convert_double8_rtn(half8);
+double8 __ovld __cnfn convert_double8_rtz(half8);
+double16 __ovld __cnfn convert_double16(half16);
+double16 __ovld __cnfn convert_double16_rte(half16);
+double16 __ovld __cnfn convert_double16_rtp(half16);
+double16 __ovld __cnfn convert_double16_rtn(half16);
+double16 __ovld __cnfn convert_double16_rtz(half16);
+
+// Convert double types to half types.
+half __ovld __cnfn convert_half(double);
+half __ovld __cnfn convert_half_rte(double);
+half __ovld __cnfn convert_half_rtp(double);
+half __ovld __cnfn convert_half_rtn(double);
+half __ovld __cnfn convert_half_rtz(double);
+half2 __ovld __cnfn convert_half2(double2);
+half2 __ovld __cnfn convert_half2_rte(double2);
+half2 __ovld __cnfn convert_half2_rtp(double2);
+half2 __ovld __cnfn convert_half2_rtn(double2);
+half2 __ovld __cnfn convert_half2_rtz(double2);
+half3 __ovld __cnfn convert_half3(double3);
+half3 __ovld __cnfn convert_half3_rte(double3);
+half3 __ovld __cnfn convert_half3_rtp(double3);
+half3 __ovld __cnfn convert_half3_rtn(double3);
+half3 __ovld __cnfn convert_half3_rtz(double3);
+half4 __ovld __cnfn convert_half4(double4);
+half4 __ovld __cnfn convert_half4_rte(double4);
+half4 __ovld __cnfn convert_half4_rtp(double4);
+half4 __ovld __cnfn convert_half4_rtn(double4);
+half4 __ovld __cnfn convert_half4_rtz(double4);
+half8 __ovld __cnfn convert_half8(double8);
+half8 __ovld __cnfn convert_half8_rte(double8);
+half8 __ovld __cnfn convert_half8_rtp(double8);
+half8 __ovld __cnfn convert_half8_rtn(double8);
+half8 __ovld __cnfn convert_half8_rtz(double8);
+half16 __ovld __cnfn convert_half16(double16);
+half16 __ovld __cnfn convert_half16_rte(double16);
+half16 __ovld __cnfn convert_half16_rtp(double16);
+half16 __ovld __cnfn convert_half16_rtn(double16);
+half16 __ovld __cnfn convert_half16_rtz(double16);
+#endif //cl_khr_fp64
+
+#endif // cl_khr_fp16
+
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
+
+/**
+ * Returns the number of dimensions in use. This is the
+ * value given to the work_dim argument specified in
+ * clEnqueueNDRangeKernel.
+ * For clEnqueueTask, this returns 1.
+ */
+uint __ovld __cnfn get_work_dim(void);
+
+/**
+ * Returns the number of global work-items specified for the
+ * dimension identified by dimindx. This value is given by
+ * the global_work_size argument to
+ * clEnqueueNDRangeKernel. Valid values of dimindx
+ * are 0 to get_work_dim() - 1. For other values of
+ * dimindx, get_global_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_global_size(uint);
+
+/**
+ * Returns the unique global work-item ID value for
+ * the dimension identified by dimindx. The global work-item
+ * ID specifies the work-item ID based on the number of
+ * global work-items specified to execute the kernel. Valid
+ * values of dimindx are 0 to get_work_dim() - 1. For
+ * other values of dimindx, get_global_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_id(uint);
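+// Illustrative example (not part of this header; kernel and argument names
+// are hypothetical): a minimal 1-D kernel indexing a buffer by its global
+// work-item ID.
+//
+//   __kernel void vadd(__global const float *a, __global const float *b,
+//                      __global float *dst) {
+//       size_t i = get_global_id(0); // unique per work-item in dimension 0
+//       dst[i] = a[i] + b[i];
+//   }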
+
+/**
+ * Returns the number of local work-items specified in the
+ * dimension identified by dimindx. This value is given by
+ * the local_work_size argument to
+ * clEnqueueNDRangeKernel if local_work_size is not
+ * NULL; otherwise the OpenCL implementation chooses
+ * an appropriate local_work_size value which is returned
+ * by this function. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_local_size(uint);
+
+/**
+ * Returns the unique local work-item ID, i.e. the ID of a work-item
+ * within a specific work-group, for the dimension identified by
+ * dimindx. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_local_id(uint);
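+// Illustrative example (not part of this header; names are hypothetical):
+// get_local_id is the natural index into __local scratch memory shared by
+// a work-group. Here each work-item stages one element, then the group
+// reverses its tile.
+//
+//   __kernel void reverse_tiles(__global const float *src,
+//                               __global float *dst,
+//                               __local float *tile) {
+//       size_t lid = get_local_id(0);       // 0 .. get_local_size(0) - 1
+//       tile[lid] = src[get_global_id(0)];
+//       barrier(CLK_LOCAL_MEM_FENCE);       // whole tile is now visible
+//       dst[get_global_id(0)] = tile[get_local_size(0) - 1 - lid];
+//   }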
+
+/**
+ * Returns the number of work-groups that will execute a
+ * kernel for the dimension identified by dimindx.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values of dimindx, get_num_groups() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_num_groups(uint);
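+// Note: with uniform work-groups (always the case in OpenCL 1.x) these
+// quantities are related, for each valid dimension d, by
+//   get_global_size(d) == get_num_groups(d) * get_local_size(d)
+// With OpenCL 2.0 non-uniform work-groups the trailing group in a
+// dimension may be smaller; get_enqueued_local_size (declared below) then
+// reports the size requested at enqueue time.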
+
+/**
+ * get_group_id returns the work-group ID, which is a
+ * number from 0 to get_num_groups(dimindx) - 1.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_group_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_group_id(uint);
+
+/**
+ * get_global_offset returns the offset values specified in the
+ * global_work_offset argument to
+ * clEnqueueNDRangeKernel.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_global_offset() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_offset(uint);
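+// Note: for uniform work-groups the IDs above are related, for each valid
+// dimension d, by
+//   get_global_id(d) == get_group_id(d) * get_local_size(d)
+//                       + get_local_id(d) + get_global_offset(d)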
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+size_t __ovld get_enqueued_local_size(uint);
+size_t __ovld get_global_linear_id(void);
+size_t __ovld get_local_linear_id(void);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
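+// Note: get_global_linear_id flattens the N-D global ID into a single
+// index. For a 2-D range it is equivalent to
+//   (get_global_id(1) - get_global_offset(1)) * get_global_size(0)
+//     + (get_global_id(0) - get_global_offset(0))
+// which matches row-major buffer addressing.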
+
+// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
+
+/**
+ * Arc cosine function.
+ */
+float __ovld __cnfn acos(float);
+float2 __ovld __cnfn acos(float2);
+float3 __ovld __cnfn acos(float3);
+float4 __ovld __cnfn acos(float4);
+float8 __ovld __cnfn acos(float8);
+float16 __ovld __cnfn acos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acos(double);
+double2 __ovld __cnfn acos(double2);
+double3 __ovld __cnfn acos(double3);
+double4 __ovld __cnfn acos(double4);
+double8 __ovld __cnfn acos(double8);
+double16 __ovld __cnfn acos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acos(half);
+half2 __ovld __cnfn acos(half2);
+half3 __ovld __cnfn acos(half3);
+half4 __ovld __cnfn acos(half4);
+half8 __ovld __cnfn acos(half8);
+half16 __ovld __cnfn acos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic cosine.
+ */
+float __ovld __cnfn acosh(float);
+float2 __ovld __cnfn acosh(float2);
+float3 __ovld __cnfn acosh(float3);
+float4 __ovld __cnfn acosh(float4);
+float8 __ovld __cnfn acosh(float8);
+float16 __ovld __cnfn acosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acosh(double);
+double2 __ovld __cnfn acosh(double2);
+double3 __ovld __cnfn acosh(double3);
+double4 __ovld __cnfn acosh(double4);
+double8 __ovld __cnfn acosh(double8);
+double16 __ovld __cnfn acosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acosh(half);
+half2 __ovld __cnfn acosh(half2);
+half3 __ovld __cnfn acosh(half3);
+half4 __ovld __cnfn acosh(half4);
+half8 __ovld __cnfn acosh(half8);
+half16 __ovld __cnfn acosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute acos(x) / PI.
+ */
+float __ovld __cnfn acospi(float);
+float2 __ovld __cnfn acospi(float2);
+float3 __ovld __cnfn acospi(float3);
+float4 __ovld __cnfn acospi(float4);
+float8 __ovld __cnfn acospi(float8);
+float16 __ovld __cnfn acospi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acospi(double);
+double2 __ovld __cnfn acospi(double2);
+double3 __ovld __cnfn acospi(double3);
+double4 __ovld __cnfn acospi(double4);
+double8 __ovld __cnfn acospi(double8);
+double16 __ovld __cnfn acospi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acospi(half);
+half2 __ovld __cnfn acospi(half2);
+half3 __ovld __cnfn acospi(half3);
+half4 __ovld __cnfn acospi(half4);
+half8 __ovld __cnfn acospi(half8);
+half16 __ovld __cnfn acospi(half16);
+#endif //cl_khr_fp16
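+// For example, acospi(-1.0f) is 1.0f and acospi(0.0f) is 0.5f (up to the
+// implementation's rounding tolerance), since acos maps [-1, 1] onto
+// [0, PI].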
+
+/**
+ * Arc sine function.
+ */
+float __ovld __cnfn asin(float);
+float2 __ovld __cnfn asin(float2);
+float3 __ovld __cnfn asin(float3);
+float4 __ovld __cnfn asin(float4);
+float8 __ovld __cnfn asin(float8);
+float16 __ovld __cnfn asin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asin(double);
+double2 __ovld __cnfn asin(double2);
+double3 __ovld __cnfn asin(double3);
+double4 __ovld __cnfn asin(double4);
+double8 __ovld __cnfn asin(double8);
+double16 __ovld __cnfn asin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asin(half);
+half2 __ovld __cnfn asin(half2);
+half3 __ovld __cnfn asin(half3);
+half4 __ovld __cnfn asin(half4);
+half8 __ovld __cnfn asin(half8);
+half16 __ovld __cnfn asin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic sine.
+ */
+float __ovld __cnfn asinh(float);
+float2 __ovld __cnfn asinh(float2);
+float3 __ovld __cnfn asinh(float3);
+float4 __ovld __cnfn asinh(float4);
+float8 __ovld __cnfn asinh(float8);
+float16 __ovld __cnfn asinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinh(double);
+double2 __ovld __cnfn asinh(double2);
+double3 __ovld __cnfn asinh(double3);
+double4 __ovld __cnfn asinh(double4);
+double8 __ovld __cnfn asinh(double8);
+double16 __ovld __cnfn asinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinh(half);
+half2 __ovld __cnfn asinh(half2);
+half3 __ovld __cnfn asinh(half3);
+half4 __ovld __cnfn asinh(half4);
+half8 __ovld __cnfn asinh(half8);
+half16 __ovld __cnfn asinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute asin(x) / PI.
+ */
+float __ovld __cnfn asinpi(float);
+float2 __ovld __cnfn asinpi(float2);
+float3 __ovld __cnfn asinpi(float3);
+float4 __ovld __cnfn asinpi(float4);
+float8 __ovld __cnfn asinpi(float8);
+float16 __ovld __cnfn asinpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinpi(double);
+double2 __ovld __cnfn asinpi(double2);
+double3 __ovld __cnfn asinpi(double3);
+double4 __ovld __cnfn asinpi(double4);
+double8 __ovld __cnfn asinpi(double8);
+double16 __ovld __cnfn asinpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinpi(half);
+half2 __ovld __cnfn asinpi(half2);
+half3 __ovld __cnfn asinpi(half3);
+half4 __ovld __cnfn asinpi(half4);
+half8 __ovld __cnfn asinpi(half8);
+half16 __ovld __cnfn asinpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent function.
+ */
+float __ovld __cnfn atan(float);
+float2 __ovld __cnfn atan(float2);
+float3 __ovld __cnfn atan(float3);
+float4 __ovld __cnfn atan(float4);
+float8 __ovld __cnfn atan(float8);
+float16 __ovld __cnfn atan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan(double);
+double2 __ovld __cnfn atan(double2);
+double3 __ovld __cnfn atan(double3);
+double4 __ovld __cnfn atan(double4);
+double8 __ovld __cnfn atan(double8);
+double16 __ovld __cnfn atan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan(half);
+half2 __ovld __cnfn atan(half2);
+half3 __ovld __cnfn atan(half3);
+half4 __ovld __cnfn atan(half4);
+half8 __ovld __cnfn atan(half8);
+half16 __ovld __cnfn atan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent of y / x.
+ */
+float __ovld __cnfn atan2(float, float);
+float2 __ovld __cnfn atan2(float2, float2);
+float3 __ovld __cnfn atan2(float3, float3);
+float4 __ovld __cnfn atan2(float4, float4);
+float8 __ovld __cnfn atan2(float8, float8);
+float16 __ovld __cnfn atan2(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2(double, double);
+double2 __ovld __cnfn atan2(double2, double2);
+double3 __ovld __cnfn atan2(double3, double3);
+double4 __ovld __cnfn atan2(double4, double4);
+double8 __ovld __cnfn atan2(double8, double8);
+double16 __ovld __cnfn atan2(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2(half, half);
+half2 __ovld __cnfn atan2(half2, half2);
+half3 __ovld __cnfn atan2(half3, half3);
+half4 __ovld __cnfn atan2(half4, half4);
+half8 __ovld __cnfn atan2(half8, half8);
+half16 __ovld __cnfn atan2(half16, half16);
+#endif //cl_khr_fp16
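+// Unlike atan(y / x), atan2 uses the signs of both arguments to pick the
+// quadrant: atan2(1.0f, -1.0f) is about 3 * PI / 4 (roughly 2.356f),
+// whereas atan(1.0f / -1.0f) is -PI / 4. It also remains well defined
+// when x is zero.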
+
+/**
+ * Inverse hyperbolic tangent.
+ */
+float __ovld __cnfn atanh(float);
+float2 __ovld __cnfn atanh(float2);
+float3 __ovld __cnfn atanh(float3);
+float4 __ovld __cnfn atanh(float4);
+float8 __ovld __cnfn atanh(float8);
+float16 __ovld __cnfn atanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanh(double);
+double2 __ovld __cnfn atanh(double2);
+double3 __ovld __cnfn atanh(double3);
+double4 __ovld __cnfn atanh(double4);
+double8 __ovld __cnfn atanh(double8);
+double16 __ovld __cnfn atanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanh(half);
+half2 __ovld __cnfn atanh(half2);
+half3 __ovld __cnfn atanh(half3);
+half4 __ovld __cnfn atanh(half4);
+half8 __ovld __cnfn atanh(half8);
+half16 __ovld __cnfn atanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan(x) / PI.
+ */
+float __ovld __cnfn atanpi(float);
+float2 __ovld __cnfn atanpi(float2);
+float3 __ovld __cnfn atanpi(float3);
+float4 __ovld __cnfn atanpi(float4);
+float8 __ovld __cnfn atanpi(float8);
+float16 __ovld __cnfn atanpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanpi(double);
+double2 __ovld __cnfn atanpi(double2);
+double3 __ovld __cnfn atanpi(double3);
+double4 __ovld __cnfn atanpi(double4);
+double8 __ovld __cnfn atanpi(double8);
+double16 __ovld __cnfn atanpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanpi(half);
+half2 __ovld __cnfn atanpi(half2);
+half3 __ovld __cnfn atanpi(half3);
+half4 __ovld __cnfn atanpi(half4);
+half8 __ovld __cnfn atanpi(half8);
+half16 __ovld __cnfn atanpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan2(y, x) / PI.
+ */
+float __ovld __cnfn atan2pi(float, float);
+float2 __ovld __cnfn atan2pi(float2, float2);
+float3 __ovld __cnfn atan2pi(float3, float3);
+float4 __ovld __cnfn atan2pi(float4, float4);
+float8 __ovld __cnfn atan2pi(float8, float8);
+float16 __ovld __cnfn atan2pi(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2pi(double, double);
+double2 __ovld __cnfn atan2pi(double2, double2);
+double3 __ovld __cnfn atan2pi(double3, double3);
+double4 __ovld __cnfn atan2pi(double4, double4);
+double8 __ovld __cnfn atan2pi(double8, double8);
+double16 __ovld __cnfn atan2pi(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2pi(half, half);
+half2 __ovld __cnfn atan2pi(half2, half2);
+half3 __ovld __cnfn atan2pi(half3, half3);
+half4 __ovld __cnfn atan2pi(half4, half4);
+half8 __ovld __cnfn atan2pi(half8, half8);
+half16 __ovld __cnfn atan2pi(half16, half16);
+#endif //cl_khr_fp16
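+// For example, atan2pi(1.0f, -1.0f) is 0.75f (up to rounding): the same
+// angle atan2 reports in radians, rescaled so that a half turn is 1.0.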
+
+/**
+ * Compute the cube root.
+ */
+float __ovld __cnfn cbrt(float);
+float2 __ovld __cnfn cbrt(float2);
+float3 __ovld __cnfn cbrt(float3);
+float4 __ovld __cnfn cbrt(float4);
+float8 __ovld __cnfn cbrt(float8);
+float16 __ovld __cnfn cbrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cbrt(double);
+double2 __ovld __cnfn cbrt(double2);
+double3 __ovld __cnfn cbrt(double3);
+double4 __ovld __cnfn cbrt(double4);
+double8 __ovld __cnfn cbrt(double8);
+double16 __ovld __cnfn cbrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cbrt(half);
+half2 __ovld __cnfn cbrt(half2);
+half3 __ovld __cnfn cbrt(half3);
+half4 __ovld __cnfn cbrt(half4);
+half8 __ovld __cnfn cbrt(half8);
+half16 __ovld __cnfn cbrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to positive
+ * infinity rounding mode.
+ */
+float __ovld __cnfn ceil(float);
+float2 __ovld __cnfn ceil(float2);
+float3 __ovld __cnfn ceil(float3);
+float4 __ovld __cnfn ceil(float4);
+float8 __ovld __cnfn ceil(float8);
+float16 __ovld __cnfn ceil(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ceil(double);
+double2 __ovld __cnfn ceil(double2);
+double3 __ovld __cnfn ceil(double3);
+double4 __ovld __cnfn ceil(double4);
+double8 __ovld __cnfn ceil(double8);
+double16 __ovld __cnfn ceil(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ceil(half);
+half2 __ovld __cnfn ceil(half2);
+half3 __ovld __cnfn ceil(half3);
+half4 __ovld __cnfn ceil(half4);
+half8 __ovld __cnfn ceil(half8);
+half16 __ovld __cnfn ceil(half16);
+#endif //cl_khr_fp16
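+// For example, ceil(2.1f) is 3.0f and ceil(-2.1f) is -2.0f: rounding is
+// always towards +infinity, not away from zero.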
+
+/**
+ * Returns x with its sign changed to match the sign of y.
+ */
+float __ovld __cnfn copysign(float, float);
+float2 __ovld __cnfn copysign(float2, float2);
+float3 __ovld __cnfn copysign(float3, float3);
+float4 __ovld __cnfn copysign(float4, float4);
+float8 __ovld __cnfn copysign(float8, float8);
+float16 __ovld __cnfn copysign(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn copysign(double, double);
+double2 __ovld __cnfn copysign(double2, double2);
+double3 __ovld __cnfn copysign(double3, double3);
+double4 __ovld __cnfn copysign(double4, double4);
+double8 __ovld __cnfn copysign(double8, double8);
+double16 __ovld __cnfn copysign(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn copysign(half, half);
+half2 __ovld __cnfn copysign(half2, half2);
+half3 __ovld __cnfn copysign(half3, half3);
+half4 __ovld __cnfn copysign(half4, half4);
+half8 __ovld __cnfn copysign(half8, half8);
+half16 __ovld __cnfn copysign(half16, half16);
+#endif //cl_khr_fp16
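+// For example, copysign(3.0f, -0.0f) is -3.0f: the sign of a negative
+// zero is honoured, which a naive test such as (y < 0.0f ? -x : x) would
+// miss.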
+
+/**
+ * Compute cosine.
+ */
+float __ovld __cnfn cos(float);
+float2 __ovld __cnfn cos(float2);
+float3 __ovld __cnfn cos(float3);
+float4 __ovld __cnfn cos(float4);
+float8 __ovld __cnfn cos(float8);
+float16 __ovld __cnfn cos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cos(double);
+double2 __ovld __cnfn cos(double2);
+double3 __ovld __cnfn cos(double3);
+double4 __ovld __cnfn cos(double4);
+double8 __ovld __cnfn cos(double8);
+double16 __ovld __cnfn cos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cos(half);
+half2 __ovld __cnfn cos(half2);
+half3 __ovld __cnfn cos(half3);
+half4 __ovld __cnfn cos(half4);
+half8 __ovld __cnfn cos(half8);
+half16 __ovld __cnfn cos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic cosine.
+ */
+float __ovld __cnfn cosh(float);
+float2 __ovld __cnfn cosh(float2);
+float3 __ovld __cnfn cosh(float3);
+float4 __ovld __cnfn cosh(float4);
+float8 __ovld __cnfn cosh(float8);
+float16 __ovld __cnfn cosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cosh(double);
+double2 __ovld __cnfn cosh(double2);
+double3 __ovld __cnfn cosh(double3);
+double4 __ovld __cnfn cosh(double4);
+double8 __ovld __cnfn cosh(double8);
+double16 __ovld __cnfn cosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cosh(half);
+half2 __ovld __cnfn cosh(half2);
+half3 __ovld __cnfn cosh(half3);
+half4 __ovld __cnfn cosh(half4);
+half8 __ovld __cnfn cosh(half8);
+half16 __ovld __cnfn cosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cos(PI * x).
+ */
+float __ovld __cnfn cospi(float);
+float2 __ovld __cnfn cospi(float2);
+float3 __ovld __cnfn cospi(float3);
+float4 __ovld __cnfn cospi(float4);
+float8 __ovld __cnfn cospi(float8);
+float16 __ovld __cnfn cospi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cospi(double);
+double2 __ovld __cnfn cospi(double2);
+double3 __ovld __cnfn cospi(double3);
+double4 __ovld __cnfn cospi(double4);
+double8 __ovld __cnfn cospi(double8);
+double16 __ovld __cnfn cospi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cospi(half);
+half2 __ovld __cnfn cospi(half2);
+half3 __ovld __cnfn cospi(half3);
+half4 __ovld __cnfn cospi(half4);
+half8 __ovld __cnfn cospi(half8);
+half16 __ovld __cnfn cospi(half16);
+#endif //cl_khr_fp16
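+// cospi(x) is preferable to cos(M_PI_F * x) when the angle is naturally
+// expressed in half turns, because multiplying by a rounded approximation
+// of PI is avoided: cospi(0.5f) can be exactly 0.0f, while
+// cos(M_PI_F * 0.5f) generally is not.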
+
+/**
+ * Complementary error function.
+ */
+float __ovld __cnfn erfc(float);
+float2 __ovld __cnfn erfc(float2);
+float3 __ovld __cnfn erfc(float3);
+float4 __ovld __cnfn erfc(float4);
+float8 __ovld __cnfn erfc(float8);
+float16 __ovld __cnfn erfc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erfc(double);
+double2 __ovld __cnfn erfc(double2);
+double3 __ovld __cnfn erfc(double3);
+double4 __ovld __cnfn erfc(double4);
+double8 __ovld __cnfn erfc(double8);
+double16 __ovld __cnfn erfc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erfc(half);
+half2 __ovld __cnfn erfc(half2);
+half3 __ovld __cnfn erfc(half3);
+half4 __ovld __cnfn erfc(half4);
+half8 __ovld __cnfn erfc(half8);
+half16 __ovld __cnfn erfc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Error function encountered in integrating the
+ * normal distribution.
+ */
+float __ovld __cnfn erf(float);
+float2 __ovld __cnfn erf(float2);
+float3 __ovld __cnfn erf(float3);
+float4 __ovld __cnfn erf(float4);
+float8 __ovld __cnfn erf(float8);
+float16 __ovld __cnfn erf(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erf(double);
+double2 __ovld __cnfn erf(double2);
+double3 __ovld __cnfn erf(double3);
+double4 __ovld __cnfn erf(double4);
+double8 __ovld __cnfn erf(double8);
+double16 __ovld __cnfn erf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erf(half);
+half2 __ovld __cnfn erf(half2);
+half3 __ovld __cnfn erf(half3);
+half4 __ovld __cnfn erf(half4);
+half8 __ovld __cnfn erf(half8);
+half16 __ovld __cnfn erf(half16);
+#endif //cl_khr_fp16
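+// Note: mathematically erfc(x) == 1 - erf(x), but for large positive x,
+// erf(x) is so close to 1 that the subtraction cancels; calling erfc
+// directly preserves precision in that regime.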
+
+/**
+ * Compute the base e exponential function of x.
+ */
+float __ovld __cnfn exp(float);
+float2 __ovld __cnfn exp(float2);
+float3 __ovld __cnfn exp(float3);
+float4 __ovld __cnfn exp(float4);
+float8 __ovld __cnfn exp(float8);
+float16 __ovld __cnfn exp(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp(double);
+double2 __ovld __cnfn exp(double2);
+double3 __ovld __cnfn exp(double3);
+double4 __ovld __cnfn exp(double4);
+double8 __ovld __cnfn exp(double8);
+double16 __ovld __cnfn exp(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp(half);
+half2 __ovld __cnfn exp(half2);
+half3 __ovld __cnfn exp(half3);
+half4 __ovld __cnfn exp(half4);
+half8 __ovld __cnfn exp(half8);
+half16 __ovld __cnfn exp(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 2 function.
+ */
+float __ovld __cnfn exp2(float);
+float2 __ovld __cnfn exp2(float2);
+float3 __ovld __cnfn exp2(float3);
+float4 __ovld __cnfn exp2(float4);
+float8 __ovld __cnfn exp2(float8);
+float16 __ovld __cnfn exp2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp2(double);
+double2 __ovld __cnfn exp2(double2);
+double3 __ovld __cnfn exp2(double3);
+double4 __ovld __cnfn exp2(double4);
+double8 __ovld __cnfn exp2(double8);
+double16 __ovld __cnfn exp2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp2(half);
+half2 __ovld __cnfn exp2(half2);
+half3 __ovld __cnfn exp2(half3);
+half4 __ovld __cnfn exp2(half4);
+half8 __ovld __cnfn exp2(half8);
+half16 __ovld __cnfn exp2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 10 function.
+ */
+float __ovld __cnfn exp10(float);
+float2 __ovld __cnfn exp10(float2);
+float3 __ovld __cnfn exp10(float3);
+float4 __ovld __cnfn exp10(float4);
+float8 __ovld __cnfn exp10(float8);
+float16 __ovld __cnfn exp10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp10(double);
+double2 __ovld __cnfn exp10(double2);
+double3 __ovld __cnfn exp10(double3);
+double4 __ovld __cnfn exp10(double4);
+double8 __ovld __cnfn exp10(double8);
+double16 __ovld __cnfn exp10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp10(half);
+half2 __ovld __cnfn exp10(half2);
+half3 __ovld __cnfn exp10(half3);
+half4 __ovld __cnfn exp10(half4);
+half8 __ovld __cnfn exp10(half8);
+half16 __ovld __cnfn exp10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute e^x - 1.0.
+ */
+float __ovld __cnfn expm1(float);
+float2 __ovld __cnfn expm1(float2);
+float3 __ovld __cnfn expm1(float3);
+float4 __ovld __cnfn expm1(float4);
+float8 __ovld __cnfn expm1(float8);
+float16 __ovld __cnfn expm1(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn expm1(double);
+double2 __ovld __cnfn expm1(double2);
+double3 __ovld __cnfn expm1(double3);
+double4 __ovld __cnfn expm1(double4);
+double8 __ovld __cnfn expm1(double8);
+double16 __ovld __cnfn expm1(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn expm1(half);
+half2 __ovld __cnfn expm1(half2);
+half3 __ovld __cnfn expm1(half3);
+half4 __ovld __cnfn expm1(half4);
+half8 __ovld __cnfn expm1(half8);
+half16 __ovld __cnfn expm1(half16);
+#endif //cl_khr_fp16
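+// For x near zero, expm1(x) stays accurate where exp(x) - 1.0f cancels:
+// with x = 1e-7f, expm1(x) is about 1.00000005e-7f, while exp(x) - 1.0f
+// can only resolve multiples of FLT_EPSILON (about 1.19e-7f).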
+
+/**
+ * Compute the absolute value of a floating-point number.
+ */
+float __ovld __cnfn fabs(float);
+float2 __ovld __cnfn fabs(float2);
+float3 __ovld __cnfn fabs(float3);
+float4 __ovld __cnfn fabs(float4);
+float8 __ovld __cnfn fabs(float8);
+float16 __ovld __cnfn fabs(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fabs(double);
+double2 __ovld __cnfn fabs(double2);
+double3 __ovld __cnfn fabs(double3);
+double4 __ovld __cnfn fabs(double4);
+double8 __ovld __cnfn fabs(double8);
+double16 __ovld __cnfn fabs(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fabs(half);
+half2 __ovld __cnfn fabs(half2);
+half3 __ovld __cnfn fabs(half3);
+half4 __ovld __cnfn fabs(half4);
+half8 __ovld __cnfn fabs(half8);
+half16 __ovld __cnfn fabs(half16);
+#endif //cl_khr_fp16
+
+/**
+ * x - y if x > y, +0 if x is less than or equal to y.
+ */
+float __ovld __cnfn fdim(float, float);
+float2 __ovld __cnfn fdim(float2, float2);
+float3 __ovld __cnfn fdim(float3, float3);
+float4 __ovld __cnfn fdim(float4, float4);
+float8 __ovld __cnfn fdim(float8, float8);
+float16 __ovld __cnfn fdim(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fdim(double, double);
+double2 __ovld __cnfn fdim(double2, double2);
+double3 __ovld __cnfn fdim(double3, double3);
+double4 __ovld __cnfn fdim(double4, double4);
+double8 __ovld __cnfn fdim(double8, double8);
+double16 __ovld __cnfn fdim(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fdim(half, half);
+half2 __ovld __cnfn fdim(half2, half2);
+half3 __ovld __cnfn fdim(half3, half3);
+half4 __ovld __cnfn fdim(half4, half4);
+half8 __ovld __cnfn fdim(half8, half8);
+half16 __ovld __cnfn fdim(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to
+ * negative infinity rounding mode.
+ */
+float __ovld __cnfn floor(float);
+float2 __ovld __cnfn floor(float2);
+float3 __ovld __cnfn floor(float3);
+float4 __ovld __cnfn floor(float4);
+float8 __ovld __cnfn floor(float8);
+float16 __ovld __cnfn floor(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn floor(double);
+double2 __ovld __cnfn floor(double2);
+double3 __ovld __cnfn floor(double3);
+double4 __ovld __cnfn floor(double4);
+double8 __ovld __cnfn floor(double8);
+double16 __ovld __cnfn floor(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn floor(half);
+half2 __ovld __cnfn floor(half2);
+half3 __ovld __cnfn floor(half3);
+half4 __ovld __cnfn floor(half4);
+half8 __ovld __cnfn floor(half8);
+half16 __ovld __cnfn floor(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the correctly rounded floating-point
+ * representation of the sum of c with the infinitely
+ * precise product of a and b. Rounding of
+ * intermediate products shall not occur. Edge case
+ * behavior is per the IEEE 754-2008 standard.
+ */
+float __ovld __cnfn fma(float, float, float);
+float2 __ovld __cnfn fma(float2, float2, float2);
+float3 __ovld __cnfn fma(float3, float3, float3);
+float4 __ovld __cnfn fma(float4, float4, float4);
+float8 __ovld __cnfn fma(float8, float8, float8);
+float16 __ovld __cnfn fma(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fma(double, double, double);
+double2 __ovld __cnfn fma(double2, double2, double2);
+double3 __ovld __cnfn fma(double3, double3, double3);
+double4 __ovld __cnfn fma(double4, double4, double4);
+double8 __ovld __cnfn fma(double8, double8, double8);
+double16 __ovld __cnfn fma(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fma(half, half, half);
+half2 __ovld __cnfn fma(half2, half2, half2);
+half3 __ovld __cnfn fma(half3, half3, half3);
+half4 __ovld __cnfn fma(half4, half4, half4);
+half8 __ovld __cnfn fma(half8, half8, half8);
+half16 __ovld __cnfn fma(half16, half16, half16);
+#endif //cl_khr_fp16
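+
+/*
+ * Illustrative sketch (statements assumed to run inside an OpenCL C
+ * kernel): because fma rounds only once, it can preserve bits that a
+ * separate multiply and add would lose.
+ *
+ *   float a = 1.0f + 0x1.0p-23f;     // 1 + 2^-23
+ *   float b = 1.0f - 0x1.0p-23f;     // 1 - 2^-23
+ *   float naive = a * b - 1.0f;      // product rounds to 1.0f, so 0.0f
+ *   float fused = fma(a, b, -1.0f);  // exact result: -0x1.0p-46f
+ */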
+
+/**
+ * Returns y if x < y, otherwise it returns x. If one
+ * argument is a NaN, fmax() returns the other
+ * argument. If both arguments are NaNs, fmax()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmax(float, float);
+float2 __ovld __cnfn fmax(float2, float2);
+float3 __ovld __cnfn fmax(float3, float3);
+float4 __ovld __cnfn fmax(float4, float4);
+float8 __ovld __cnfn fmax(float8, float8);
+float16 __ovld __cnfn fmax(float16, float16);
+float2 __ovld __cnfn fmax(float2, float);
+float3 __ovld __cnfn fmax(float3, float);
+float4 __ovld __cnfn fmax(float4, float);
+float8 __ovld __cnfn fmax(float8, float);
+float16 __ovld __cnfn fmax(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmax(double, double);
+double2 __ovld __cnfn fmax(double2, double2);
+double3 __ovld __cnfn fmax(double3, double3);
+double4 __ovld __cnfn fmax(double4, double4);
+double8 __ovld __cnfn fmax(double8, double8);
+double16 __ovld __cnfn fmax(double16, double16);
+double2 __ovld __cnfn fmax(double2, double);
+double3 __ovld __cnfn fmax(double3, double);
+double4 __ovld __cnfn fmax(double4, double);
+double8 __ovld __cnfn fmax(double8, double);
+double16 __ovld __cnfn fmax(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmax(half, half);
+half2 __ovld __cnfn fmax(half2, half2);
+half3 __ovld __cnfn fmax(half3, half3);
+half4 __ovld __cnfn fmax(half4, half4);
+half8 __ovld __cnfn fmax(half8, half8);
+half16 __ovld __cnfn fmax(half16, half16);
+half2 __ovld __cnfn fmax(half2, half);
+half3 __ovld __cnfn fmax(half3, half);
+half4 __ovld __cnfn fmax(half4, half);
+half8 __ovld __cnfn fmax(half8, half);
+half16 __ovld __cnfn fmax(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If one
+ * argument is a NaN, fmin() returns the other
+ * argument. If both arguments are NaNs, fmin()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmin(float, float);
+float2 __ovld __cnfn fmin(float2, float2);
+float3 __ovld __cnfn fmin(float3, float3);
+float4 __ovld __cnfn fmin(float4, float4);
+float8 __ovld __cnfn fmin(float8, float8);
+float16 __ovld __cnfn fmin(float16, float16);
+float2 __ovld __cnfn fmin(float2, float);
+float3 __ovld __cnfn fmin(float3, float);
+float4 __ovld __cnfn fmin(float4, float);
+float8 __ovld __cnfn fmin(float8, float);
+float16 __ovld __cnfn fmin(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmin(double, double);
+double2 __ovld __cnfn fmin(double2, double2);
+double3 __ovld __cnfn fmin(double3, double3);
+double4 __ovld __cnfn fmin(double4, double4);
+double8 __ovld __cnfn fmin(double8, double8);
+double16 __ovld __cnfn fmin(double16, double16);
+double2 __ovld __cnfn fmin(double2, double);
+double3 __ovld __cnfn fmin(double3, double);
+double4 __ovld __cnfn fmin(double4, double);
+double8 __ovld __cnfn fmin(double8, double);
+double16 __ovld __cnfn fmin(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmin(half, half);
+half2 __ovld __cnfn fmin(half2, half2);
+half3 __ovld __cnfn fmin(half3, half3);
+half4 __ovld __cnfn fmin(half4, half4);
+half8 __ovld __cnfn fmin(half8, half8);
+half16 __ovld __cnfn fmin(half16, half16);
+half2 __ovld __cnfn fmin(half2, half);
+half3 __ovld __cnfn fmin(half3, half);
+half4 __ovld __cnfn fmin(half4, half);
+half8 __ovld __cnfn fmin(half8, half);
+half16 __ovld __cnfn fmin(half16, half);
+#endif //cl_khr_fp16
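+
+/*
+ * Illustrative sketch (kernel-side statements; nan() is the builtin
+ * declared further below): the NaN handling above means a single NaN
+ * argument is ignored.
+ *
+ *   float q = nan(0u);         // quiet NaN
+ *   float a = fmax(q, 1.0f);   // 1.0f: the non-NaN argument wins
+ *   float b = fmin(2.0f, q);   // 2.0f
+ *   float c = fmax(q, q);      // NaN: both arguments are NaN
+ */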
+
+/**
+ * Modulus. Returns x - y * trunc (x/y).
+ */
+float __ovld __cnfn fmod(float, float);
+float2 __ovld __cnfn fmod(float2, float2);
+float3 __ovld __cnfn fmod(float3, float3);
+float4 __ovld __cnfn fmod(float4, float4);
+float8 __ovld __cnfn fmod(float8, float8);
+float16 __ovld __cnfn fmod(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmod(double, double);
+double2 __ovld __cnfn fmod(double2, double2);
+double3 __ovld __cnfn fmod(double3, double3);
+double4 __ovld __cnfn fmod(double4, double4);
+double8 __ovld __cnfn fmod(double8, double8);
+double16 __ovld __cnfn fmod(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmod(half, half);
+half2 __ovld __cnfn fmod(half2, half2);
+half3 __ovld __cnfn fmod(half3, half3);
+half4 __ovld __cnfn fmod(half4, half4);
+half8 __ovld __cnfn fmod(half8, half8);
+half16 __ovld __cnfn fmod(half16, half16);
+#endif //cl_khr_fp16
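+
+/*
+ * Illustrative sketch: fmod follows the formula above, so the result
+ * keeps the sign of x.
+ *
+ *   float r1 = fmod(5.5f, 2.0f);   // trunc(2.75) = 2, 5.5 - 2*2.0 = 1.5f
+ *   float r2 = fmod(-5.5f, 2.0f);  // trunc(-2.75) = -2, -5.5 + 4.0 = -1.5f
+ */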
+
+/**
+ * Returns fmin(x - floor(x), 0x1.fffffep-1f).
+ * floor(x) is returned in iptr.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld fract(float, float *);
+float2 __ovld fract(float2, float2 *);
+float3 __ovld fract(float3, float3 *);
+float4 __ovld fract(float4, float4 *);
+float8 __ovld fract(float8, float8 *);
+float16 __ovld fract(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld fract(double, double *);
+double2 __ovld fract(double2, double2 *);
+double3 __ovld fract(double3, double3 *);
+double4 __ovld fract(double4, double4 *);
+double8 __ovld fract(double8, double8 *);
+double16 __ovld fract(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half, half *);
+half2 __ovld fract(half2, half2 *);
+half3 __ovld fract(half3, half3 *);
+half4 __ovld fract(half4, half4 *);
+half8 __ovld fract(half8, half8 *);
+half16 __ovld fract(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld fract(float, __global float *);
+float2 __ovld fract(float2, __global float2 *);
+float3 __ovld fract(float3, __global float3 *);
+float4 __ovld fract(float4, __global float4 *);
+float8 __ovld fract(float8, __global float8 *);
+float16 __ovld fract(float16, __global float16 *);
+float __ovld fract(float, __local float *);
+float2 __ovld fract(float2, __local float2 *);
+float3 __ovld fract(float3, __local float3 *);
+float4 __ovld fract(float4, __local float4 *);
+float8 __ovld fract(float8, __local float8 *);
+float16 __ovld fract(float16, __local float16 *);
+float __ovld fract(float, __private float *);
+float2 __ovld fract(float2, __private float2 *);
+float3 __ovld fract(float3, __private float3 *);
+float4 __ovld fract(float4, __private float4 *);
+float8 __ovld fract(float8, __private float8 *);
+float16 __ovld fract(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld fract(double, __global double *);
+double2 __ovld fract(double2, __global double2 *);
+double3 __ovld fract(double3, __global double3 *);
+double4 __ovld fract(double4, __global double4 *);
+double8 __ovld fract(double8, __global double8 *);
+double16 __ovld fract(double16, __global double16 *);
+double __ovld fract(double, __local double *);
+double2 __ovld fract(double2, __local double2 *);
+double3 __ovld fract(double3, __local double3 *);
+double4 __ovld fract(double4, __local double4 *);
+double8 __ovld fract(double8, __local double8 *);
+double16 __ovld fract(double16, __local double16 *);
+double __ovld fract(double, __private double *);
+double2 __ovld fract(double2, __private double2 *);
+double3 __ovld fract(double3, __private double3 *);
+double4 __ovld fract(double4, __private double4 *);
+double8 __ovld fract(double8, __private double8 *);
+double16 __ovld fract(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half, __global half *);
+half2 __ovld fract(half2, __global half2 *);
+half3 __ovld fract(half3, __global half3 *);
+half4 __ovld fract(half4, __global half4 *);
+half8 __ovld fract(half8, __global half8 *);
+half16 __ovld fract(half16, __global half16 *);
+half __ovld fract(half, __local half *);
+half2 __ovld fract(half2, __local half2 *);
+half3 __ovld fract(half3, __local half3 *);
+half4 __ovld fract(half4, __local half4 *);
+half8 __ovld fract(half8, __local half8 *);
+half16 __ovld fract(half16, __local half16 *);
+half __ovld fract(half, __private half *);
+half2 __ovld fract(half2, __private half2 *);
+half3 __ovld fract(half3, __private half3 *);
+half4 __ovld fract(half4, __private half4 *);
+half8 __ovld fract(half8, __private half8 *);
+half16 __ovld fract(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
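+
+/*
+ * Illustrative sketch (pointer assumed to be in a supported address
+ * space): fract splits x into its floor and a fractional part in [0, 1).
+ *
+ *   float fl;
+ *   float fr = fract(2.75f, &fl);   // fr == 0.75f, fl == 2.0f
+ *   fr = fract(-0.25f, &fl);        // fr == 0.75f, fl == -1.0f
+ */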
+
+/**
+ * Extract mantissa and exponent from x. For each
+ * component the mantissa returned is a float with
+ * magnitude in the interval [1/2, 1) or 0. Each
+ * component of x equals the returned mantissa * 2^exp.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld frexp(float, int *);
+float2 __ovld frexp(float2, int2 *);
+float3 __ovld frexp(float3, int3 *);
+float4 __ovld frexp(float4, int4 *);
+float8 __ovld frexp(float8, int8 *);
+float16 __ovld frexp(float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld frexp(double, int *);
+double2 __ovld frexp(double2, int2 *);
+double3 __ovld frexp(double3, int3 *);
+double4 __ovld frexp(double4, int4 *);
+double8 __ovld frexp(double8, int8 *);
+double16 __ovld frexp(double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half, int *);
+half2 __ovld frexp(half2, int2 *);
+half3 __ovld frexp(half3, int3 *);
+half4 __ovld frexp(half4, int4 *);
+half8 __ovld frexp(half8, int8 *);
+half16 __ovld frexp(half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld frexp(float, __global int *);
+float2 __ovld frexp(float2, __global int2 *);
+float3 __ovld frexp(float3, __global int3 *);
+float4 __ovld frexp(float4, __global int4 *);
+float8 __ovld frexp(float8, __global int8 *);
+float16 __ovld frexp(float16, __global int16 *);
+float __ovld frexp(float, __local int *);
+float2 __ovld frexp(float2, __local int2 *);
+float3 __ovld frexp(float3, __local int3 *);
+float4 __ovld frexp(float4, __local int4 *);
+float8 __ovld frexp(float8, __local int8 *);
+float16 __ovld frexp(float16, __local int16 *);
+float __ovld frexp(float, __private int *);
+float2 __ovld frexp(float2, __private int2 *);
+float3 __ovld frexp(float3, __private int3 *);
+float4 __ovld frexp(float4, __private int4 *);
+float8 __ovld frexp(float8, __private int8 *);
+float16 __ovld frexp(float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld frexp(double, __global int *);
+double2 __ovld frexp(double2, __global int2 *);
+double3 __ovld frexp(double3, __global int3 *);
+double4 __ovld frexp(double4, __global int4 *);
+double8 __ovld frexp(double8, __global int8 *);
+double16 __ovld frexp(double16, __global int16 *);
+double __ovld frexp(double, __local int *);
+double2 __ovld frexp(double2, __local int2 *);
+double3 __ovld frexp(double3, __local int3 *);
+double4 __ovld frexp(double4, __local int4 *);
+double8 __ovld frexp(double8, __local int8 *);
+double16 __ovld frexp(double16, __local int16 *);
+double __ovld frexp(double, __private int *);
+double2 __ovld frexp(double2, __private int2 *);
+double3 __ovld frexp(double3, __private int3 *);
+double4 __ovld frexp(double4, __private int4 *);
+double8 __ovld frexp(double8, __private int8 *);
+double16 __ovld frexp(double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half, __global int *);
+half2 __ovld frexp(half2, __global int2 *);
+half3 __ovld frexp(half3, __global int3 *);
+half4 __ovld frexp(half4, __global int4 *);
+half8 __ovld frexp(half8, __global int8 *);
+half16 __ovld frexp(half16, __global int16 *);
+half __ovld frexp(half, __local int *);
+half2 __ovld frexp(half2, __local int2 *);
+half3 __ovld frexp(half3, __local int3 *);
+half4 __ovld frexp(half4, __local int4 *);
+half8 __ovld frexp(half8, __local int8 *);
+half16 __ovld frexp(half16, __local int16 *);
+half __ovld frexp(half, __private int *);
+half2 __ovld frexp(half2, __private int2 *);
+half3 __ovld frexp(half3, __private int3 *);
+half4 __ovld frexp(half4, __private int4 *);
+half8 __ovld frexp(half8, __private int8 *);
+half16 __ovld frexp(half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
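+
+/*
+ * Illustrative sketch: frexp factors x as mantissa * 2^exp with the
+ * mantissa's magnitude in [0.5, 1).
+ *
+ *   int e;
+ *   float m = frexp(12.0f, &e);  // m == 0.75f, e == 4 (12 = 0.75 * 2^4)
+ */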
+
+/**
+ * Compute the value of the square root of x^2 + y^2
+ * without undue overflow or underflow.
+ */
+float __ovld __cnfn hypot(float, float);
+float2 __ovld __cnfn hypot(float2, float2);
+float3 __ovld __cnfn hypot(float3, float3);
+float4 __ovld __cnfn hypot(float4, float4);
+float8 __ovld __cnfn hypot(float8, float8);
+float16 __ovld __cnfn hypot(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn hypot(double, double);
+double2 __ovld __cnfn hypot(double2, double2);
+double3 __ovld __cnfn hypot(double3, double3);
+double4 __ovld __cnfn hypot(double4, double4);
+double8 __ovld __cnfn hypot(double8, double8);
+double16 __ovld __cnfn hypot(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn hypot(half, half);
+half2 __ovld __cnfn hypot(half2, half2);
+half3 __ovld __cnfn hypot(half3, half3);
+half4 __ovld __cnfn hypot(half4, half4);
+half8 __ovld __cnfn hypot(half8, half8);
+half16 __ovld __cnfn hypot(half16, half16);
+#endif //cl_khr_fp16
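+
+/*
+ * Illustrative sketch: hypot avoids the overflow that squaring the
+ * arguments directly would cause.
+ *
+ *   float h1 = hypot(3.0f, 4.0f);        // 5.0f
+ *   float h2 = hypot(3.0e19f, 4.0e19f);  // ~5.0e19f, even though
+ *                                        // 3.0e19f * 3.0e19f overflows
+ */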
+
+/**
+ * Return the exponent as an integer value.
+ */
+int __ovld __cnfn ilogb(float);
+int2 __ovld __cnfn ilogb(float2);
+int3 __ovld __cnfn ilogb(float3);
+int4 __ovld __cnfn ilogb(float4);
+int8 __ovld __cnfn ilogb(float8);
+int16 __ovld __cnfn ilogb(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn ilogb(double);
+int2 __ovld __cnfn ilogb(double2);
+int3 __ovld __cnfn ilogb(double3);
+int4 __ovld __cnfn ilogb(double4);
+int8 __ovld __cnfn ilogb(double8);
+int16 __ovld __cnfn ilogb(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn ilogb(half);
+int2 __ovld __cnfn ilogb(half2);
+int3 __ovld __cnfn ilogb(half3);
+int4 __ovld __cnfn ilogb(half4);
+int8 __ovld __cnfn ilogb(half8);
+int16 __ovld __cnfn ilogb(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Multiply x by 2 to the power n.
+ */
+float __ovld __cnfn ldexp(float, int);
+float2 __ovld __cnfn ldexp(float2, int2);
+float3 __ovld __cnfn ldexp(float3, int3);
+float4 __ovld __cnfn ldexp(float4, int4);
+float8 __ovld __cnfn ldexp(float8, int8);
+float16 __ovld __cnfn ldexp(float16, int16);
+float2 __ovld __cnfn ldexp(float2, int);
+float3 __ovld __cnfn ldexp(float3, int);
+float4 __ovld __cnfn ldexp(float4, int);
+float8 __ovld __cnfn ldexp(float8, int);
+float16 __ovld __cnfn ldexp(float16, int);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ldexp(double, int);
+double2 __ovld __cnfn ldexp(double2, int2);
+double3 __ovld __cnfn ldexp(double3, int3);
+double4 __ovld __cnfn ldexp(double4, int4);
+double8 __ovld __cnfn ldexp(double8, int8);
+double16 __ovld __cnfn ldexp(double16, int16);
+double2 __ovld __cnfn ldexp(double2, int);
+double3 __ovld __cnfn ldexp(double3, int);
+double4 __ovld __cnfn ldexp(double4, int);
+double8 __ovld __cnfn ldexp(double8, int);
+double16 __ovld __cnfn ldexp(double16, int);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ldexp(half, int);
+half2 __ovld __cnfn ldexp(half2, int2);
+half3 __ovld __cnfn ldexp(half3, int3);
+half4 __ovld __cnfn ldexp(half4, int4);
+half8 __ovld __cnfn ldexp(half8, int8);
+half16 __ovld __cnfn ldexp(half16, int16);
+half2 __ovld __cnfn ldexp(half2, int);
+half3 __ovld __cnfn ldexp(half3, int);
+half4 __ovld __cnfn ldexp(half4, int);
+half8 __ovld __cnfn ldexp(half8, int);
+half16 __ovld __cnfn ldexp(half16, int);
+#endif //cl_khr_fp16
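+
+/*
+ * Illustrative sketch: ldexp is the inverse of the frexp decomposition
+ * above.
+ *
+ *   float y = ldexp(0.75f, 4);  // 0.75 * 2^4 == 12.0f
+ */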
+
+/**
+ * Log gamma function. Returns the natural
+ * logarithm of the absolute value of the gamma
+ * function. The sign of the gamma function is
+ * returned in the signp argument of lgamma_r.
+ */
+float __ovld __cnfn lgamma(float);
+float2 __ovld __cnfn lgamma(float2);
+float3 __ovld __cnfn lgamma(float3);
+float4 __ovld __cnfn lgamma(float4);
+float8 __ovld __cnfn lgamma(float8);
+float16 __ovld __cnfn lgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn lgamma(double);
+double2 __ovld __cnfn lgamma(double2);
+double3 __ovld __cnfn lgamma(double3);
+double4 __ovld __cnfn lgamma(double4);
+double8 __ovld __cnfn lgamma(double8);
+double16 __ovld __cnfn lgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn lgamma(half);
+half2 __ovld __cnfn lgamma(half2);
+half3 __ovld __cnfn lgamma(half3);
+half4 __ovld __cnfn lgamma(half4);
+half8 __ovld __cnfn lgamma(half8);
+half16 __ovld __cnfn lgamma(half16);
+#endif //cl_khr_fp16
+
+#if defined(__opencl_c_generic_address_space)
+float __ovld lgamma_r(float, int *);
+float2 __ovld lgamma_r(float2, int2 *);
+float3 __ovld lgamma_r(float3, int3 *);
+float4 __ovld lgamma_r(float4, int4 *);
+float8 __ovld lgamma_r(float8, int8 *);
+float16 __ovld lgamma_r(float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double, int *);
+double2 __ovld lgamma_r(double2, int2 *);
+double3 __ovld lgamma_r(double3, int3 *);
+double4 __ovld lgamma_r(double4, int4 *);
+double8 __ovld lgamma_r(double8, int8 *);
+double16 __ovld lgamma_r(double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half, int *);
+half2 __ovld lgamma_r(half2, int2 *);
+half3 __ovld lgamma_r(half3, int3 *);
+half4 __ovld lgamma_r(half4, int4 *);
+half8 __ovld lgamma_r(half8, int8 *);
+half16 __ovld lgamma_r(half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld lgamma_r(float, __global int *);
+float2 __ovld lgamma_r(float2, __global int2 *);
+float3 __ovld lgamma_r(float3, __global int3 *);
+float4 __ovld lgamma_r(float4, __global int4 *);
+float8 __ovld lgamma_r(float8, __global int8 *);
+float16 __ovld lgamma_r(float16, __global int16 *);
+float __ovld lgamma_r(float, __local int *);
+float2 __ovld lgamma_r(float2, __local int2 *);
+float3 __ovld lgamma_r(float3, __local int3 *);
+float4 __ovld lgamma_r(float4, __local int4 *);
+float8 __ovld lgamma_r(float8, __local int8 *);
+float16 __ovld lgamma_r(float16, __local int16 *);
+float __ovld lgamma_r(float, __private int *);
+float2 __ovld lgamma_r(float2, __private int2 *);
+float3 __ovld lgamma_r(float3, __private int3 *);
+float4 __ovld lgamma_r(float4, __private int4 *);
+float8 __ovld lgamma_r(float8, __private int8 *);
+float16 __ovld lgamma_r(float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double, __global int *);
+double2 __ovld lgamma_r(double2, __global int2 *);
+double3 __ovld lgamma_r(double3, __global int3 *);
+double4 __ovld lgamma_r(double4, __global int4 *);
+double8 __ovld lgamma_r(double8, __global int8 *);
+double16 __ovld lgamma_r(double16, __global int16 *);
+double __ovld lgamma_r(double, __local int *);
+double2 __ovld lgamma_r(double2, __local int2 *);
+double3 __ovld lgamma_r(double3, __local int3 *);
+double4 __ovld lgamma_r(double4, __local int4 *);
+double8 __ovld lgamma_r(double8, __local int8 *);
+double16 __ovld lgamma_r(double16, __local int16 *);
+double __ovld lgamma_r(double, __private int *);
+double2 __ovld lgamma_r(double2, __private int2 *);
+double3 __ovld lgamma_r(double3, __private int3 *);
+double4 __ovld lgamma_r(double4, __private int4 *);
+double8 __ovld lgamma_r(double8, __private int8 *);
+double16 __ovld lgamma_r(double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half, __global int *);
+half2 __ovld lgamma_r(half2, __global int2 *);
+half3 __ovld lgamma_r(half3, __global int3 *);
+half4 __ovld lgamma_r(half4, __global int4 *);
+half8 __ovld lgamma_r(half8, __global int8 *);
+half16 __ovld lgamma_r(half16, __global int16 *);
+half __ovld lgamma_r(half, __local int *);
+half2 __ovld lgamma_r(half2, __local int2 *);
+half3 __ovld lgamma_r(half3, __local int3 *);
+half4 __ovld lgamma_r(half4, __local int4 *);
+half8 __ovld lgamma_r(half8, __local int8 *);
+half16 __ovld lgamma_r(half16, __local int16 *);
+half __ovld lgamma_r(half, __private int *);
+half2 __ovld lgamma_r(half2, __private int2 *);
+half3 __ovld lgamma_r(half3, __private int3 *);
+half4 __ovld lgamma_r(half4, __private int4 *);
+half8 __ovld lgamma_r(half8, __private int8 *);
+half16 __ovld lgamma_r(half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
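+
+/*
+ * Illustrative sketch: lgamma_r reports the sign of gamma(x) through
+ * signp, since the return value is the log of the absolute value.
+ *
+ *   int s;
+ *   float l = lgamma_r(-0.5f, &s);  // gamma(-0.5) ~= -3.5449, so
+ *                                   // l ~= 1.2655f and s == -1
+ */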
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn log(float);
+float2 __ovld __cnfn log(float2);
+float3 __ovld __cnfn log(float3);
+float4 __ovld __cnfn log(float4);
+float8 __ovld __cnfn log(float8);
+float16 __ovld __cnfn log(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log(double);
+double2 __ovld __cnfn log(double2);
+double3 __ovld __cnfn log(double3);
+double4 __ovld __cnfn log(double4);
+double8 __ovld __cnfn log(double8);
+double16 __ovld __cnfn log(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log(half);
+half2 __ovld __cnfn log(half2);
+half3 __ovld __cnfn log(half3);
+half4 __ovld __cnfn log(half4);
+half8 __ovld __cnfn log(half8);
+half16 __ovld __cnfn log(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn log2(float);
+float2 __ovld __cnfn log2(float2);
+float3 __ovld __cnfn log2(float3);
+float4 __ovld __cnfn log2(float4);
+float8 __ovld __cnfn log2(float8);
+float16 __ovld __cnfn log2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log2(double);
+double2 __ovld __cnfn log2(double2);
+double3 __ovld __cnfn log2(double3);
+double4 __ovld __cnfn log2(double4);
+double8 __ovld __cnfn log2(double8);
+double16 __ovld __cnfn log2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log2(half);
+half2 __ovld __cnfn log2(half2);
+half3 __ovld __cnfn log2(half3);
+half4 __ovld __cnfn log2(half4);
+half8 __ovld __cnfn log2(half8);
+half16 __ovld __cnfn log2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn log10(float);
+float2 __ovld __cnfn log10(float2);
+float3 __ovld __cnfn log10(float3);
+float4 __ovld __cnfn log10(float4);
+float8 __ovld __cnfn log10(float8);
+float16 __ovld __cnfn log10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log10(double);
+double2 __ovld __cnfn log10(double2);
+double3 __ovld __cnfn log10(double3);
+double4 __ovld __cnfn log10(double4);
+double8 __ovld __cnfn log10(double8);
+double16 __ovld __cnfn log10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log10(half);
+half2 __ovld __cnfn log10(half2);
+half3 __ovld __cnfn log10(half3);
+half4 __ovld __cnfn log10(half4);
+half8 __ovld __cnfn log10(half8);
+half16 __ovld __cnfn log10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base e logarithm of (1.0 + x).
+ */
+float __ovld __cnfn log1p(float);
+float2 __ovld __cnfn log1p(float2);
+float3 __ovld __cnfn log1p(float3);
+float4 __ovld __cnfn log1p(float4);
+float8 __ovld __cnfn log1p(float8);
+float16 __ovld __cnfn log1p(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log1p(double);
+double2 __ovld __cnfn log1p(double2);
+double3 __ovld __cnfn log1p(double3);
+double4 __ovld __cnfn log1p(double4);
+double8 __ovld __cnfn log1p(double8);
+double16 __ovld __cnfn log1p(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log1p(half);
+half2 __ovld __cnfn log1p(half2);
+half3 __ovld __cnfn log1p(half3);
+half4 __ovld __cnfn log1p(half4);
+half8 __ovld __cnfn log1p(half8);
+half16 __ovld __cnfn log1p(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the exponent of x, which is the integral
+ * part of log_r |x|, where r is the radix of the
+ * floating-point format.
+ */
+float __ovld __cnfn logb(float);
+float2 __ovld __cnfn logb(float2);
+float3 __ovld __cnfn logb(float3);
+float4 __ovld __cnfn logb(float4);
+float8 __ovld __cnfn logb(float8);
+float16 __ovld __cnfn logb(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn logb(double);
+double2 __ovld __cnfn logb(double2);
+double3 __ovld __cnfn logb(double3);
+double4 __ovld __cnfn logb(double4);
+double8 __ovld __cnfn logb(double8);
+double16 __ovld __cnfn logb(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn logb(half);
+half2 __ovld __cnfn logb(half2);
+half3 __ovld __cnfn logb(half3);
+half4 __ovld __cnfn logb(half4);
+half8 __ovld __cnfn logb(half8);
+half16 __ovld __cnfn logb(half16);
+#endif //cl_khr_fp16
+
+/**
+ * mad approximates a * b + c. Whether or how the
+ * product of a * b is rounded and how supernormal or
+ * subnormal intermediate products are handled is not
+ * defined. mad is intended to be used where speed is
+ * preferred over accuracy.
+ */
+float __ovld __cnfn mad(float, float, float);
+float2 __ovld __cnfn mad(float2, float2, float2);
+float3 __ovld __cnfn mad(float3, float3, float3);
+float4 __ovld __cnfn mad(float4, float4, float4);
+float8 __ovld __cnfn mad(float8, float8, float8);
+float16 __ovld __cnfn mad(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mad(double, double, double);
+double2 __ovld __cnfn mad(double2, double2, double2);
+double3 __ovld __cnfn mad(double3, double3, double3);
+double4 __ovld __cnfn mad(double4, double4, double4);
+double8 __ovld __cnfn mad(double8, double8, double8);
+double16 __ovld __cnfn mad(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mad(half, half, half);
+half2 __ovld __cnfn mad(half2, half2, half2);
+half3 __ovld __cnfn mad(half3, half3, half3);
+half4 __ovld __cnfn mad(half4, half4, half4);
+half8 __ovld __cnfn mad(half8, half8, half8);
+half16 __ovld __cnfn mad(half16, half16, half16);
+#endif //cl_khr_fp16
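+
+/*
+ * Illustrative sketch: mad trades the exactness guarantees of fma for
+ * speed; use it only when the rounding of the intermediate product does
+ * not matter.
+ *
+ *   float a = 1.5f, b = 2.5f, c = 0.5f;
+ *   float fast    = mad(a, b, c);  // may use one fused op or two rounded ops
+ *   float precise = fma(a, b, c);  // always a single correctly rounded op
+ */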
+
+/**
+ * Returns x if | x | > | y |, y if | y | > | x |, otherwise
+ * fmax(x, y).
+ */
+float __ovld __cnfn maxmag(float, float);
+float2 __ovld __cnfn maxmag(float2, float2);
+float3 __ovld __cnfn maxmag(float3, float3);
+float4 __ovld __cnfn maxmag(float4, float4);
+float8 __ovld __cnfn maxmag(float8, float8);
+float16 __ovld __cnfn maxmag(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn maxmag(double, double);
+double2 __ovld __cnfn maxmag(double2, double2);
+double3 __ovld __cnfn maxmag(double3, double3);
+double4 __ovld __cnfn maxmag(double4, double4);
+double8 __ovld __cnfn maxmag(double8, double8);
+double16 __ovld __cnfn maxmag(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn maxmag(half, half);
+half2 __ovld __cnfn maxmag(half2, half2);
+half3 __ovld __cnfn maxmag(half3, half3);
+half4 __ovld __cnfn maxmag(half4, half4);
+half8 __ovld __cnfn maxmag(half8, half8);
+half16 __ovld __cnfn maxmag(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x if | x | < | y |, y if | y | < | x |, otherwise
+ * fmin(x, y).
+ */
+float __ovld __cnfn minmag(float, float);
+float2 __ovld __cnfn minmag(float2, float2);
+float3 __ovld __cnfn minmag(float3, float3);
+float4 __ovld __cnfn minmag(float4, float4);
+float8 __ovld __cnfn minmag(float8, float8);
+float16 __ovld __cnfn minmag(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn minmag(double, double);
+double2 __ovld __cnfn minmag(double2, double2);
+double3 __ovld __cnfn minmag(double3, double3);
+double4 __ovld __cnfn minmag(double4, double4);
+double8 __ovld __cnfn minmag(double8, double8);
+double16 __ovld __cnfn minmag(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn minmag(half, half);
+half2 __ovld __cnfn minmag(half2, half2);
+half3 __ovld __cnfn minmag(half3, half3);
+half4 __ovld __cnfn minmag(half4, half4);
+half8 __ovld __cnfn minmag(half8, half8);
+half16 __ovld __cnfn minmag(half16, half16);
+#endif //cl_khr_fp16
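+
+/*
+ * Illustrative sketch of the magnitude-based selection described above:
+ *
+ *   float a = maxmag(-3.0f, 2.0f);  // -3.0f: |x| > |y|
+ *   float b = maxmag(-2.0f, 2.0f);  //  2.0f: equal magnitudes, fmax(x, y)
+ *   float c = minmag(-3.0f, 2.0f);  //  2.0f: |y| < |x|
+ */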
+
+/**
+ * Decompose a floating-point number. The modf
+ * function breaks the argument x into integral and
+ * fractional parts, each of which has the same sign as
+ * the argument. It stores the integral part in the object
+ * pointed to by iptr.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld modf(float, float *);
+float2 __ovld modf(float2, float2 *);
+float3 __ovld modf(float3, float3 *);
+float4 __ovld modf(float4, float4 *);
+float8 __ovld modf(float8, float8 *);
+float16 __ovld modf(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld modf(double, double *);
+double2 __ovld modf(double2, double2 *);
+double3 __ovld modf(double3, double3 *);
+double4 __ovld modf(double4, double4 *);
+double8 __ovld modf(double8, double8 *);
+double16 __ovld modf(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half, half *);
+half2 __ovld modf(half2, half2 *);
+half3 __ovld modf(half3, half3 *);
+half4 __ovld modf(half4, half4 *);
+half8 __ovld modf(half8, half8 *);
+half16 __ovld modf(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld modf(float, __global float *);
+float2 __ovld modf(float2, __global float2 *);
+float3 __ovld modf(float3, __global float3 *);
+float4 __ovld modf(float4, __global float4 *);
+float8 __ovld modf(float8, __global float8 *);
+float16 __ovld modf(float16, __global float16 *);
+float __ovld modf(float, __local float *);
+float2 __ovld modf(float2, __local float2 *);
+float3 __ovld modf(float3, __local float3 *);
+float4 __ovld modf(float4, __local float4 *);
+float8 __ovld modf(float8, __local float8 *);
+float16 __ovld modf(float16, __local float16 *);
+float __ovld modf(float, __private float *);
+float2 __ovld modf(float2, __private float2 *);
+float3 __ovld modf(float3, __private float3 *);
+float4 __ovld modf(float4, __private float4 *);
+float8 __ovld modf(float8, __private float8 *);
+float16 __ovld modf(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld modf(double, __global double *);
+double2 __ovld modf(double2, __global double2 *);
+double3 __ovld modf(double3, __global double3 *);
+double4 __ovld modf(double4, __global double4 *);
+double8 __ovld modf(double8, __global double8 *);
+double16 __ovld modf(double16, __global double16 *);
+double __ovld modf(double, __local double *);
+double2 __ovld modf(double2, __local double2 *);
+double3 __ovld modf(double3, __local double3 *);
+double4 __ovld modf(double4, __local double4 *);
+double8 __ovld modf(double8, __local double8 *);
+double16 __ovld modf(double16, __local double16 *);
+double __ovld modf(double, __private double *);
+double2 __ovld modf(double2, __private double2 *);
+double3 __ovld modf(double3, __private double3 *);
+double4 __ovld modf(double4, __private double4 *);
+double8 __ovld modf(double8, __private double8 *);
+double16 __ovld modf(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half, __global half *);
+half2 __ovld modf(half2, __global half2 *);
+half3 __ovld modf(half3, __global half3 *);
+half4 __ovld modf(half4, __global half4 *);
+half8 __ovld modf(half8, __global half8 *);
+half16 __ovld modf(half16, __global half16 *);
+half __ovld modf(half, __local half *);
+half2 __ovld modf(half2, __local half2 *);
+half3 __ovld modf(half3, __local half3 *);
+half4 __ovld modf(half4, __local half4 *);
+half8 __ovld modf(half8, __local half8 *);
+half16 __ovld modf(half16, __local half16 *);
+half __ovld modf(half, __private half *);
+half2 __ovld modf(half2, __private half2 *);
+half3 __ovld modf(half3, __private half3 *);
+half4 __ovld modf(half4, __private half4 *);
+half8 __ovld modf(half8, __private half8 *);
+half16 __ovld modf(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
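+
+/*
+ * Illustrative sketch: both parts produced by modf carry the sign of
+ * the argument.
+ *
+ *   float ip;
+ *   float fp = modf(-3.75f, &ip);  // fp == -0.75f, ip == -3.0f
+ */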
+
+/**
+ * Returns a quiet NaN. The nancode may be placed
+ * in the significand of the resulting NaN.
+ */
+float __ovld __cnfn nan(uint);
+float2 __ovld __cnfn nan(uint2);
+float3 __ovld __cnfn nan(uint3);
+float4 __ovld __cnfn nan(uint4);
+float8 __ovld __cnfn nan(uint8);
+float16 __ovld __cnfn nan(uint16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nan(ulong);
+double2 __ovld __cnfn nan(ulong2);
+double3 __ovld __cnfn nan(ulong3);
+double4 __ovld __cnfn nan(ulong4);
+double8 __ovld __cnfn nan(ulong8);
+double16 __ovld __cnfn nan(ulong16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nan(ushort);
+half2 __ovld __cnfn nan(ushort2);
+half3 __ovld __cnfn nan(ushort3);
+half4 __ovld __cnfn nan(ushort4);
+half8 __ovld __cnfn nan(ushort8);
+half16 __ovld __cnfn nan(ushort16);
+#endif //cl_khr_fp16
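+
+/*
+ * Illustrative sketch: nan builds a quiet NaN, optionally tagged with a
+ * nancode (whether the code survives in the significand is up to the
+ * implementation, per the description above).
+ *
+ *   float q = nan(0u);   // plain quiet NaN
+ *   float t = nan(42u);  // quiet NaN; 42 may appear in the significand
+ *   int   n = isnan(q);  // 1
+ */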
+
+/**
+ * Computes the next representable floating-point
+ * value following x in the direction of y. Thus, if
+ * y is less than x, nextafter() returns the largest
+ * representable floating-point number less than x.
+ */
+float __ovld __cnfn nextafter(float, float);
+float2 __ovld __cnfn nextafter(float2, float2);
+float3 __ovld __cnfn nextafter(float3, float3);
+float4 __ovld __cnfn nextafter(float4, float4);
+float8 __ovld __cnfn nextafter(float8, float8);
+float16 __ovld __cnfn nextafter(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nextafter(double, double);
+double2 __ovld __cnfn nextafter(double2, double2);
+double3 __ovld __cnfn nextafter(double3, double3);
+double4 __ovld __cnfn nextafter(double4, double4);
+double8 __ovld __cnfn nextafter(double8, double8);
+double16 __ovld __cnfn nextafter(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nextafter(half, half);
+half2 __ovld __cnfn nextafter(half2, half2);
+half3 __ovld __cnfn nextafter(half3, half3);
+half4 __ovld __cnfn nextafter(half4, half4);
+half8 __ovld __cnfn nextafter(half8, half8);
+half16 __ovld __cnfn nextafter(half16, half16);
+#endif //cl_khr_fp16
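+
+/*
+ * Illustrative sketch: nextafter steps by exactly one ulp toward y.
+ *
+ *   float up   = nextafter(1.0f, 2.0f);  // 1.0f + 2^-23 (0x1.000002p+0f)
+ *   float down = nextafter(1.0f, 0.0f);  // 1.0f - 2^-24 (0x1.fffffep-1f)
+ */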
+
+/**
+ * Compute x to the power y.
+ */
+float __ovld __cnfn pow(float, float);
+float2 __ovld __cnfn pow(float2, float2);
+float3 __ovld __cnfn pow(float3, float3);
+float4 __ovld __cnfn pow(float4, float4);
+float8 __ovld __cnfn pow(float8, float8);
+float16 __ovld __cnfn pow(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pow(double, double);
+double2 __ovld __cnfn pow(double2, double2);
+double3 __ovld __cnfn pow(double3, double3);
+double4 __ovld __cnfn pow(double4, double4);
+double8 __ovld __cnfn pow(double8, double8);
+double16 __ovld __cnfn pow(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pow(half, half);
+half2 __ovld __cnfn pow(half2, half2);
+half3 __ovld __cnfn pow(half3, half3);
+half4 __ovld __cnfn pow(half4, half4);
+half8 __ovld __cnfn pow(half8, half8);
+half16 __ovld __cnfn pow(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where y is an integer.
+ */
+float __ovld __cnfn pown(float, int);
+float2 __ovld __cnfn pown(float2, int2);
+float3 __ovld __cnfn pown(float3, int3);
+float4 __ovld __cnfn pown(float4, int4);
+float8 __ovld __cnfn pown(float8, int8);
+float16 __ovld __cnfn pown(float16, int16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pown(double, int);
+double2 __ovld __cnfn pown(double2, int2);
+double3 __ovld __cnfn pown(double3, int3);
+double4 __ovld __cnfn pown(double4, int4);
+double8 __ovld __cnfn pown(double8, int8);
+double16 __ovld __cnfn pown(double16, int16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pown(half, int);
+half2 __ovld __cnfn pown(half2, int2);
+half3 __ovld __cnfn pown(half3, int3);
+half4 __ovld __cnfn pown(half4, int4);
+half8 __ovld __cnfn pown(half8, int8);
+half16 __ovld __cnfn pown(half16, int16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn powr(float, float);
+float2 __ovld __cnfn powr(float2, float2);
+float3 __ovld __cnfn powr(float3, float3);
+float4 __ovld __cnfn powr(float4, float4);
+float8 __ovld __cnfn powr(float8, float8);
+float16 __ovld __cnfn powr(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn powr(double, double);
+double2 __ovld __cnfn powr(double2, double2);
+double3 __ovld __cnfn powr(double3, double3);
+double4 __ovld __cnfn powr(double4, double4);
+double8 __ovld __cnfn powr(double8, double8);
+double16 __ovld __cnfn powr(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn powr(half, half);
+half2 __ovld __cnfn powr(half2, half2);
+half3 __ovld __cnfn powr(half3, half3);
+half4 __ovld __cnfn powr(half4, half4);
+half8 __ovld __cnfn powr(half8, half8);
+half16 __ovld __cnfn powr(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the value r such that r = x - n*y, where n
+ * is the integer nearest the exact value of x/y. If there
+ * are two integers closest to x/y, n shall be the even
+ * one. If r is zero, it is given the same sign as x.
+ */
+float __ovld __cnfn remainder(float, float);
+float2 __ovld __cnfn remainder(float2, float2);
+float3 __ovld __cnfn remainder(float3, float3);
+float4 __ovld __cnfn remainder(float4, float4);
+float8 __ovld __cnfn remainder(float8, float8);
+float16 __ovld __cnfn remainder(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn remainder(double, double);
+double2 __ovld __cnfn remainder(double2, double2);
+double3 __ovld __cnfn remainder(double3, double3);
+double4 __ovld __cnfn remainder(double4, double4);
+double8 __ovld __cnfn remainder(double8, double8);
+double16 __ovld __cnfn remainder(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn remainder(half, half);
+half2 __ovld __cnfn remainder(half2, half2);
+half3 __ovld __cnfn remainder(half3, half3);
+half4 __ovld __cnfn remainder(half4, half4);
+half8 __ovld __cnfn remainder(half8, half8);
+half16 __ovld __cnfn remainder(half16, half16);
+#endif //cl_khr_fp16
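+
+/*
+ * Illustrative sketch: unlike fmod, remainder picks n as the integer
+ * nearest x/y (ties to even), so the result can be negative for
+ * positive x.
+ *
+ *   float r1 = remainder(5.5f, 2.0f);  // n = 3, 5.5 - 3*2.0 = -0.5f
+ *   float r2 = remainder(5.0f, 2.0f);  // x/y = 2.5 ties to n = 2 -> 1.0f
+ *   float r3 = fmod(5.5f, 2.0f);       // 1.5f, for contrast
+ */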
+
+/**
+ * The remquo function computes the value r such
+ * that r = x - n*y, where n is the integer nearest the
+ * exact value of x/y. If there are two integers closest
+ * to x/y, n shall be the even one. If r is zero, it is
+ * given the same sign as x. This is the same value
+ * that is returned by the remainder function.
+ * remquo also calculates the lower seven bits of the
+ * integral quotient x/y, and gives that value the same
+ * sign as x/y. It stores this signed value in the object
+ * pointed to by quo.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld remquo(float, float, int *);
+float2 __ovld remquo(float2, float2, int2 *);
+float3 __ovld remquo(float3, float3, int3 *);
+float4 __ovld remquo(float4, float4, int4 *);
+float8 __ovld remquo(float8, float8, int8 *);
+float16 __ovld remquo(float16, float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld remquo(double, double, int *);
+double2 __ovld remquo(double2, double2, int2 *);
+double3 __ovld remquo(double3, double3, int3 *);
+double4 __ovld remquo(double4, double4, int4 *);
+double8 __ovld remquo(double8, double8, int8 *);
+double16 __ovld remquo(double16, double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half, half, int *);
+half2 __ovld remquo(half2, half2, int2 *);
+half3 __ovld remquo(half3, half3, int3 *);
+half4 __ovld remquo(half4, half4, int4 *);
+half8 __ovld remquo(half8, half8, int8 *);
+half16 __ovld remquo(half16, half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld remquo(float, float, __global int *);
+float2 __ovld remquo(float2, float2, __global int2 *);
+float3 __ovld remquo(float3, float3, __global int3 *);
+float4 __ovld remquo(float4, float4, __global int4 *);
+float8 __ovld remquo(float8, float8, __global int8 *);
+float16 __ovld remquo(float16, float16, __global int16 *);
+float __ovld remquo(float, float, __local int *);
+float2 __ovld remquo(float2, float2, __local int2 *);
+float3 __ovld remquo(float3, float3, __local int3 *);
+float4 __ovld remquo(float4, float4, __local int4 *);
+float8 __ovld remquo(float8, float8, __local int8 *);
+float16 __ovld remquo(float16, float16, __local int16 *);
+float __ovld remquo(float, float, __private int *);
+float2 __ovld remquo(float2, float2, __private int2 *);
+float3 __ovld remquo(float3, float3, __private int3 *);
+float4 __ovld remquo(float4, float4, __private int4 *);
+float8 __ovld remquo(float8, float8, __private int8 *);
+float16 __ovld remquo(float16, float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld remquo(double, double, __global int *);
+double2 __ovld remquo(double2, double2, __global int2 *);
+double3 __ovld remquo(double3, double3, __global int3 *);
+double4 __ovld remquo(double4, double4, __global int4 *);
+double8 __ovld remquo(double8, double8, __global int8 *);
+double16 __ovld remquo(double16, double16, __global int16 *);
+double __ovld remquo(double, double, __local int *);
+double2 __ovld remquo(double2, double2, __local int2 *);
+double3 __ovld remquo(double3, double3, __local int3 *);
+double4 __ovld remquo(double4, double4, __local int4 *);
+double8 __ovld remquo(double8, double8, __local int8 *);
+double16 __ovld remquo(double16, double16, __local int16 *);
+double __ovld remquo(double, double, __private int *);
+double2 __ovld remquo(double2, double2, __private int2 *);
+double3 __ovld remquo(double3, double3, __private int3 *);
+double4 __ovld remquo(double4, double4, __private int4 *);
+double8 __ovld remquo(double8, double8, __private int8 *);
+double16 __ovld remquo(double16, double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half, half, __global int *);
+half2 __ovld remquo(half2, half2, __global int2 *);
+half3 __ovld remquo(half3, half3, __global int3 *);
+half4 __ovld remquo(half4, half4, __global int4 *);
+half8 __ovld remquo(half8, half8, __global int8 *);
+half16 __ovld remquo(half16, half16, __global int16 *);
+half __ovld remquo(half, half, __local int *);
+half2 __ovld remquo(half2, half2, __local int2 *);
+half3 __ovld remquo(half3, half3, __local int3 *);
+half4 __ovld remquo(half4, half4, __local int4 *);
+half8 __ovld remquo(half8, half8, __local int8 *);
+half16 __ovld remquo(half16, half16, __local int16 *);
+half __ovld remquo(half, half, __private int *);
+half2 __ovld remquo(half2, half2, __private int2 *);
+half3 __ovld remquo(half3, half3, __private int3 *);
+half4 __ovld remquo(half4, half4, __private int4 *);
+half8 __ovld remquo(half8, half8, __private int8 *);
+half16 __ovld remquo(half16, half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
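+
+/*
+ * Illustrative sketch: remquo returns the same value as remainder and
+ * additionally reports the signed low bits of the integral quotient.
+ *
+ *   int q;
+ *   float r = remquo(5.5f, 2.0f, &q);  // r == -0.5f, q == 3 (n = 3, x/y > 0)
+ */
+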
+/**
+ * Round to integral value (using round to nearest
+ * even rounding mode) in floating-point format.
+ * Refer to section 7.1 for description of rounding
+ * modes.
+ */
+float __ovld __cnfn rint(float);
+float2 __ovld __cnfn rint(float2);
+float3 __ovld __cnfn rint(float3);
+float4 __ovld __cnfn rint(float4);
+float8 __ovld __cnfn rint(float8);
+float16 __ovld __cnfn rint(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rint(double);
+double2 __ovld __cnfn rint(double2);
+double3 __ovld __cnfn rint(double3);
+double4 __ovld __cnfn rint(double4);
+double8 __ovld __cnfn rint(double8);
+double16 __ovld __cnfn rint(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rint(half);
+half2 __ovld __cnfn rint(half2);
+half3 __ovld __cnfn rint(half3);
+half4 __ovld __cnfn rint(half4);
+half8 __ovld __cnfn rint(half8);
+half16 __ovld __cnfn rint(half16);
+#endif //cl_khr_fp16
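+
+/*
+ * Illustrative sketch: rint rounds halfway cases to the even neighbor
+ * (contrast with round further below, which rounds them away from zero).
+ *
+ *   float a = rint(2.5f);   // 2.0f (nearest even)
+ *   float b = rint(3.5f);   // 4.0f
+ *   float c = round(2.5f);  // 3.0f, for contrast
+ */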
+
+/**
+ * Compute x to the power 1/y.
+ */
+float __ovld __cnfn rootn(float, int);
+float2 __ovld __cnfn rootn(float2, int2);
+float3 __ovld __cnfn rootn(float3, int3);
+float4 __ovld __cnfn rootn(float4, int4);
+float8 __ovld __cnfn rootn(float8, int8);
+float16 __ovld __cnfn rootn(float16, int16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rootn(double, int);
+double2 __ovld __cnfn rootn(double2, int2);
+double3 __ovld __cnfn rootn(double3, int3);
+double4 __ovld __cnfn rootn(double4, int4);
+double8 __ovld __cnfn rootn(double8, int8);
+double16 __ovld __cnfn rootn(double16, int16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rootn(half, int);
+half2 __ovld __cnfn rootn(half2, int2);
+half3 __ovld __cnfn rootn(half3, int3);
+half4 __ovld __cnfn rootn(half4, int4);
+half8 __ovld __cnfn rootn(half8, int8);
+half16 __ovld __cnfn rootn(half16, int16);
+#endif //cl_khr_fp16
+
+/**
+ * Return the integral value nearest to x rounding
+ * halfway cases away from zero, regardless of the
+ * current rounding direction.
+ */
+float __ovld __cnfn round(float);
+float2 __ovld __cnfn round(float2);
+float3 __ovld __cnfn round(float3);
+float4 __ovld __cnfn round(float4);
+float8 __ovld __cnfn round(float8);
+float16 __ovld __cnfn round(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn round(double);
+double2 __ovld __cnfn round(double2);
+double3 __ovld __cnfn round(double3);
+double4 __ovld __cnfn round(double4);
+double8 __ovld __cnfn round(double8);
+double16 __ovld __cnfn round(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn round(half);
+half2 __ovld __cnfn round(half2);
+half3 __ovld __cnfn round(half3);
+half4 __ovld __cnfn round(half4);
+half8 __ovld __cnfn round(half8);
+half16 __ovld __cnfn round(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn rsqrt(float);
+float2 __ovld __cnfn rsqrt(float2);
+float3 __ovld __cnfn rsqrt(float3);
+float4 __ovld __cnfn rsqrt(float4);
+float8 __ovld __cnfn rsqrt(float8);
+float16 __ovld __cnfn rsqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rsqrt(double);
+double2 __ovld __cnfn rsqrt(double2);
+double3 __ovld __cnfn rsqrt(double3);
+double4 __ovld __cnfn rsqrt(double4);
+double8 __ovld __cnfn rsqrt(double8);
+double16 __ovld __cnfn rsqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rsqrt(half);
+half2 __ovld __cnfn rsqrt(half2);
+half3 __ovld __cnfn rsqrt(half3);
+half4 __ovld __cnfn rsqrt(half4);
+half8 __ovld __cnfn rsqrt(half8);
+half16 __ovld __cnfn rsqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine.
+ */
+float __ovld __cnfn sin(float);
+float2 __ovld __cnfn sin(float2);
+float3 __ovld __cnfn sin(float3);
+float4 __ovld __cnfn sin(float4);
+float8 __ovld __cnfn sin(float8);
+float16 __ovld __cnfn sin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sin(double);
+double2 __ovld __cnfn sin(double2);
+double3 __ovld __cnfn sin(double3);
+double4 __ovld __cnfn sin(double4);
+double8 __ovld __cnfn sin(double8);
+double16 __ovld __cnfn sin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sin(half);
+half2 __ovld __cnfn sin(half2);
+half3 __ovld __cnfn sin(half3);
+half4 __ovld __cnfn sin(half4);
+half8 __ovld __cnfn sin(half8);
+half16 __ovld __cnfn sin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine and cosine of x. The computed sine
+ * is the return value and the computed cosine is returned
+ * in cosval.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld sincos(float, float *);
+float2 __ovld sincos(float2, float2 *);
+float3 __ovld sincos(float3, float3 *);
+float4 __ovld sincos(float4, float4 *);
+float8 __ovld sincos(float8, float8 *);
+float16 __ovld sincos(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld sincos(double, double *);
+double2 __ovld sincos(double2, double2 *);
+double3 __ovld sincos(double3, double3 *);
+double4 __ovld sincos(double4, double4 *);
+double8 __ovld sincos(double8, double8 *);
+double16 __ovld sincos(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half, half *);
+half2 __ovld sincos(half2, half2 *);
+half3 __ovld sincos(half3, half3 *);
+half4 __ovld sincos(half4, half4 *);
+half8 __ovld sincos(half8, half8 *);
+half16 __ovld sincos(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld sincos(float, __global float *);
+float2 __ovld sincos(float2, __global float2 *);
+float3 __ovld sincos(float3, __global float3 *);
+float4 __ovld sincos(float4, __global float4 *);
+float8 __ovld sincos(float8, __global float8 *);
+float16 __ovld sincos(float16, __global float16 *);
+float __ovld sincos(float, __local float *);
+float2 __ovld sincos(float2, __local float2 *);
+float3 __ovld sincos(float3, __local float3 *);
+float4 __ovld sincos(float4, __local float4 *);
+float8 __ovld sincos(float8, __local float8 *);
+float16 __ovld sincos(float16, __local float16 *);
+float __ovld sincos(float, __private float *);
+float2 __ovld sincos(float2, __private float2 *);
+float3 __ovld sincos(float3, __private float3 *);
+float4 __ovld sincos(float4, __private float4 *);
+float8 __ovld sincos(float8, __private float8 *);
+float16 __ovld sincos(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld sincos(double, __global double *);
+double2 __ovld sincos(double2, __global double2 *);
+double3 __ovld sincos(double3, __global double3 *);
+double4 __ovld sincos(double4, __global double4 *);
+double8 __ovld sincos(double8, __global double8 *);
+double16 __ovld sincos(double16, __global double16 *);
+double __ovld sincos(double, __local double *);
+double2 __ovld sincos(double2, __local double2 *);
+double3 __ovld sincos(double3, __local double3 *);
+double4 __ovld sincos(double4, __local double4 *);
+double8 __ovld sincos(double8, __local double8 *);
+double16 __ovld sincos(double16, __local double16 *);
+double __ovld sincos(double, __private double *);
+double2 __ovld sincos(double2, __private double2 *);
+double3 __ovld sincos(double3, __private double3 *);
+double4 __ovld sincos(double4, __private double4 *);
+double8 __ovld sincos(double8, __private double8 *);
+double16 __ovld sincos(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half, __global half *);
+half2 __ovld sincos(half2, __global half2 *);
+half3 __ovld sincos(half3, __global half3 *);
+half4 __ovld sincos(half4, __global half4 *);
+half8 __ovld sincos(half8, __global half8 *);
+half16 __ovld sincos(half16, __global half16 *);
+half __ovld sincos(half, __local half *);
+half2 __ovld sincos(half2, __local half2 *);
+half3 __ovld sincos(half3, __local half3 *);
+half4 __ovld sincos(half4, __local half4 *);
+half8 __ovld sincos(half8, __local half8 *);
+half16 __ovld sincos(half16, __local half16 *);
+half __ovld sincos(half, __private half *);
+half2 __ovld sincos(half2, __private half2 *);
+half3 __ovld sincos(half3, __private half3 *);
+half4 __ovld sincos(half4, __private half4 *);
+half8 __ovld sincos(half8, __private half8 *);
+half16 __ovld sincos(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
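+
+/*
+ * Illustrative sketch (pointer assumed to be in a supported address
+ * space): sincos yields both results from one call.
+ *
+ *   float x = 0.5f;
+ *   float c;
+ *   float s = sincos(x, &c);  // s == sin(0.5f), c == cos(0.5f)
+ */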
+
+/**
+ * Compute hyperbolic sine.
+ */
+float __ovld __cnfn sinh(float);
+float2 __ovld __cnfn sinh(float2);
+float3 __ovld __cnfn sinh(float3);
+float4 __ovld __cnfn sinh(float4);
+float8 __ovld __cnfn sinh(float8);
+float16 __ovld __cnfn sinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinh(double);
+double2 __ovld __cnfn sinh(double2);
+double3 __ovld __cnfn sinh(double3);
+double4 __ovld __cnfn sinh(double4);
+double8 __ovld __cnfn sinh(double8);
+double16 __ovld __cnfn sinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinh(half);
+half2 __ovld __cnfn sinh(half2);
+half3 __ovld __cnfn sinh(half3);
+half4 __ovld __cnfn sinh(half4);
+half8 __ovld __cnfn sinh(half8);
+half16 __ovld __cnfn sinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sin(PI * x).
+ */
+float __ovld __cnfn sinpi(float);
+float2 __ovld __cnfn sinpi(float2);
+float3 __ovld __cnfn sinpi(float3);
+float4 __ovld __cnfn sinpi(float4);
+float8 __ovld __cnfn sinpi(float8);
+float16 __ovld __cnfn sinpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinpi(double);
+double2 __ovld __cnfn sinpi(double2);
+double3 __ovld __cnfn sinpi(double3);
+double4 __ovld __cnfn sinpi(double4);
+double8 __ovld __cnfn sinpi(double8);
+double16 __ovld __cnfn sinpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinpi(half);
+half2 __ovld __cnfn sinpi(half2);
+half3 __ovld __cnfn sinpi(half3);
+half4 __ovld __cnfn sinpi(half4);
+half8 __ovld __cnfn sinpi(half8);
+half16 __ovld __cnfn sinpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn sqrt(float);
+float2 __ovld __cnfn sqrt(float2);
+float3 __ovld __cnfn sqrt(float3);
+float4 __ovld __cnfn sqrt(float4);
+float8 __ovld __cnfn sqrt(float8);
+float16 __ovld __cnfn sqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sqrt(double);
+double2 __ovld __cnfn sqrt(double2);
+double3 __ovld __cnfn sqrt(double3);
+double4 __ovld __cnfn sqrt(double4);
+double8 __ovld __cnfn sqrt(double8);
+double16 __ovld __cnfn sqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sqrt(half);
+half2 __ovld __cnfn sqrt(half2);
+half3 __ovld __cnfn sqrt(half3);
+half4 __ovld __cnfn sqrt(half4);
+half8 __ovld __cnfn sqrt(half8);
+half16 __ovld __cnfn sqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tangent.
+ */
+float __ovld __cnfn tan(float);
+float2 __ovld __cnfn tan(float2);
+float3 __ovld __cnfn tan(float3);
+float4 __ovld __cnfn tan(float4);
+float8 __ovld __cnfn tan(float8);
+float16 __ovld __cnfn tan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tan(double);
+double2 __ovld __cnfn tan(double2);
+double3 __ovld __cnfn tan(double3);
+double4 __ovld __cnfn tan(double4);
+double8 __ovld __cnfn tan(double8);
+double16 __ovld __cnfn tan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tan(half);
+half2 __ovld __cnfn tan(half2);
+half3 __ovld __cnfn tan(half3);
+half4 __ovld __cnfn tan(half4);
+half8 __ovld __cnfn tan(half8);
+half16 __ovld __cnfn tan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic tangent.
+ */
+float __ovld __cnfn tanh(float);
+float2 __ovld __cnfn tanh(float2);
+float3 __ovld __cnfn tanh(float3);
+float4 __ovld __cnfn tanh(float4);
+float8 __ovld __cnfn tanh(float8);
+float16 __ovld __cnfn tanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanh(double);
+double2 __ovld __cnfn tanh(double2);
+double3 __ovld __cnfn tanh(double3);
+double4 __ovld __cnfn tanh(double4);
+double8 __ovld __cnfn tanh(double8);
+double16 __ovld __cnfn tanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanh(half);
+half2 __ovld __cnfn tanh(half2);
+half3 __ovld __cnfn tanh(half3);
+half4 __ovld __cnfn tanh(half4);
+half8 __ovld __cnfn tanh(half8);
+half16 __ovld __cnfn tanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tan(PI * x).
+ */
+float __ovld __cnfn tanpi(float);
+float2 __ovld __cnfn tanpi(float2);
+float3 __ovld __cnfn tanpi(float3);
+float4 __ovld __cnfn tanpi(float4);
+float8 __ovld __cnfn tanpi(float8);
+float16 __ovld __cnfn tanpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanpi(double);
+double2 __ovld __cnfn tanpi(double2);
+double3 __ovld __cnfn tanpi(double3);
+double4 __ovld __cnfn tanpi(double4);
+double8 __ovld __cnfn tanpi(double8);
+double16 __ovld __cnfn tanpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanpi(half);
+half2 __ovld __cnfn tanpi(half2);
+half3 __ovld __cnfn tanpi(half3);
+half4 __ovld __cnfn tanpi(half4);
+half8 __ovld __cnfn tanpi(half8);
+half16 __ovld __cnfn tanpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the gamma function.
+ */
+float __ovld __cnfn tgamma(float);
+float2 __ovld __cnfn tgamma(float2);
+float3 __ovld __cnfn tgamma(float3);
+float4 __ovld __cnfn tgamma(float4);
+float8 __ovld __cnfn tgamma(float8);
+float16 __ovld __cnfn tgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tgamma(double);
+double2 __ovld __cnfn tgamma(double2);
+double3 __ovld __cnfn tgamma(double3);
+double4 __ovld __cnfn tgamma(double4);
+double8 __ovld __cnfn tgamma(double8);
+double16 __ovld __cnfn tgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tgamma(half);
+half2 __ovld __cnfn tgamma(half2);
+half3 __ovld __cnfn tgamma(half3);
+half4 __ovld __cnfn tgamma(half4);
+half8 __ovld __cnfn tgamma(half8);
+half16 __ovld __cnfn tgamma(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to zero
+ * rounding mode.
+ */
+float __ovld __cnfn trunc(float);
+float2 __ovld __cnfn trunc(float2);
+float3 __ovld __cnfn trunc(float3);
+float4 __ovld __cnfn trunc(float4);
+float8 __ovld __cnfn trunc(float8);
+float16 __ovld __cnfn trunc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn trunc(double);
+double2 __ovld __cnfn trunc(double2);
+double3 __ovld __cnfn trunc(double3);
+double4 __ovld __cnfn trunc(double4);
+double8 __ovld __cnfn trunc(double8);
+double16 __ovld __cnfn trunc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn trunc(half);
+half2 __ovld __cnfn trunc(half2);
+half3 __ovld __cnfn trunc(half3);
+half4 __ovld __cnfn trunc(half4);
+half8 __ovld __cnfn trunc(half8);
+half16 __ovld __cnfn trunc(half16);
+#endif //cl_khr_fp16
+
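+// Illustrative note (not part of the upstream header): trunc() rounds
+// toward zero, i.e. it simply drops the fractional part for either sign:
+//
+//   float a = trunc(2.7f);   //  2.0f
+//   float b = trunc(-2.7f);  // -2.0f
+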
+/**
+ * Compute cosine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_cos(float);
+float2 __ovld __cnfn half_cos(float2);
+float3 __ovld __cnfn half_cos(float3);
+float4 __ovld __cnfn half_cos(float4);
+float8 __ovld __cnfn half_cos(float8);
+float16 __ovld __cnfn half_cos(float16);
+
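+// Illustrative note (not part of the upstream header): the half_ functions
+// trade accuracy for speed (the spec only guarantees reduced precision),
+// which suits graphics-style workloads. A sketch, assuming x has already
+// been reduced into [-2^16, +2^16]:
+//
+//   float c = half_cos(x);  // fast, reduced-precision cosine
+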
+/**
+ * Compute x / y.
+ */
+float __ovld __cnfn half_divide(float, float);
+float2 __ovld __cnfn half_divide(float2, float2);
+float3 __ovld __cnfn half_divide(float3, float3);
+float4 __ovld __cnfn half_divide(float4, float4);
+float8 __ovld __cnfn half_divide(float8, float8);
+float16 __ovld __cnfn half_divide(float16, float16);
+
+/**
+ * Compute the base-e exponential of x.
+ */
+float __ovld __cnfn half_exp(float);
+float2 __ovld __cnfn half_exp(float2);
+float3 __ovld __cnfn half_exp(float3);
+float4 __ovld __cnfn half_exp(float4);
+float8 __ovld __cnfn half_exp(float8);
+float16 __ovld __cnfn half_exp(float16);
+
+/**
+ * Compute the base-2 exponential of x.
+ */
+float __ovld __cnfn half_exp2(float);
+float2 __ovld __cnfn half_exp2(float2);
+float3 __ovld __cnfn half_exp2(float3);
+float4 __ovld __cnfn half_exp2(float4);
+float8 __ovld __cnfn half_exp2(float8);
+float16 __ovld __cnfn half_exp2(float16);
+
+/**
+ * Compute the base-10 exponential of x.
+ */
+float __ovld __cnfn half_exp10(float);
+float2 __ovld __cnfn half_exp10(float2);
+float3 __ovld __cnfn half_exp10(float3);
+float4 __ovld __cnfn half_exp10(float4);
+float8 __ovld __cnfn half_exp10(float8);
+float16 __ovld __cnfn half_exp10(float16);
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn half_log(float);
+float2 __ovld __cnfn half_log(float2);
+float3 __ovld __cnfn half_log(float3);
+float4 __ovld __cnfn half_log(float4);
+float8 __ovld __cnfn half_log(float8);
+float16 __ovld __cnfn half_log(float16);
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn half_log2(float);
+float2 __ovld __cnfn half_log2(float2);
+float3 __ovld __cnfn half_log2(float3);
+float4 __ovld __cnfn half_log2(float4);
+float8 __ovld __cnfn half_log2(float8);
+float16 __ovld __cnfn half_log2(float16);
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn half_log10(float);
+float2 __ovld __cnfn half_log10(float2);
+float3 __ovld __cnfn half_log10(float3);
+float4 __ovld __cnfn half_log10(float4);
+float8 __ovld __cnfn half_log10(float8);
+float16 __ovld __cnfn half_log10(float16);
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn half_powr(float, float);
+float2 __ovld __cnfn half_powr(float2, float2);
+float3 __ovld __cnfn half_powr(float3, float3);
+float4 __ovld __cnfn half_powr(float4, float4);
+float8 __ovld __cnfn half_powr(float8, float8);
+float16 __ovld __cnfn half_powr(float16, float16);
+
+/**
+ * Compute reciprocal.
+ */
+float __ovld __cnfn half_recip(float);
+float2 __ovld __cnfn half_recip(float2);
+float3 __ovld __cnfn half_recip(float3);
+float4 __ovld __cnfn half_recip(float4);
+float8 __ovld __cnfn half_recip(float8);
+float16 __ovld __cnfn half_recip(float16);
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn half_rsqrt(float);
+float2 __ovld __cnfn half_rsqrt(float2);
+float3 __ovld __cnfn half_rsqrt(float3);
+float4 __ovld __cnfn half_rsqrt(float4);
+float8 __ovld __cnfn half_rsqrt(float8);
+float16 __ovld __cnfn half_rsqrt(float16);
+
+/**
+ * Compute sine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_sin(float);
+float2 __ovld __cnfn half_sin(float2);
+float3 __ovld __cnfn half_sin(float3);
+float4 __ovld __cnfn half_sin(float4);
+float8 __ovld __cnfn half_sin(float8);
+float16 __ovld __cnfn half_sin(float16);
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn half_sqrt(float);
+float2 __ovld __cnfn half_sqrt(float2);
+float3 __ovld __cnfn half_sqrt(float3);
+float4 __ovld __cnfn half_sqrt(float4);
+float8 __ovld __cnfn half_sqrt(float8);
+float16 __ovld __cnfn half_sqrt(float16);
+
+/**
+ * Compute tangent. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_tan(float);
+float2 __ovld __cnfn half_tan(float2);
+float3 __ovld __cnfn half_tan(float3);
+float4 __ovld __cnfn half_tan(float4);
+float8 __ovld __cnfn half_tan(float8);
+float16 __ovld __cnfn half_tan(float16);
+
+/**
+ * Compute cosine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_cos(float);
+float2 __ovld __cnfn native_cos(float2);
+float3 __ovld __cnfn native_cos(float3);
+float4 __ovld __cnfn native_cos(float4);
+float8 __ovld __cnfn native_cos(float8);
+float16 __ovld __cnfn native_cos(float16);
+
+/**
+ * Compute x / y over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_divide(float, float);
+float2 __ovld __cnfn native_divide(float2, float2);
+float3 __ovld __cnfn native_divide(float3, float3);
+float4 __ovld __cnfn native_divide(float4, float4);
+float8 __ovld __cnfn native_divide(float8, float8);
+float16 __ovld __cnfn native_divide(float16, float16);
+
+/**
+ * Compute the base-e exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp(float);
+float2 __ovld __cnfn native_exp(float2);
+float3 __ovld __cnfn native_exp(float3);
+float4 __ovld __cnfn native_exp(float4);
+float8 __ovld __cnfn native_exp(float8);
+float16 __ovld __cnfn native_exp(float16);
+
+/**
+ * Compute the base-2 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp2(float);
+float2 __ovld __cnfn native_exp2(float2);
+float3 __ovld __cnfn native_exp2(float3);
+float4 __ovld __cnfn native_exp2(float4);
+float8 __ovld __cnfn native_exp2(float8);
+float16 __ovld __cnfn native_exp2(float16);
+
+/**
+ * Compute the base-10 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp10(float);
+float2 __ovld __cnfn native_exp10(float2);
+float3 __ovld __cnfn native_exp10(float3);
+float4 __ovld __cnfn native_exp10(float4);
+float8 __ovld __cnfn native_exp10(float8);
+float16 __ovld __cnfn native_exp10(float16);
+
+/**
+ * Compute natural logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log(float);
+float2 __ovld __cnfn native_log(float2);
+float3 __ovld __cnfn native_log(float3);
+float4 __ovld __cnfn native_log(float4);
+float8 __ovld __cnfn native_log(float8);
+float16 __ovld __cnfn native_log(float16);
+
+/**
+ * Compute a base 2 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log2(float);
+float2 __ovld __cnfn native_log2(float2);
+float3 __ovld __cnfn native_log2(float3);
+float4 __ovld __cnfn native_log2(float4);
+float8 __ovld __cnfn native_log2(float8);
+float16 __ovld __cnfn native_log2(float16);
+
+/**
+ * Compute a base 10 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log10(float);
+float2 __ovld __cnfn native_log10(float2);
+float3 __ovld __cnfn native_log10(float3);
+float4 __ovld __cnfn native_log10(float4);
+float8 __ovld __cnfn native_log10(float8);
+float16 __ovld __cnfn native_log10(float16);
+
+/**
+ * Compute x to the power y, where x is >= 0. The ranges
+ * of x and y are implementation-defined. The maximum
+ * error is implementation-defined.
+ */
+float __ovld __cnfn native_powr(float, float);
+float2 __ovld __cnfn native_powr(float2, float2);
+float3 __ovld __cnfn native_powr(float3, float3);
+float4 __ovld __cnfn native_powr(float4, float4);
+float8 __ovld __cnfn native_powr(float8, float8);
+float16 __ovld __cnfn native_powr(float16, float16);
+
+/**
+ * Compute reciprocal over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_recip(float);
+float2 __ovld __cnfn native_recip(float2);
+float3 __ovld __cnfn native_recip(float3);
+float4 __ovld __cnfn native_recip(float4);
+float8 __ovld __cnfn native_recip(float8);
+float16 __ovld __cnfn native_recip(float16);
+
+/**
+ * Compute inverse square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_rsqrt(float);
+float2 __ovld __cnfn native_rsqrt(float2);
+float3 __ovld __cnfn native_rsqrt(float3);
+float4 __ovld __cnfn native_rsqrt(float4);
+float8 __ovld __cnfn native_rsqrt(float8);
+float16 __ovld __cnfn native_rsqrt(float16);
+
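+// Illustrative sketch (not part of the upstream header): a hypothetical
+// normalize_fast() helper showing the usual role of native_rsqrt, trading
+// device-specific precision for speed:
+//
+//   float4 normalize_fast(float4 v) {
+//     return v * native_rsqrt(dot(v, v));  // v / |v|, reduced precision
+//   }
+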
+/**
+ * Compute sine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sin(float);
+float2 __ovld __cnfn native_sin(float2);
+float3 __ovld __cnfn native_sin(float3);
+float4 __ovld __cnfn native_sin(float4);
+float8 __ovld __cnfn native_sin(float8);
+float16 __ovld __cnfn native_sin(float16);
+
+/**
+ * Compute square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sqrt(float);
+float2 __ovld __cnfn native_sqrt(float2);
+float3 __ovld __cnfn native_sqrt(float3);
+float4 __ovld __cnfn native_sqrt(float4);
+float8 __ovld __cnfn native_sqrt(float8);
+float16 __ovld __cnfn native_sqrt(float16);
+
+/**
+ * Compute tangent over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_tan(float);
+float2 __ovld __cnfn native_tan(float2);
+float3 __ovld __cnfn native_tan(float3);
+float4 __ovld __cnfn native_tan(float4);
+float8 __ovld __cnfn native_tan(float8);
+float16 __ovld __cnfn native_tan(float16);
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
+
+/**
+ * Returns | x |.
+ */
+uchar __ovld __cnfn abs(char);
+uchar __ovld __cnfn abs(uchar);
+uchar2 __ovld __cnfn abs(char2);
+uchar2 __ovld __cnfn abs(uchar2);
+uchar3 __ovld __cnfn abs(char3);
+uchar3 __ovld __cnfn abs(uchar3);
+uchar4 __ovld __cnfn abs(char4);
+uchar4 __ovld __cnfn abs(uchar4);
+uchar8 __ovld __cnfn abs(char8);
+uchar8 __ovld __cnfn abs(uchar8);
+uchar16 __ovld __cnfn abs(char16);
+uchar16 __ovld __cnfn abs(uchar16);
+ushort __ovld __cnfn abs(short);
+ushort __ovld __cnfn abs(ushort);
+ushort2 __ovld __cnfn abs(short2);
+ushort2 __ovld __cnfn abs(ushort2);
+ushort3 __ovld __cnfn abs(short3);
+ushort3 __ovld __cnfn abs(ushort3);
+ushort4 __ovld __cnfn abs(short4);
+ushort4 __ovld __cnfn abs(ushort4);
+ushort8 __ovld __cnfn abs(short8);
+ushort8 __ovld __cnfn abs(ushort8);
+ushort16 __ovld __cnfn abs(short16);
+ushort16 __ovld __cnfn abs(ushort16);
+uint __ovld __cnfn abs(int);
+uint __ovld __cnfn abs(uint);
+uint2 __ovld __cnfn abs(int2);
+uint2 __ovld __cnfn abs(uint2);
+uint3 __ovld __cnfn abs(int3);
+uint3 __ovld __cnfn abs(uint3);
+uint4 __ovld __cnfn abs(int4);
+uint4 __ovld __cnfn abs(uint4);
+uint8 __ovld __cnfn abs(int8);
+uint8 __ovld __cnfn abs(uint8);
+uint16 __ovld __cnfn abs(int16);
+uint16 __ovld __cnfn abs(uint16);
+ulong __ovld __cnfn abs(long);
+ulong __ovld __cnfn abs(ulong);
+ulong2 __ovld __cnfn abs(long2);
+ulong2 __ovld __cnfn abs(ulong2);
+ulong3 __ovld __cnfn abs(long3);
+ulong3 __ovld __cnfn abs(ulong3);
+ulong4 __ovld __cnfn abs(long4);
+ulong4 __ovld __cnfn abs(ulong4);
+ulong8 __ovld __cnfn abs(long8);
+ulong8 __ovld __cnfn abs(ulong8);
+ulong16 __ovld __cnfn abs(long16);
+ulong16 __ovld __cnfn abs(ulong16);
+
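+// Illustrative note (not part of the upstream header): unlike C's abs(),
+// the integer abs() here returns the unsigned counterpart type, so even the
+// most negative input has a representable result:
+//
+//   uchar u = abs((char)-128);  // 128, which would not fit in char
+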
+/**
+ * Returns | x - y | without modulo overflow.
+ */
+uchar __ovld __cnfn abs_diff(char, char);
+uchar __ovld __cnfn abs_diff(uchar, uchar);
+uchar2 __ovld __cnfn abs_diff(char2, char2);
+uchar2 __ovld __cnfn abs_diff(uchar2, uchar2);
+uchar3 __ovld __cnfn abs_diff(char3, char3);
+uchar3 __ovld __cnfn abs_diff(uchar3, uchar3);
+uchar4 __ovld __cnfn abs_diff(char4, char4);
+uchar4 __ovld __cnfn abs_diff(uchar4, uchar4);
+uchar8 __ovld __cnfn abs_diff(char8, char8);
+uchar8 __ovld __cnfn abs_diff(uchar8, uchar8);
+uchar16 __ovld __cnfn abs_diff(char16, char16);
+uchar16 __ovld __cnfn abs_diff(uchar16, uchar16);
+ushort __ovld __cnfn abs_diff(short, short);
+ushort __ovld __cnfn abs_diff(ushort, ushort);
+ushort2 __ovld __cnfn abs_diff(short2, short2);
+ushort2 __ovld __cnfn abs_diff(ushort2, ushort2);
+ushort3 __ovld __cnfn abs_diff(short3, short3);
+ushort3 __ovld __cnfn abs_diff(ushort3, ushort3);
+ushort4 __ovld __cnfn abs_diff(short4, short4);
+ushort4 __ovld __cnfn abs_diff(ushort4, ushort4);
+ushort8 __ovld __cnfn abs_diff(short8, short8);
+ushort8 __ovld __cnfn abs_diff(ushort8, ushort8);
+ushort16 __ovld __cnfn abs_diff(short16, short16);
+ushort16 __ovld __cnfn abs_diff(ushort16, ushort16);
+uint __ovld __cnfn abs_diff(int, int);
+uint __ovld __cnfn abs_diff(uint, uint);
+uint2 __ovld __cnfn abs_diff(int2, int2);
+uint2 __ovld __cnfn abs_diff(uint2, uint2);
+uint3 __ovld __cnfn abs_diff(int3, int3);
+uint3 __ovld __cnfn abs_diff(uint3, uint3);
+uint4 __ovld __cnfn abs_diff(int4, int4);
+uint4 __ovld __cnfn abs_diff(uint4, uint4);
+uint8 __ovld __cnfn abs_diff(int8, int8);
+uint8 __ovld __cnfn abs_diff(uint8, uint8);
+uint16 __ovld __cnfn abs_diff(int16, int16);
+uint16 __ovld __cnfn abs_diff(uint16, uint16);
+ulong __ovld __cnfn abs_diff(long, long);
+ulong __ovld __cnfn abs_diff(ulong, ulong);
+ulong2 __ovld __cnfn abs_diff(long2, long2);
+ulong2 __ovld __cnfn abs_diff(ulong2, ulong2);
+ulong3 __ovld __cnfn abs_diff(long3, long3);
+ulong3 __ovld __cnfn abs_diff(ulong3, ulong3);
+ulong4 __ovld __cnfn abs_diff(long4, long4);
+ulong4 __ovld __cnfn abs_diff(ulong4, ulong4);
+ulong8 __ovld __cnfn abs_diff(long8, long8);
+ulong8 __ovld __cnfn abs_diff(ulong8, ulong8);
+ulong16 __ovld __cnfn abs_diff(long16, long16);
+ulong16 __ovld __cnfn abs_diff(ulong16, ulong16);
+
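+// Illustrative note (not part of the upstream header): abs_diff() computes
+// |x - y| in the unsigned counterpart type, so no intermediate wrap occurs:
+//
+//   uchar d = abs_diff((uchar)10, (uchar)250);   // 240
+//   uchar e = abs_diff((char)-100, (char)100);   // 200, exceeds CHAR_MAX
+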
+/**
+ * Returns x + y and saturates the result.
+ */
+char __ovld __cnfn add_sat(char, char);
+uchar __ovld __cnfn add_sat(uchar, uchar);
+char2 __ovld __cnfn add_sat(char2, char2);
+uchar2 __ovld __cnfn add_sat(uchar2, uchar2);
+char3 __ovld __cnfn add_sat(char3, char3);
+uchar3 __ovld __cnfn add_sat(uchar3, uchar3);
+char4 __ovld __cnfn add_sat(char4, char4);
+uchar4 __ovld __cnfn add_sat(uchar4, uchar4);
+char8 __ovld __cnfn add_sat(char8, char8);
+uchar8 __ovld __cnfn add_sat(uchar8, uchar8);
+char16 __ovld __cnfn add_sat(char16, char16);
+uchar16 __ovld __cnfn add_sat(uchar16, uchar16);
+short __ovld __cnfn add_sat(short, short);
+ushort __ovld __cnfn add_sat(ushort, ushort);
+short2 __ovld __cnfn add_sat(short2, short2);
+ushort2 __ovld __cnfn add_sat(ushort2, ushort2);
+short3 __ovld __cnfn add_sat(short3, short3);
+ushort3 __ovld __cnfn add_sat(ushort3, ushort3);
+short4 __ovld __cnfn add_sat(short4, short4);
+ushort4 __ovld __cnfn add_sat(ushort4, ushort4);
+short8 __ovld __cnfn add_sat(short8, short8);
+ushort8 __ovld __cnfn add_sat(ushort8, ushort8);
+short16 __ovld __cnfn add_sat(short16, short16);
+ushort16 __ovld __cnfn add_sat(ushort16, ushort16);
+int __ovld __cnfn add_sat(int, int);
+uint __ovld __cnfn add_sat(uint, uint);
+int2 __ovld __cnfn add_sat(int2, int2);
+uint2 __ovld __cnfn add_sat(uint2, uint2);
+int3 __ovld __cnfn add_sat(int3, int3);
+uint3 __ovld __cnfn add_sat(uint3, uint3);
+int4 __ovld __cnfn add_sat(int4, int4);
+uint4 __ovld __cnfn add_sat(uint4, uint4);
+int8 __ovld __cnfn add_sat(int8, int8);
+uint8 __ovld __cnfn add_sat(uint8, uint8);
+int16 __ovld __cnfn add_sat(int16, int16);
+uint16 __ovld __cnfn add_sat(uint16, uint16);
+long __ovld __cnfn add_sat(long, long);
+ulong __ovld __cnfn add_sat(ulong, ulong);
+long2 __ovld __cnfn add_sat(long2, long2);
+ulong2 __ovld __cnfn add_sat(ulong2, ulong2);
+long3 __ovld __cnfn add_sat(long3, long3);
+ulong3 __ovld __cnfn add_sat(ulong3, ulong3);
+long4 __ovld __cnfn add_sat(long4, long4);
+ulong4 __ovld __cnfn add_sat(ulong4, ulong4);
+long8 __ovld __cnfn add_sat(long8, long8);
+ulong8 __ovld __cnfn add_sat(ulong8, ulong8);
+long16 __ovld __cnfn add_sat(long16, long16);
+ulong16 __ovld __cnfn add_sat(ulong16, ulong16);
+
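+// Illustrative note (not part of the upstream header): add_sat() clamps to
+// the type's limits instead of wrapping:
+//
+//   uchar a = add_sat((uchar)200, (uchar)100);  // 255, not 44
+//   char  b = add_sat((char)100, (char)100);    // 127, not -56
+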
+/**
+ * Returns (x + y) >> 1. The intermediate sum does
+ * not modulo overflow.
+ */
+char __ovld __cnfn hadd(char, char);
+uchar __ovld __cnfn hadd(uchar, uchar);
+char2 __ovld __cnfn hadd(char2, char2);
+uchar2 __ovld __cnfn hadd(uchar2, uchar2);
+char3 __ovld __cnfn hadd(char3, char3);
+uchar3 __ovld __cnfn hadd(uchar3, uchar3);
+char4 __ovld __cnfn hadd(char4, char4);
+uchar4 __ovld __cnfn hadd(uchar4, uchar4);
+char8 __ovld __cnfn hadd(char8, char8);
+uchar8 __ovld __cnfn hadd(uchar8, uchar8);
+char16 __ovld __cnfn hadd(char16, char16);
+uchar16 __ovld __cnfn hadd(uchar16, uchar16);
+short __ovld __cnfn hadd(short, short);
+ushort __ovld __cnfn hadd(ushort, ushort);
+short2 __ovld __cnfn hadd(short2, short2);
+ushort2 __ovld __cnfn hadd(ushort2, ushort2);
+short3 __ovld __cnfn hadd(short3, short3);
+ushort3 __ovld __cnfn hadd(ushort3, ushort3);
+short4 __ovld __cnfn hadd(short4, short4);
+ushort4 __ovld __cnfn hadd(ushort4, ushort4);
+short8 __ovld __cnfn hadd(short8, short8);
+ushort8 __ovld __cnfn hadd(ushort8, ushort8);
+short16 __ovld __cnfn hadd(short16, short16);
+ushort16 __ovld __cnfn hadd(ushort16, ushort16);
+int __ovld __cnfn hadd(int, int);
+uint __ovld __cnfn hadd(uint, uint);
+int2 __ovld __cnfn hadd(int2, int2);
+uint2 __ovld __cnfn hadd(uint2, uint2);
+int3 __ovld __cnfn hadd(int3, int3);
+uint3 __ovld __cnfn hadd(uint3, uint3);
+int4 __ovld __cnfn hadd(int4, int4);
+uint4 __ovld __cnfn hadd(uint4, uint4);
+int8 __ovld __cnfn hadd(int8, int8);
+uint8 __ovld __cnfn hadd(uint8, uint8);
+int16 __ovld __cnfn hadd(int16, int16);
+uint16 __ovld __cnfn hadd(uint16, uint16);
+long __ovld __cnfn hadd(long, long);
+ulong __ovld __cnfn hadd(ulong, ulong);
+long2 __ovld __cnfn hadd(long2, long2);
+ulong2 __ovld __cnfn hadd(ulong2, ulong2);
+long3 __ovld __cnfn hadd(long3, long3);
+ulong3 __ovld __cnfn hadd(ulong3, ulong3);
+long4 __ovld __cnfn hadd(long4, long4);
+ulong4 __ovld __cnfn hadd(ulong4, ulong4);
+long8 __ovld __cnfn hadd(long8, long8);
+ulong8 __ovld __cnfn hadd(ulong8, ulong8);
+long16 __ovld __cnfn hadd(long16, long16);
+ulong16 __ovld __cnfn hadd(ulong16, ulong16);
+
+/**
+ * Returns (x + y + 1) >> 1. The intermediate sum
+ * does not modulo overflow.
+ */
+char __ovld __cnfn rhadd(char, char);
+uchar __ovld __cnfn rhadd(uchar, uchar);
+char2 __ovld __cnfn rhadd(char2, char2);
+uchar2 __ovld __cnfn rhadd(uchar2, uchar2);
+char3 __ovld __cnfn rhadd(char3, char3);
+uchar3 __ovld __cnfn rhadd(uchar3, uchar3);
+char4 __ovld __cnfn rhadd(char4, char4);
+uchar4 __ovld __cnfn rhadd(uchar4, uchar4);
+char8 __ovld __cnfn rhadd(char8, char8);
+uchar8 __ovld __cnfn rhadd(uchar8, uchar8);
+char16 __ovld __cnfn rhadd(char16, char16);
+uchar16 __ovld __cnfn rhadd(uchar16, uchar16);
+short __ovld __cnfn rhadd(short, short);
+ushort __ovld __cnfn rhadd(ushort, ushort);
+short2 __ovld __cnfn rhadd(short2, short2);
+ushort2 __ovld __cnfn rhadd(ushort2, ushort2);
+short3 __ovld __cnfn rhadd(short3, short3);
+ushort3 __ovld __cnfn rhadd(ushort3, ushort3);
+short4 __ovld __cnfn rhadd(short4, short4);
+ushort4 __ovld __cnfn rhadd(ushort4, ushort4);
+short8 __ovld __cnfn rhadd(short8, short8);
+ushort8 __ovld __cnfn rhadd(ushort8, ushort8);
+short16 __ovld __cnfn rhadd(short16, short16);
+ushort16 __ovld __cnfn rhadd(ushort16, ushort16);
+int __ovld __cnfn rhadd(int, int);
+uint __ovld __cnfn rhadd(uint, uint);
+int2 __ovld __cnfn rhadd(int2, int2);
+uint2 __ovld __cnfn rhadd(uint2, uint2);
+int3 __ovld __cnfn rhadd(int3, int3);
+uint3 __ovld __cnfn rhadd(uint3, uint3);
+int4 __ovld __cnfn rhadd(int4, int4);
+uint4 __ovld __cnfn rhadd(uint4, uint4);
+int8 __ovld __cnfn rhadd(int8, int8);
+uint8 __ovld __cnfn rhadd(uint8, uint8);
+int16 __ovld __cnfn rhadd(int16, int16);
+uint16 __ovld __cnfn rhadd(uint16, uint16);
+long __ovld __cnfn rhadd(long, long);
+ulong __ovld __cnfn rhadd(ulong, ulong);
+long2 __ovld __cnfn rhadd(long2, long2);
+ulong2 __ovld __cnfn rhadd(ulong2, ulong2);
+long3 __ovld __cnfn rhadd(long3, long3);
+ulong3 __ovld __cnfn rhadd(ulong3, ulong3);
+long4 __ovld __cnfn rhadd(long4, long4);
+ulong4 __ovld __cnfn rhadd(ulong4, ulong4);
+long8 __ovld __cnfn rhadd(long8, long8);
+ulong8 __ovld __cnfn rhadd(ulong8, ulong8);
+long16 __ovld __cnfn rhadd(long16, long16);
+ulong16 __ovld __cnfn rhadd(ulong16, ulong16);
+
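+// Illustrative note (not part of the upstream header): hadd()/rhadd()
+// average two values without the overflow risk of (x + y) / 2; rhadd()
+// rounds the average up:
+//
+//   uchar a = hadd((uchar)254, (uchar)255);   // (254+255)>>1   == 254
+//   uchar b = rhadd((uchar)254, (uchar)255);  // (254+255+1)>>1 == 255
+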
+/**
+ * Returns min(max(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+char __ovld __cnfn clamp(char, char, char);
+uchar __ovld __cnfn clamp(uchar, uchar, uchar);
+char2 __ovld __cnfn clamp(char2, char2, char2);
+uchar2 __ovld __cnfn clamp(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn clamp(char3, char3, char3);
+uchar3 __ovld __cnfn clamp(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn clamp(char4, char4, char4);
+uchar4 __ovld __cnfn clamp(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn clamp(char8, char8, char8);
+uchar8 __ovld __cnfn clamp(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn clamp(char16, char16, char16);
+uchar16 __ovld __cnfn clamp(uchar16, uchar16, uchar16);
+short __ovld __cnfn clamp(short, short, short);
+ushort __ovld __cnfn clamp(ushort, ushort, ushort);
+short2 __ovld __cnfn clamp(short2, short2, short2);
+ushort2 __ovld __cnfn clamp(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn clamp(short3, short3, short3);
+ushort3 __ovld __cnfn clamp(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn clamp(short4, short4, short4);
+ushort4 __ovld __cnfn clamp(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn clamp(short8, short8, short8);
+ushort8 __ovld __cnfn clamp(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn clamp(short16, short16, short16);
+ushort16 __ovld __cnfn clamp(ushort16, ushort16, ushort16);
+int __ovld __cnfn clamp(int, int, int);
+uint __ovld __cnfn clamp(uint, uint, uint);
+int2 __ovld __cnfn clamp(int2, int2, int2);
+uint2 __ovld __cnfn clamp(uint2, uint2, uint2);
+int3 __ovld __cnfn clamp(int3, int3, int3);
+uint3 __ovld __cnfn clamp(uint3, uint3, uint3);
+int4 __ovld __cnfn clamp(int4, int4, int4);
+uint4 __ovld __cnfn clamp(uint4, uint4, uint4);
+int8 __ovld __cnfn clamp(int8, int8, int8);
+uint8 __ovld __cnfn clamp(uint8, uint8, uint8);
+int16 __ovld __cnfn clamp(int16, int16, int16);
+uint16 __ovld __cnfn clamp(uint16, uint16, uint16);
+long __ovld __cnfn clamp(long, long, long);
+ulong __ovld __cnfn clamp(ulong, ulong, ulong);
+long2 __ovld __cnfn clamp(long2, long2, long2);
+ulong2 __ovld __cnfn clamp(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn clamp(long3, long3, long3);
+ulong3 __ovld __cnfn clamp(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn clamp(long4, long4, long4);
+ulong4 __ovld __cnfn clamp(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn clamp(long8, long8, long8);
+ulong8 __ovld __cnfn clamp(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn clamp(long16, long16, long16);
+ulong16 __ovld __cnfn clamp(ulong16, ulong16, ulong16);
+char2 __ovld __cnfn clamp(char2, char, char);
+uchar2 __ovld __cnfn clamp(uchar2, uchar, uchar);
+char3 __ovld __cnfn clamp(char3, char, char);
+uchar3 __ovld __cnfn clamp(uchar3, uchar, uchar);
+char4 __ovld __cnfn clamp(char4, char, char);
+uchar4 __ovld __cnfn clamp(uchar4, uchar, uchar);
+char8 __ovld __cnfn clamp(char8, char, char);
+uchar8 __ovld __cnfn clamp(uchar8, uchar, uchar);
+char16 __ovld __cnfn clamp(char16, char, char);
+uchar16 __ovld __cnfn clamp(uchar16, uchar, uchar);
+short2 __ovld __cnfn clamp(short2, short, short);
+ushort2 __ovld __cnfn clamp(ushort2, ushort, ushort);
+short3 __ovld __cnfn clamp(short3, short, short);
+ushort3 __ovld __cnfn clamp(ushort3, ushort, ushort);
+short4 __ovld __cnfn clamp(short4, short, short);
+ushort4 __ovld __cnfn clamp(ushort4, ushort, ushort);
+short8 __ovld __cnfn clamp(short8, short, short);
+ushort8 __ovld __cnfn clamp(ushort8, ushort, ushort);
+short16 __ovld __cnfn clamp(short16, short, short);
+ushort16 __ovld __cnfn clamp(ushort16, ushort, ushort);
+int2 __ovld __cnfn clamp(int2, int, int);
+uint2 __ovld __cnfn clamp(uint2, uint, uint);
+int3 __ovld __cnfn clamp(int3, int, int);
+uint3 __ovld __cnfn clamp(uint3, uint, uint);
+int4 __ovld __cnfn clamp(int4, int, int);
+uint4 __ovld __cnfn clamp(uint4, uint, uint);
+int8 __ovld __cnfn clamp(int8, int, int);
+uint8 __ovld __cnfn clamp(uint8, uint, uint);
+int16 __ovld __cnfn clamp(int16, int, int);
+uint16 __ovld __cnfn clamp(uint16, uint, uint);
+long2 __ovld __cnfn clamp(long2, long, long);
+ulong2 __ovld __cnfn clamp(ulong2, ulong, ulong);
+long3 __ovld __cnfn clamp(long3, long, long);
+ulong3 __ovld __cnfn clamp(ulong3, ulong, ulong);
+long4 __ovld __cnfn clamp(long4, long, long);
+ulong4 __ovld __cnfn clamp(ulong4, ulong, ulong);
+long8 __ovld __cnfn clamp(long8, long, long);
+ulong8 __ovld __cnfn clamp(ulong8, ulong, ulong);
+long16 __ovld __cnfn clamp(long16, long, long);
+ulong16 __ovld __cnfn clamp(ulong16, ulong, ulong);
+
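+// Illustrative note (not part of the upstream header): clamp(x, lo, hi) is
+// min(max(x, lo), hi); scalar bounds are broadcast across vector elements:
+//
+//   int  a = clamp(130, 0, 127);                      // 127
+//   int4 b = clamp((int4)(-5, 3, 99, 200), 0, 127);   // (0, 3, 99, 127)
+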
+/**
+ * Returns the number of leading 0-bits in x, starting
+ * at the most significant bit position.
+ */
+char __ovld __cnfn clz(char);
+uchar __ovld __cnfn clz(uchar);
+char2 __ovld __cnfn clz(char2);
+uchar2 __ovld __cnfn clz(uchar2);
+char3 __ovld __cnfn clz(char3);
+uchar3 __ovld __cnfn clz(uchar3);
+char4 __ovld __cnfn clz(char4);
+uchar4 __ovld __cnfn clz(uchar4);
+char8 __ovld __cnfn clz(char8);
+uchar8 __ovld __cnfn clz(uchar8);
+char16 __ovld __cnfn clz(char16);
+uchar16 __ovld __cnfn clz(uchar16);
+short __ovld __cnfn clz(short);
+ushort __ovld __cnfn clz(ushort);
+short2 __ovld __cnfn clz(short2);
+ushort2 __ovld __cnfn clz(ushort2);
+short3 __ovld __cnfn clz(short3);
+ushort3 __ovld __cnfn clz(ushort3);
+short4 __ovld __cnfn clz(short4);
+ushort4 __ovld __cnfn clz(ushort4);
+short8 __ovld __cnfn clz(short8);
+ushort8 __ovld __cnfn clz(ushort8);
+short16 __ovld __cnfn clz(short16);
+ushort16 __ovld __cnfn clz(ushort16);
+int __ovld __cnfn clz(int);
+uint __ovld __cnfn clz(uint);
+int2 __ovld __cnfn clz(int2);
+uint2 __ovld __cnfn clz(uint2);
+int3 __ovld __cnfn clz(int3);
+uint3 __ovld __cnfn clz(uint3);
+int4 __ovld __cnfn clz(int4);
+uint4 __ovld __cnfn clz(uint4);
+int8 __ovld __cnfn clz(int8);
+uint8 __ovld __cnfn clz(uint8);
+int16 __ovld __cnfn clz(int16);
+uint16 __ovld __cnfn clz(uint16);
+long __ovld __cnfn clz(long);
+ulong __ovld __cnfn clz(ulong);
+long2 __ovld __cnfn clz(long2);
+ulong2 __ovld __cnfn clz(ulong2);
+long3 __ovld __cnfn clz(long3);
+ulong3 __ovld __cnfn clz(ulong3);
+long4 __ovld __cnfn clz(long4);
+ulong4 __ovld __cnfn clz(ulong4);
+long8 __ovld __cnfn clz(long8);
+ulong8 __ovld __cnfn clz(ulong8);
+long16 __ovld __cnfn clz(long16);
+ulong16 __ovld __cnfn clz(ulong16);
+
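+// Illustrative note (not part of the upstream header): the count is
+// relative to the width of the element type:
+//
+//   uchar a = clz((uchar)1);  // 7   (8-bit element type)
+//   uint  b = clz(1u);        // 31  (32-bit element type)
+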
+/**
+ * Returns the count of trailing 0-bits in x. If x is 0,
+ * returns the size in bits of the type of x, or of the
+ * component type of x if x is a vector.
+ */
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+char __ovld __cnfn ctz(char);
+uchar __ovld __cnfn ctz(uchar);
+char2 __ovld __cnfn ctz(char2);
+uchar2 __ovld __cnfn ctz(uchar2);
+char3 __ovld __cnfn ctz(char3);
+uchar3 __ovld __cnfn ctz(uchar3);
+char4 __ovld __cnfn ctz(char4);
+uchar4 __ovld __cnfn ctz(uchar4);
+char8 __ovld __cnfn ctz(char8);
+uchar8 __ovld __cnfn ctz(uchar8);
+char16 __ovld __cnfn ctz(char16);
+uchar16 __ovld __cnfn ctz(uchar16);
+short __ovld __cnfn ctz(short);
+ushort __ovld __cnfn ctz(ushort);
+short2 __ovld __cnfn ctz(short2);
+ushort2 __ovld __cnfn ctz(ushort2);
+short3 __ovld __cnfn ctz(short3);
+ushort3 __ovld __cnfn ctz(ushort3);
+short4 __ovld __cnfn ctz(short4);
+ushort4 __ovld __cnfn ctz(ushort4);
+short8 __ovld __cnfn ctz(short8);
+ushort8 __ovld __cnfn ctz(ushort8);
+short16 __ovld __cnfn ctz(short16);
+ushort16 __ovld __cnfn ctz(ushort16);
+int __ovld __cnfn ctz(int);
+uint __ovld __cnfn ctz(uint);
+int2 __ovld __cnfn ctz(int2);
+uint2 __ovld __cnfn ctz(uint2);
+int3 __ovld __cnfn ctz(int3);
+uint3 __ovld __cnfn ctz(uint3);
+int4 __ovld __cnfn ctz(int4);
+uint4 __ovld __cnfn ctz(uint4);
+int8 __ovld __cnfn ctz(int8);
+uint8 __ovld __cnfn ctz(uint8);
+int16 __ovld __cnfn ctz(int16);
+uint16 __ovld __cnfn ctz(uint16);
+long __ovld __cnfn ctz(long);
+ulong __ovld __cnfn ctz(ulong);
+long2 __ovld __cnfn ctz(long2);
+ulong2 __ovld __cnfn ctz(ulong2);
+long3 __ovld __cnfn ctz(long3);
+ulong3 __ovld __cnfn ctz(ulong3);
+long4 __ovld __cnfn ctz(long4);
+ulong4 __ovld __cnfn ctz(ulong4);
+long8 __ovld __cnfn ctz(long8);
+ulong8 __ovld __cnfn ctz(ulong8);
+long16 __ovld __cnfn ctz(long16);
+ulong16 __ovld __cnfn ctz(ulong16);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
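+// Illustrative note (not part of the upstream header): unlike some CPU
+// intrinsics, ctz() of zero is well defined as the element bit width:
+//
+//   uint a = ctz(8u);  // 3, since 8 == 0b1000
+//   uint b = ctz(0u);  // 32, the bit width of uint
+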
+/**
+ * Returns mul_hi(a, b) + c.
+ */
+char __ovld __cnfn mad_hi(char, char, char);
+uchar __ovld __cnfn mad_hi(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_hi(char2, char2, char2);
+uchar2 __ovld __cnfn mad_hi(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_hi(char3, char3, char3);
+uchar3 __ovld __cnfn mad_hi(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_hi(char4, char4, char4);
+uchar4 __ovld __cnfn mad_hi(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_hi(char8, char8, char8);
+uchar8 __ovld __cnfn mad_hi(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_hi(char16, char16, char16);
+uchar16 __ovld __cnfn mad_hi(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_hi(short, short, short);
+ushort __ovld __cnfn mad_hi(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_hi(short2, short2, short2);
+ushort2 __ovld __cnfn mad_hi(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_hi(short3, short3, short3);
+ushort3 __ovld __cnfn mad_hi(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_hi(short4, short4, short4);
+ushort4 __ovld __cnfn mad_hi(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_hi(short8, short8, short8);
+ushort8 __ovld __cnfn mad_hi(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_hi(short16, short16, short16);
+ushort16 __ovld __cnfn mad_hi(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_hi(int, int, int);
+uint __ovld __cnfn mad_hi(uint, uint, uint);
+int2 __ovld __cnfn mad_hi(int2, int2, int2);
+uint2 __ovld __cnfn mad_hi(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_hi(int3, int3, int3);
+uint3 __ovld __cnfn mad_hi(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_hi(int4, int4, int4);
+uint4 __ovld __cnfn mad_hi(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_hi(int8, int8, int8);
+uint8 __ovld __cnfn mad_hi(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_hi(int16, int16, int16);
+uint16 __ovld __cnfn mad_hi(uint16, uint16, uint16);
+long __ovld __cnfn mad_hi(long, long, long);
+ulong __ovld __cnfn mad_hi(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_hi(long2, long2, long2);
+ulong2 __ovld __cnfn mad_hi(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_hi(long3, long3, long3);
+ulong3 __ovld __cnfn mad_hi(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_hi(long4, long4, long4);
+ulong4 __ovld __cnfn mad_hi(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_hi(long8, long8, long8);
+ulong8 __ovld __cnfn mad_hi(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_hi(long16, long16, long16);
+ulong16 __ovld __cnfn mad_hi(ulong16, ulong16, ulong16);
+
+/**
+ * Returns a * b + c and saturates the result.
+ */
+char __ovld __cnfn mad_sat(char, char, char);
+uchar __ovld __cnfn mad_sat(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_sat(char2, char2, char2);
+uchar2 __ovld __cnfn mad_sat(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_sat(char3, char3, char3);
+uchar3 __ovld __cnfn mad_sat(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_sat(char4, char4, char4);
+uchar4 __ovld __cnfn mad_sat(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_sat(char8, char8, char8);
+uchar8 __ovld __cnfn mad_sat(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_sat(char16, char16, char16);
+uchar16 __ovld __cnfn mad_sat(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_sat(short, short, short);
+ushort __ovld __cnfn mad_sat(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_sat(short2, short2, short2);
+ushort2 __ovld __cnfn mad_sat(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_sat(short3, short3, short3);
+ushort3 __ovld __cnfn mad_sat(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_sat(short4, short4, short4);
+ushort4 __ovld __cnfn mad_sat(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_sat(short8, short8, short8);
+ushort8 __ovld __cnfn mad_sat(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_sat(short16, short16, short16);
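+// Illustrative note (not part of the upstream header): sinpi(x) evaluates
+// sin(pi * x) without first rounding pi * x to a float, so multiples of
+// 0.5 behave as expected:
+//
+//   float a = sinpi(0.5f);  // 1.0f (within the allowed ulp error)
+//   float b = sinpi(1.0f);  // 0.0f (within the allowed ulp error)
+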
+ushort16 __ovld __cnfn mad_sat(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_sat(int, int, int);
+uint __ovld __cnfn mad_sat(uint, uint, uint);
+int2 __ovld __cnfn mad_sat(int2, int2, int2);
+uint2 __ovld __cnfn mad_sat(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_sat(int3, int3, int3);
+uint3 __ovld __cnfn mad_sat(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_sat(int4, int4, int4);
+uint4 __ovld __cnfn mad_sat(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_sat(int8, int8, int8);
+uint8 __ovld __cnfn mad_sat(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_sat(int16, int16, int16);
+uint16 __ovld __cnfn mad_sat(uint16, uint16, uint16);
+long __ovld __cnfn mad_sat(long, long, long);
+ulong __ovld __cnfn mad_sat(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_sat(long2, long2, long2);
+ulong2 __ovld __cnfn mad_sat(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_sat(long3, long3, long3);
+ulong3 __ovld __cnfn mad_sat(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_sat(long4, long4, long4);
+ulong4 __ovld __cnfn mad_sat(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_sat(long8, long8, long8);
+ulong8 __ovld __cnfn mad_sat(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_sat(long16, long16, long16);
+ulong16 __ovld __cnfn mad_sat(ulong16, ulong16, ulong16);
+
+/**
+ * Returns y if x < y, otherwise it returns x.
+ */
+char __ovld __cnfn max(char, char);
+uchar __ovld __cnfn max(uchar, uchar);
+char2 __ovld __cnfn max(char2, char2);
+uchar2 __ovld __cnfn max(uchar2, uchar2);
+char3 __ovld __cnfn max(char3, char3);
+uchar3 __ovld __cnfn max(uchar3, uchar3);
+char4 __ovld __cnfn max(char4, char4);
+uchar4 __ovld __cnfn max(uchar4, uchar4);
+char8 __ovld __cnfn max(char8, char8);
+uchar8 __ovld __cnfn max(uchar8, uchar8);
+char16 __ovld __cnfn max(char16, char16);
+uchar16 __ovld __cnfn max(uchar16, uchar16);
+short __ovld __cnfn max(short, short);
+ushort __ovld __cnfn max(ushort, ushort);
+short2 __ovld __cnfn max(short2, short2);
+ushort2 __ovld __cnfn max(ushort2, ushort2);
+short3 __ovld __cnfn max(short3, short3);
+ushort3 __ovld __cnfn max(ushort3, ushort3);
+short4 __ovld __cnfn max(short4, short4);
+ushort4 __ovld __cnfn max(ushort4, ushort4);
+short8 __ovld __cnfn max(short8, short8);
+ushort8 __ovld __cnfn max(ushort8, ushort8);
+short16 __ovld __cnfn max(short16, short16);
+ushort16 __ovld __cnfn max(ushort16, ushort16);
+int __ovld __cnfn max(int, int);
+uint __ovld __cnfn max(uint, uint);
+int2 __ovld __cnfn max(int2, int2);
+uint2 __ovld __cnfn max(uint2, uint2);
+int3 __ovld __cnfn max(int3, int3);
+uint3 __ovld __cnfn max(uint3, uint3);
+int4 __ovld __cnfn max(int4, int4);
+uint4 __ovld __cnfn max(uint4, uint4);
+int8 __ovld __cnfn max(int8, int8);
+uint8 __ovld __cnfn max(uint8, uint8);
+int16 __ovld __cnfn max(int16, int16);
+uint16 __ovld __cnfn max(uint16, uint16);
+long __ovld __cnfn max(long, long);
+ulong __ovld __cnfn max(ulong, ulong);
+long2 __ovld __cnfn max(long2, long2);
+ulong2 __ovld __cnfn max(ulong2, ulong2);
+long3 __ovld __cnfn max(long3, long3);
+ulong3 __ovld __cnfn max(ulong3, ulong3);
+long4 __ovld __cnfn max(long4, long4);
+ulong4 __ovld __cnfn max(ulong4, ulong4);
+long8 __ovld __cnfn max(long8, long8);
+ulong8 __ovld __cnfn max(ulong8, ulong8);
+long16 __ovld __cnfn max(long16, long16);
+ulong16 __ovld __cnfn max(ulong16, ulong16);
+char2 __ovld __cnfn max(char2, char);
+uchar2 __ovld __cnfn max(uchar2, uchar);
+char3 __ovld __cnfn max(char3, char);
+uchar3 __ovld __cnfn max(uchar3, uchar);
+char4 __ovld __cnfn max(char4, char);
+uchar4 __ovld __cnfn max(uchar4, uchar);
+char8 __ovld __cnfn max(char8, char);
+uchar8 __ovld __cnfn max(uchar8, uchar);
+char16 __ovld __cnfn max(char16, char);
+uchar16 __ovld __cnfn max(uchar16, uchar);
+short2 __ovld __cnfn max(short2, short);
+ushort2 __ovld __cnfn max(ushort2, ushort);
+short3 __ovld __cnfn max(short3, short);
+ushort3 __ovld __cnfn max(ushort3, ushort);
+short4 __ovld __cnfn max(short4, short);
+ushort4 __ovld __cnfn max(ushort4, ushort);
+short8 __ovld __cnfn max(short8, short);
+ushort8 __ovld __cnfn max(ushort8, ushort);
+short16 __ovld __cnfn max(short16, short);
+ushort16 __ovld __cnfn max(ushort16, ushort);
+int2 __ovld __cnfn max(int2, int);
+uint2 __ovld __cnfn max(uint2, uint);
+int3 __ovld __cnfn max(int3, int);
+uint3 __ovld __cnfn max(uint3, uint);
+int4 __ovld __cnfn max(int4, int);
+uint4 __ovld __cnfn max(uint4, uint);
+int8 __ovld __cnfn max(int8, int);
+uint8 __ovld __cnfn max(uint8, uint);
+int16 __ovld __cnfn max(int16, int);
+uint16 __ovld __cnfn max(uint16, uint);
+long2 __ovld __cnfn max(long2, long);
+ulong2 __ovld __cnfn max(ulong2, ulong);
+long3 __ovld __cnfn max(long3, long);
+ulong3 __ovld __cnfn max(ulong3, ulong);
+long4 __ovld __cnfn max(long4, long);
+ulong4 __ovld __cnfn max(ulong4, ulong);
+long8 __ovld __cnfn max(long8, long);
+ulong8 __ovld __cnfn max(ulong8, ulong);
+long16 __ovld __cnfn max(long16, long);
+ulong16 __ovld __cnfn max(ulong16, ulong);
+
+/**
+ * Returns y if y < x, otherwise it returns x.
+ */
+char __ovld __cnfn min(char, char);
+uchar __ovld __cnfn min(uchar, uchar);
+char2 __ovld __cnfn min(char2, char2);
+uchar2 __ovld __cnfn min(uchar2, uchar2);
+char3 __ovld __cnfn min(char3, char3);
+uchar3 __ovld __cnfn min(uchar3, uchar3);
+char4 __ovld __cnfn min(char4, char4);
+uchar4 __ovld __cnfn min(uchar4, uchar4);
+char8 __ovld __cnfn min(char8, char8);
+uchar8 __ovld __cnfn min(uchar8, uchar8);
+char16 __ovld __cnfn min(char16, char16);
+uchar16 __ovld __cnfn min(uchar16, uchar16);
+short __ovld __cnfn min(short, short);
+ushort __ovld __cnfn min(ushort, ushort);
+short2 __ovld __cnfn min(short2, short2);
+ushort2 __ovld __cnfn min(ushort2, ushort2);
+short3 __ovld __cnfn min(short3, short3);
+ushort3 __ovld __cnfn min(ushort3, ushort3);
+short4 __ovld __cnfn min(short4, short4);
+ushort4 __ovld __cnfn min(ushort4, ushort4);
+short8 __ovld __cnfn min(short8, short8);
+ushort8 __ovld __cnfn min(ushort8, ushort8);
+short16 __ovld __cnfn min(short16, short16);
+ushort16 __ovld __cnfn min(ushort16, ushort16);
+int __ovld __cnfn min(int, int);
+uint __ovld __cnfn min(uint, uint);
+int2 __ovld __cnfn min(int2, int2);
+uint2 __ovld __cnfn min(uint2, uint2);
+int3 __ovld __cnfn min(int3, int3);
+uint3 __ovld __cnfn min(uint3, uint3);
+int4 __ovld __cnfn min(int4, int4);
+uint4 __ovld __cnfn min(uint4, uint4);
+int8 __ovld __cnfn min(int8, int8);
+uint8 __ovld __cnfn min(uint8, uint8);
+int16 __ovld __cnfn min(int16, int16);
+uint16 __ovld __cnfn min(uint16, uint16);
+long __ovld __cnfn min(long, long);
+ulong __ovld __cnfn min(ulong, ulong);
+long2 __ovld __cnfn min(long2, long2);
+ulong2 __ovld __cnfn min(ulong2, ulong2);
+long3 __ovld __cnfn min(long3, long3);
+ulong3 __ovld __cnfn min(ulong3, ulong3);
+long4 __ovld __cnfn min(long4, long4);
+ulong4 __ovld __cnfn min(ulong4, ulong4);
+long8 __ovld __cnfn min(long8, long8);
+ulong8 __ovld __cnfn min(ulong8, ulong8);
+long16 __ovld __cnfn min(long16, long16);
+ulong16 __ovld __cnfn min(ulong16, ulong16);
+char2 __ovld __cnfn min(char2, char);
+uchar2 __ovld __cnfn min(uchar2, uchar);
+char3 __ovld __cnfn min(char3, char);
+uchar3 __ovld __cnfn min(uchar3, uchar);
+char4 __ovld __cnfn min(char4, char);
+uchar4 __ovld __cnfn min(uchar4, uchar);
+char8 __ovld __cnfn min(char8, char);
+uchar8 __ovld __cnfn min(uchar8, uchar);
+char16 __ovld __cnfn min(char16, char);
+uchar16 __ovld __cnfn min(uchar16, uchar);
+short2 __ovld __cnfn min(short2, short);
+ushort2 __ovld __cnfn min(ushort2, ushort);
+short3 __ovld __cnfn min(short3, short);
+ushort3 __ovld __cnfn min(ushort3, ushort);
+short4 __ovld __cnfn min(short4, short);
+ushort4 __ovld __cnfn min(ushort4, ushort);
+short8 __ovld __cnfn min(short8, short);
+ushort8 __ovld __cnfn min(ushort8, ushort);
+short16 __ovld __cnfn min(short16, short);
+ushort16 __ovld __cnfn min(ushort16, ushort);
+int2 __ovld __cnfn min(int2, int);
+uint2 __ovld __cnfn min(uint2, uint);
+int3 __ovld __cnfn min(int3, int);
+uint3 __ovld __cnfn min(uint3, uint);
+int4 __ovld __cnfn min(int4, int);
+uint4 __ovld __cnfn min(uint4, uint);
+int8 __ovld __cnfn min(int8, int);
+uint8 __ovld __cnfn min(uint8, uint);
+int16 __ovld __cnfn min(int16, int);
+uint16 __ovld __cnfn min(uint16, uint);
+long2 __ovld __cnfn min(long2, long);
+ulong2 __ovld __cnfn min(ulong2, ulong);
+long3 __ovld __cnfn min(long3, long);
+ulong3 __ovld __cnfn min(ulong3, ulong);
+long4 __ovld __cnfn min(long4, long);
+ulong4 __ovld __cnfn min(ulong4, ulong);
+long8 __ovld __cnfn min(long8, long);
+ulong8 __ovld __cnfn min(ulong8, ulong);
+long16 __ovld __cnfn min(long16, long);
+ulong16 __ovld __cnfn min(ulong16, ulong);
+
+/**
+ * Computes x * y and returns the high half of the product.
+ */
+char __ovld __cnfn mul_hi(char, char);
+uchar __ovld __cnfn mul_hi(uchar, uchar);
+char2 __ovld __cnfn mul_hi(char2, char2);
+uchar2 __ovld __cnfn mul_hi(uchar2, uchar2);
+char3 __ovld __cnfn mul_hi(char3, char3);
+uchar3 __ovld __cnfn mul_hi(uchar3, uchar3);
+char4 __ovld __cnfn mul_hi(char4, char4);
+uchar4 __ovld __cnfn mul_hi(uchar4, uchar4);
+char8 __ovld __cnfn mul_hi(char8, char8);
+uchar8 __ovld __cnfn mul_hi(uchar8, uchar8);
+char16 __ovld __cnfn mul_hi(char16, char16);
+uchar16 __ovld __cnfn mul_hi(uchar16, uchar16);
+short __ovld __cnfn mul_hi(short, short);
+ushort __ovld __cnfn mul_hi(ushort, ushort);
+short2 __ovld __cnfn mul_hi(short2, short2);
+ushort2 __ovld __cnfn mul_hi(ushort2, ushort2);
+short3 __ovld __cnfn mul_hi(short3, short3);
+ushort3 __ovld __cnfn mul_hi(ushort3, ushort3);
+short4 __ovld __cnfn mul_hi(short4, short4);
+ushort4 __ovld __cnfn mul_hi(ushort4, ushort4);
+short8 __ovld __cnfn mul_hi(short8, short8);
+ushort8 __ovld __cnfn mul_hi(ushort8, ushort8);
+short16 __ovld __cnfn mul_hi(short16, short16);
+ushort16 __ovld __cnfn mul_hi(ushort16, ushort16);
+int __ovld __cnfn mul_hi(int, int);
+uint __ovld __cnfn mul_hi(uint, uint);
+int2 __ovld __cnfn mul_hi(int2, int2);
+uint2 __ovld __cnfn mul_hi(uint2, uint2);
+int3 __ovld __cnfn mul_hi(int3, int3);
+uint3 __ovld __cnfn mul_hi(uint3, uint3);
+int4 __ovld __cnfn mul_hi(int4, int4);
+uint4 __ovld __cnfn mul_hi(uint4, uint4);
+int8 __ovld __cnfn mul_hi(int8, int8);
+uint8 __ovld __cnfn mul_hi(uint8, uint8);
+int16 __ovld __cnfn mul_hi(int16, int16);
+uint16 __ovld __cnfn mul_hi(uint16, uint16);
+long __ovld __cnfn mul_hi(long, long);
+ulong __ovld __cnfn mul_hi(ulong, ulong);
+long2 __ovld __cnfn mul_hi(long2, long2);
+ulong2 __ovld __cnfn mul_hi(ulong2, ulong2);
+long3 __ovld __cnfn mul_hi(long3, long3);
+ulong3 __ovld __cnfn mul_hi(ulong3, ulong3);
+long4 __ovld __cnfn mul_hi(long4, long4);
+ulong4 __ovld __cnfn mul_hi(ulong4, ulong4);
+long8 __ovld __cnfn mul_hi(long8, long8);
+ulong8 __ovld __cnfn mul_hi(ulong8, ulong8);
+long16 __ovld __cnfn mul_hi(long16, long16);
+ulong16 __ovld __cnfn mul_hi(ulong16, ulong16);
+
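+// Illustrative sketch (not part of the upstream header): mul_hi() together
+// with an ordinary multiply reconstructs the full double-width product:
+//
+//   uint a = 0xDEADBEEFu, b = 0x12345678u;
+//   uint lo = a * b;                 // low 32 bits of the 64-bit product
+//   uint hi = mul_hi(a, b);          // high 32 bits of the 64-bit product
+//   ulong full = upsample(hi, lo);   // the exact 64-bit product
+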
+/**
+ * For each element in v, the bits are shifted left by
+ * the number of bits given by the corresponding
+ * element in i (subject to usual shift modulo rules
+ * described in section 6.3). Bits shifted off the left
+ * side of the element are shifted back in from the
+ * right.
+ */
+char __ovld __cnfn rotate(char, char);
+uchar __ovld __cnfn rotate(uchar, uchar);
+char2 __ovld __cnfn rotate(char2, char2);
+uchar2 __ovld __cnfn rotate(uchar2, uchar2);
+char3 __ovld __cnfn rotate(char3, char3);
+uchar3 __ovld __cnfn rotate(uchar3, uchar3);
+char4 __ovld __cnfn rotate(char4, char4);
+uchar4 __ovld __cnfn rotate(uchar4, uchar4);
+char8 __ovld __cnfn rotate(char8, char8);
+uchar8 __ovld __cnfn rotate(uchar8, uchar8);
+char16 __ovld __cnfn rotate(char16, char16);
+uchar16 __ovld __cnfn rotate(uchar16, uchar16);
+short __ovld __cnfn rotate(short, short);
+ushort __ovld __cnfn rotate(ushort, ushort);
+short2 __ovld __cnfn rotate(short2, short2);
+ushort2 __ovld __cnfn rotate(ushort2, ushort2);
+short3 __ovld __cnfn rotate(short3, short3);
+ushort3 __ovld __cnfn rotate(ushort3, ushort3);
+short4 __ovld __cnfn rotate(short4, short4);
+ushort4 __ovld __cnfn rotate(ushort4, ushort4);
+short8 __ovld __cnfn rotate(short8, short8);
+ushort8 __ovld __cnfn rotate(ushort8, ushort8);
+short16 __ovld __cnfn rotate(short16, short16);
+ushort16 __ovld __cnfn rotate(ushort16, ushort16);
+int __ovld __cnfn rotate(int, int);
+uint __ovld __cnfn rotate(uint, uint);
+int2 __ovld __cnfn rotate(int2, int2);
+uint2 __ovld __cnfn rotate(uint2, uint2);
+int3 __ovld __cnfn rotate(int3, int3);
+uint3 __ovld __cnfn rotate(uint3, uint3);
+int4 __ovld __cnfn rotate(int4, int4);
+uint4 __ovld __cnfn rotate(uint4, uint4);
+int8 __ovld __cnfn rotate(int8, int8);
+uint8 __ovld __cnfn rotate(uint8, uint8);
+int16 __ovld __cnfn rotate(int16, int16);
+uint16 __ovld __cnfn rotate(uint16, uint16);
+long __ovld __cnfn rotate(long, long);
+ulong __ovld __cnfn rotate(ulong, ulong);
+long2 __ovld __cnfn rotate(long2, long2);
+ulong2 __ovld __cnfn rotate(ulong2, ulong2);
+long3 __ovld __cnfn rotate(long3, long3);
+ulong3 __ovld __cnfn rotate(ulong3, ulong3);
+long4 __ovld __cnfn rotate(long4, long4);
+ulong4 __ovld __cnfn rotate(ulong4, ulong4);
+long8 __ovld __cnfn rotate(long8, long8);
+ulong8 __ovld __cnfn rotate(ulong8, ulong8);
+long16 __ovld __cnfn rotate(long16, long16);
+ulong16 __ovld __cnfn rotate(ulong16, ulong16);
+
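+// Illustrative note (not part of the upstream header): unlike <<, the bits
+// shifted out of the top re-enter at the bottom:
+//
+//   uchar r = rotate((uchar)0x81, (uchar)1);  // 0x03
+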
+/**
+ * Returns x - y and saturates the result.
+ */
+char __ovld __cnfn sub_sat(char, char);
+uchar __ovld __cnfn sub_sat(uchar, uchar);
+char2 __ovld __cnfn sub_sat(char2, char2);
+uchar2 __ovld __cnfn sub_sat(uchar2, uchar2);
+char3 __ovld __cnfn sub_sat(char3, char3);
+uchar3 __ovld __cnfn sub_sat(uchar3, uchar3);
+char4 __ovld __cnfn sub_sat(char4, char4);
+uchar4 __ovld __cnfn sub_sat(uchar4, uchar4);
+char8 __ovld __cnfn sub_sat(char8, char8);
+uchar8 __ovld __cnfn sub_sat(uchar8, uchar8);
+char16 __ovld __cnfn sub_sat(char16, char16);
+uchar16 __ovld __cnfn sub_sat(uchar16, uchar16);
+short __ovld __cnfn sub_sat(short, short);
+ushort __ovld __cnfn sub_sat(ushort, ushort);
+short2 __ovld __cnfn sub_sat(short2, short2);
+ushort2 __ovld __cnfn sub_sat(ushort2, ushort2);
+short3 __ovld __cnfn sub_sat(short3, short3);
+ushort3 __ovld __cnfn sub_sat(ushort3, ushort3);
+short4 __ovld __cnfn sub_sat(short4, short4);
+ushort4 __ovld __cnfn sub_sat(ushort4, ushort4);
+short8 __ovld __cnfn sub_sat(short8, short8);
+ushort8 __ovld __cnfn sub_sat(ushort8, ushort8);
+short16 __ovld __cnfn sub_sat(short16, short16);
+ushort16 __ovld __cnfn sub_sat(ushort16, ushort16);
+int __ovld __cnfn sub_sat(int, int);
+uint __ovld __cnfn sub_sat(uint, uint);
+int2 __ovld __cnfn sub_sat(int2, int2);
+uint2 __ovld __cnfn sub_sat(uint2, uint2);
+int3 __ovld __cnfn sub_sat(int3, int3);
+uint3 __ovld __cnfn sub_sat(uint3, uint3);
+int4 __ovld __cnfn sub_sat(int4, int4);
+uint4 __ovld __cnfn sub_sat(uint4, uint4);
+int8 __ovld __cnfn sub_sat(int8, int8);
+uint8 __ovld __cnfn sub_sat(uint8, uint8);
+int16 __ovld __cnfn sub_sat(int16, int16);
+uint16 __ovld __cnfn sub_sat(uint16, uint16);
+long __ovld __cnfn sub_sat(long, long);
+ulong __ovld __cnfn sub_sat(ulong, ulong);
+long2 __ovld __cnfn sub_sat(long2, long2);
+ulong2 __ovld __cnfn sub_sat(ulong2, ulong2);
+long3 __ovld __cnfn sub_sat(long3, long3);
+ulong3 __ovld __cnfn sub_sat(ulong3, ulong3);
+long4 __ovld __cnfn sub_sat(long4, long4);
+ulong4 __ovld __cnfn sub_sat(ulong4, ulong4);
+long8 __ovld __cnfn sub_sat(long8, long8);
+ulong8 __ovld __cnfn sub_sat(ulong8, ulong8);
+long16 __ovld __cnfn sub_sat(long16, long16);
+ulong16 __ovld __cnfn sub_sat(ulong16, ulong16);
+
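+// Illustrative note (not part of the upstream header): sub_sat() clamps
+// instead of wrapping on underflow or overflow:
+//
+//   uchar a = sub_sat((uchar)10, (uchar)20);    // 0, not 246
+//   char  b = sub_sat((char)-100, (char)100);   // -128, not 56
+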
+/**
+ * result[i] = ((short)hi[i] << 8) | lo[i]
+ * result[i] = ((ushort)hi[i] << 8) | lo[i]
+ */
+short __ovld __cnfn upsample(char, uchar);
+ushort __ovld __cnfn upsample(uchar, uchar);
+short2 __ovld __cnfn upsample(char2, uchar2);
+short3 __ovld __cnfn upsample(char3, uchar3);
+short4 __ovld __cnfn upsample(char4, uchar4);
+short8 __ovld __cnfn upsample(char8, uchar8);
+short16 __ovld __cnfn upsample(char16, uchar16);
+ushort2 __ovld __cnfn upsample(uchar2, uchar2);
+ushort3 __ovld __cnfn upsample(uchar3, uchar3);
+ushort4 __ovld __cnfn upsample(uchar4, uchar4);
+ushort8 __ovld __cnfn upsample(uchar8, uchar8);
+ushort16 __ovld __cnfn upsample(uchar16, uchar16);
+
+/**
+ * result[i] = ((int)hi[i] << 16) | lo[i]
+ * result[i] = ((uint)hi[i] << 16) | lo[i]
+ */
+int __ovld __cnfn upsample(short, ushort);
+uint __ovld __cnfn upsample(ushort, ushort);
+int2 __ovld __cnfn upsample(short2, ushort2);
+int3 __ovld __cnfn upsample(short3, ushort3);
+int4 __ovld __cnfn upsample(short4, ushort4);
+int8 __ovld __cnfn upsample(short8, ushort8);
+int16 __ovld __cnfn upsample(short16, ushort16);
+uint2 __ovld __cnfn upsample(ushort2, ushort2);
+uint3 __ovld __cnfn upsample(ushort3, ushort3);
+uint4 __ovld __cnfn upsample(ushort4, ushort4);
+uint8 __ovld __cnfn upsample(ushort8, ushort8);
+uint16 __ovld __cnfn upsample(ushort16, ushort16);
+
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int, uint);
+ulong __ovld __cnfn upsample(uint, uint);
+long2 __ovld __cnfn upsample(int2, uint2);
+long3 __ovld __cnfn upsample(int3, uint3);
+long4 __ovld __cnfn upsample(int4, uint4);
+long8 __ovld __cnfn upsample(int8, uint8);
+long16 __ovld __cnfn upsample(int16, uint16);
+ulong2 __ovld __cnfn upsample(uint2, uint2);
+ulong3 __ovld __cnfn upsample(uint3, uint3);
+ulong4 __ovld __cnfn upsample(uint4, uint4);
+ulong8 __ovld __cnfn upsample(uint8, uint8);
+ulong16 __ovld __cnfn upsample(uint16, uint16);
+
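+// Illustrative note (not part of the upstream header): upsample()
+// concatenates a high and a low half into the next wider type:
+//
+//   ushort a = upsample((uchar)0x12, (uchar)0x34);  // 0x1234
+//   ulong  b = upsample(0xDEADBEEFu, 0xCAFEF00Du);  // 0xDEADBEEFCAFEF00D
+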
+/**
+ * popcount(x): returns the number of set bits in x.
+ */
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+char __ovld __cnfn popcount(char);
+uchar __ovld __cnfn popcount(uchar);
+char2 __ovld __cnfn popcount(char2);
+uchar2 __ovld __cnfn popcount(uchar2);
+char3 __ovld __cnfn popcount(char3);
+uchar3 __ovld __cnfn popcount(uchar3);
+char4 __ovld __cnfn popcount(char4);
+uchar4 __ovld __cnfn popcount(uchar4);
+char8 __ovld __cnfn popcount(char8);
+uchar8 __ovld __cnfn popcount(uchar8);
+char16 __ovld __cnfn popcount(char16);
+uchar16 __ovld __cnfn popcount(uchar16);
+short __ovld __cnfn popcount(short);
+ushort __ovld __cnfn popcount(ushort);
+short2 __ovld __cnfn popcount(short2);
+ushort2 __ovld __cnfn popcount(ushort2);
+short3 __ovld __cnfn popcount(short3);
+ushort3 __ovld __cnfn popcount(ushort3);
+short4 __ovld __cnfn popcount(short4);
+ushort4 __ovld __cnfn popcount(ushort4);
+short8 __ovld __cnfn popcount(short8);
+ushort8 __ovld __cnfn popcount(ushort8);
+short16 __ovld __cnfn popcount(short16);
+ushort16 __ovld __cnfn popcount(ushort16);
+int __ovld __cnfn popcount(int);
+uint __ovld __cnfn popcount(uint);
+int2 __ovld __cnfn popcount(int2);
+uint2 __ovld __cnfn popcount(uint2);
+int3 __ovld __cnfn popcount(int3);
+uint3 __ovld __cnfn popcount(uint3);
+int4 __ovld __cnfn popcount(int4);
+uint4 __ovld __cnfn popcount(uint4);
+int8 __ovld __cnfn popcount(int8);
+uint8 __ovld __cnfn popcount(uint8);
+int16 __ovld __cnfn popcount(int16);
+uint16 __ovld __cnfn popcount(uint16);
+long __ovld __cnfn popcount(long);
+ulong __ovld __cnfn popcount(ulong);
+long2 __ovld __cnfn popcount(long2);
+ulong2 __ovld __cnfn popcount(ulong2);
+long3 __ovld __cnfn popcount(long3);
+ulong3 __ovld __cnfn popcount(ulong3);
+long4 __ovld __cnfn popcount(long4);
+ulong4 __ovld __cnfn popcount(ulong4);
+long8 __ovld __cnfn popcount(long8);
+ulong8 __ovld __cnfn popcount(ulong8);
+long16 __ovld __cnfn popcount(long16);
+ulong16 __ovld __cnfn popcount(ulong16);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
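+// Illustrative note (not part of the upstream header): popcount() applies
+// per element on vector types:
+//
+//   uint n = popcount(0xF0F0u);               // 8
+//   int4 m = popcount((int4)(0, 1, 3, 7));    // (0, 1, 2, 3)
+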
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to the definition of mul24 to see how the
+ * 24-bit integer multiplication is performed.
+ */
+int __ovld __cnfn mad24(int, int, int);
+uint __ovld __cnfn mad24(uint, uint, uint);
+int2 __ovld __cnfn mad24(int2, int2, int2);
+uint2 __ovld __cnfn mad24(uint2, uint2, uint2);
+int3 __ovld __cnfn mad24(int3, int3, int3);
+uint3 __ovld __cnfn mad24(uint3, uint3, uint3);
+int4 __ovld __cnfn mad24(int4, int4, int4);
+uint4 __ovld __cnfn mad24(uint4, uint4, uint4);
+int8 __ovld __cnfn mad24(int8, int8, int8);
+uint8 __ovld __cnfn mad24(uint8, uint8, uint8);
+int16 __ovld __cnfn mad24(int16, int16, int16);
+uint16 __ovld __cnfn mad24(uint16, uint16, uint16);
+
+/**
+ * Multiply two 24-bit integer values x and y. x and y
+ * are 32-bit integers but only the low 24 bits are used
+ * to perform the multiplication. mul24 should only be
+ * used when the values of x and y are in the range
+ * [-2^23, 2^23-1] if x and y are signed integers, and
+ * in the range [0, 2^24-1] if x and y are unsigned
+ * integers. If x and y are not in this range, the
+ * multiplication result is implementation-defined.
+ */
+int __ovld __cnfn mul24(int, int);
+uint __ovld __cnfn mul24(uint, uint);
+int2 __ovld __cnfn mul24(int2, int2);
+uint2 __ovld __cnfn mul24(uint2, uint2);
+int3 __ovld __cnfn mul24(int3, int3);
+uint3 __ovld __cnfn mul24(uint3, uint3);
+int4 __ovld __cnfn mul24(int4, int4);
+uint4 __ovld __cnfn mul24(uint4, uint4);
+int8 __ovld __cnfn mul24(int8, int8);
+uint8 __ovld __cnfn mul24(uint8, uint8);
+int16 __ovld __cnfn mul24(int16, int16);
+uint16 __ovld __cnfn mul24(uint16, uint16);
+
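+// Illustrative sketch (not part of the upstream header): mad24() suits
+// work-item index arithmetic, where values stay well under 2^24. Assuming
+// the launch dimensions are below 2^24:
+//
+//   int gid = mad24((int)get_group_id(0), (int)get_local_size(0),
+//                   (int)get_local_id(0));  // flattened global index
+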
+// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
+
+/**
+ * Returns fmin(fmax(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+float __ovld __cnfn clamp(float, float, float);
+float2 __ovld __cnfn clamp(float2, float2, float2);
+float3 __ovld __cnfn clamp(float3, float3, float3);
+float4 __ovld __cnfn clamp(float4, float4, float4);
+float8 __ovld __cnfn clamp(float8, float8, float8);
+float16 __ovld __cnfn clamp(float16, float16, float16);
+float2 __ovld __cnfn clamp(float2, float, float);
+float3 __ovld __cnfn clamp(float3, float, float);
+float4 __ovld __cnfn clamp(float4, float, float);
+float8 __ovld __cnfn clamp(float8, float, float);
+float16 __ovld __cnfn clamp(float16, float, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn clamp(double, double, double);
+double2 __ovld __cnfn clamp(double2, double2, double2);
+double3 __ovld __cnfn clamp(double3, double3, double3);
+double4 __ovld __cnfn clamp(double4, double4, double4);
+double8 __ovld __cnfn clamp(double8, double8, double8);
+double16 __ovld __cnfn clamp(double16, double16, double16);
+double2 __ovld __cnfn clamp(double2, double, double);
+double3 __ovld __cnfn clamp(double3, double, double);
+double4 __ovld __cnfn clamp(double4, double, double);
+double8 __ovld __cnfn clamp(double8, double, double);
+double16 __ovld __cnfn clamp(double16, double, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn clamp(half, half, half);
+half2 __ovld __cnfn clamp(half2, half2, half2);
+half3 __ovld __cnfn clamp(half3, half3, half3);
+half4 __ovld __cnfn clamp(half4, half4, half4);
+half8 __ovld __cnfn clamp(half8, half8, half8);
+half16 __ovld __cnfn clamp(half16, half16, half16);
+half2 __ovld __cnfn clamp(half2, half, half);
+half3 __ovld __cnfn clamp(half3, half, half);
+half4 __ovld __cnfn clamp(half4, half, half);
+half8 __ovld __cnfn clamp(half8, half, half);
+half16 __ovld __cnfn clamp(half16, half, half);
+#endif //cl_khr_fp16
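+
+/*
+ * Worked example of the definition above: clamp(2.5f, 0.0f, 1.0f) ==
+ * fmin(fmax(2.5f, 0.0f), 1.0f) == 1.0f, and the vector-scalar form
+ * clamp((float2)(-1.0f, 0.5f), 0.0f, 1.0f) yields (float2)(0.0f, 0.5f).
+ */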
+
+/**
+ * Converts radians to degrees, i.e. (180 / PI) *
+ * radians.
+ */
+float __ovld __cnfn degrees(float);
+float2 __ovld __cnfn degrees(float2);
+float3 __ovld __cnfn degrees(float3);
+float4 __ovld __cnfn degrees(float4);
+float8 __ovld __cnfn degrees(float8);
+float16 __ovld __cnfn degrees(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn degrees(double);
+double2 __ovld __cnfn degrees(double2);
+double3 __ovld __cnfn degrees(double3);
+double4 __ovld __cnfn degrees(double4);
+double8 __ovld __cnfn degrees(double8);
+double16 __ovld __cnfn degrees(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn degrees(half);
+half2 __ovld __cnfn degrees(half2);
+half3 __ovld __cnfn degrees(half3);
+half4 __ovld __cnfn degrees(half4);
+half8 __ovld __cnfn degrees(half8);
+half16 __ovld __cnfn degrees(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if x < y, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn max(float, float);
+float2 __ovld __cnfn max(float2, float2);
+float3 __ovld __cnfn max(float3, float3);
+float4 __ovld __cnfn max(float4, float4);
+float8 __ovld __cnfn max(float8, float8);
+float16 __ovld __cnfn max(float16, float16);
+float2 __ovld __cnfn max(float2, float);
+float3 __ovld __cnfn max(float3, float);
+float4 __ovld __cnfn max(float4, float);
+float8 __ovld __cnfn max(float8, float);
+float16 __ovld __cnfn max(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn max(double, double);
+double2 __ovld __cnfn max(double2, double2);
+double3 __ovld __cnfn max(double3, double3);
+double4 __ovld __cnfn max(double4, double4);
+double8 __ovld __cnfn max(double8, double8);
+double16 __ovld __cnfn max(double16, double16);
+double2 __ovld __cnfn max(double2, double);
+double3 __ovld __cnfn max(double3, double);
+double4 __ovld __cnfn max(double4, double);
+double8 __ovld __cnfn max(double8, double);
+double16 __ovld __cnfn max(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn max(half, half);
+half2 __ovld __cnfn max(half2, half2);
+half3 __ovld __cnfn max(half3, half3);
+half4 __ovld __cnfn max(half4, half4);
+half8 __ovld __cnfn max(half8, half8);
+half16 __ovld __cnfn max(half16, half16);
+half2 __ovld __cnfn max(half2, half);
+half3 __ovld __cnfn max(half3, half);
+half4 __ovld __cnfn max(half4, half);
+half8 __ovld __cnfn max(half8, half);
+half16 __ovld __cnfn max(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn min(float, float);
+float2 __ovld __cnfn min(float2, float2);
+float3 __ovld __cnfn min(float3, float3);
+float4 __ovld __cnfn min(float4, float4);
+float8 __ovld __cnfn min(float8, float8);
+float16 __ovld __cnfn min(float16, float16);
+float2 __ovld __cnfn min(float2, float);
+float3 __ovld __cnfn min(float3, float);
+float4 __ovld __cnfn min(float4, float);
+float8 __ovld __cnfn min(float8, float);
+float16 __ovld __cnfn min(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn min(double, double);
+double2 __ovld __cnfn min(double2, double2);
+double3 __ovld __cnfn min(double3, double3);
+double4 __ovld __cnfn min(double4, double4);
+double8 __ovld __cnfn min(double8, double8);
+double16 __ovld __cnfn min(double16, double16);
+double2 __ovld __cnfn min(double2, double);
+double3 __ovld __cnfn min(double3, double);
+double4 __ovld __cnfn min(double4, double);
+double8 __ovld __cnfn min(double8, double);
+double16 __ovld __cnfn min(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn min(half, half);
+half2 __ovld __cnfn min(half2, half2);
+half3 __ovld __cnfn min(half3, half3);
+half4 __ovld __cnfn min(half4, half4);
+half8 __ovld __cnfn min(half8, half8);
+half16 __ovld __cnfn min(half16, half16);
+half2 __ovld __cnfn min(half2, half);
+half3 __ovld __cnfn min(half3, half);
+half4 __ovld __cnfn min(half4, half);
+half8 __ovld __cnfn min(half8, half);
+half16 __ovld __cnfn min(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the linear blend of x and y, implemented as:
+ * x + (y - x) * a
+ * a must be a value in the range 0.0 ... 1.0. If a is not
+ * in that range, the return values are undefined.
+ */
+float __ovld __cnfn mix(float, float, float);
+float2 __ovld __cnfn mix(float2, float2, float2);
+float3 __ovld __cnfn mix(float3, float3, float3);
+float4 __ovld __cnfn mix(float4, float4, float4);
+float8 __ovld __cnfn mix(float8, float8, float8);
+float16 __ovld __cnfn mix(float16, float16, float16);
+float2 __ovld __cnfn mix(float2, float2, float);
+float3 __ovld __cnfn mix(float3, float3, float);
+float4 __ovld __cnfn mix(float4, float4, float);
+float8 __ovld __cnfn mix(float8, float8, float);
+float16 __ovld __cnfn mix(float16, float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mix(double, double, double);
+double2 __ovld __cnfn mix(double2, double2, double2);
+double3 __ovld __cnfn mix(double3, double3, double3);
+double4 __ovld __cnfn mix(double4, double4, double4);
+double8 __ovld __cnfn mix(double8, double8, double8);
+double16 __ovld __cnfn mix(double16, double16, double16);
+double2 __ovld __cnfn mix(double2, double2, double);
+double3 __ovld __cnfn mix(double3, double3, double);
+double4 __ovld __cnfn mix(double4, double4, double);
+double8 __ovld __cnfn mix(double8, double8, double);
+double16 __ovld __cnfn mix(double16, double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mix(half, half, half);
+half2 __ovld __cnfn mix(half2, half2, half2);
+half3 __ovld __cnfn mix(half3, half3, half3);
+half4 __ovld __cnfn mix(half4, half4, half4);
+half8 __ovld __cnfn mix(half8, half8, half8);
+half16 __ovld __cnfn mix(half16, half16, half16);
+half2 __ovld __cnfn mix(half2, half2, half);
+half3 __ovld __cnfn mix(half3, half3, half);
+half4 __ovld __cnfn mix(half4, half4, half);
+half8 __ovld __cnfn mix(half8, half8, half);
+half16 __ovld __cnfn mix(half16, half16, half);
+#endif //cl_khr_fp16
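+
+/*
+ * Worked example: mix is a plain linear interpolation, so
+ * mix(2.0f, 4.0f, 0.25f) == 2.0f + (4.0f - 2.0f) * 0.25f == 2.5f.
+ * A blend factor outside [0.0, 1.0] is undefined, not an extrapolation.
+ */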
+
+/**
+ * Converts degrees to radians, i.e. (PI / 180) *
+ * degrees.
+ */
+float __ovld __cnfn radians(float);
+float2 __ovld __cnfn radians(float2);
+float3 __ovld __cnfn radians(float3);
+float4 __ovld __cnfn radians(float4);
+float8 __ovld __cnfn radians(float8);
+float16 __ovld __cnfn radians(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn radians(double);
+double2 __ovld __cnfn radians(double2);
+double3 __ovld __cnfn radians(double3);
+double4 __ovld __cnfn radians(double4);
+double8 __ovld __cnfn radians(double8);
+double16 __ovld __cnfn radians(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn radians(half);
+half2 __ovld __cnfn radians(half2);
+half3 __ovld __cnfn radians(half3);
+half4 __ovld __cnfn radians(half4);
+half8 __ovld __cnfn radians(half8);
+half16 __ovld __cnfn radians(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x < edge, otherwise it returns 1.0.
+ */
+float __ovld __cnfn step(float, float);
+float2 __ovld __cnfn step(float2, float2);
+float3 __ovld __cnfn step(float3, float3);
+float4 __ovld __cnfn step(float4, float4);
+float8 __ovld __cnfn step(float8, float8);
+float16 __ovld __cnfn step(float16, float16);
+float2 __ovld __cnfn step(float, float2);
+float3 __ovld __cnfn step(float, float3);
+float4 __ovld __cnfn step(float, float4);
+float8 __ovld __cnfn step(float, float8);
+float16 __ovld __cnfn step(float, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double, double);
+double2 __ovld __cnfn step(double2, double2);
+double3 __ovld __cnfn step(double3, double3);
+double4 __ovld __cnfn step(double4, double4);
+double8 __ovld __cnfn step(double8, double8);
+double16 __ovld __cnfn step(double16, double16);
+double2 __ovld __cnfn step(double, double2);
+double3 __ovld __cnfn step(double, double3);
+double4 __ovld __cnfn step(double, double4);
+double8 __ovld __cnfn step(double, double8);
+double16 __ovld __cnfn step(double, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half, half);
+half2 __ovld __cnfn step(half2, half2);
+half3 __ovld __cnfn step(half3, half3);
+half4 __ovld __cnfn step(half4, half4);
+half8 __ovld __cnfn step(half8, half8);
+half16 __ovld __cnfn step(half16, half16);
+half2 __ovld __cnfn step(half, half2);
+half3 __ovld __cnfn step(half, half3);
+half4 __ovld __cnfn step(half, half4);
+half8 __ovld __cnfn step(half, half8);
+half16 __ovld __cnfn step(half, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
+ */
+float __ovld __cnfn smoothstep(float, float, float);
+float2 __ovld __cnfn smoothstep(float2, float2, float2);
+float3 __ovld __cnfn smoothstep(float3, float3, float3);
+float4 __ovld __cnfn smoothstep(float4, float4, float4);
+float8 __ovld __cnfn smoothstep(float8, float8, float8);
+float16 __ovld __cnfn smoothstep(float16, float16, float16);
+float2 __ovld __cnfn smoothstep(float, float, float2);
+float3 __ovld __cnfn smoothstep(float, float, float3);
+float4 __ovld __cnfn smoothstep(float, float, float4);
+float8 __ovld __cnfn smoothstep(float, float, float8);
+float16 __ovld __cnfn smoothstep(float, float, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn smoothstep(double, double, double);
+double2 __ovld __cnfn smoothstep(double2, double2, double2);
+double3 __ovld __cnfn smoothstep(double3, double3, double3);
+double4 __ovld __cnfn smoothstep(double4, double4, double4);
+double8 __ovld __cnfn smoothstep(double8, double8, double8);
+double16 __ovld __cnfn smoothstep(double16, double16, double16);
+double2 __ovld __cnfn smoothstep(double, double, double2);
+double3 __ovld __cnfn smoothstep(double, double, double3);
+double4 __ovld __cnfn smoothstep(double, double, double4);
+double8 __ovld __cnfn smoothstep(double, double, double8);
+double16 __ovld __cnfn smoothstep(double, double, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn smoothstep(half, half, half);
+half2 __ovld __cnfn smoothstep(half2, half2, half2);
+half3 __ovld __cnfn smoothstep(half3, half3, half3);
+half4 __ovld __cnfn smoothstep(half4, half4, half4);
+half8 __ovld __cnfn smoothstep(half8, half8, half8);
+half16 __ovld __cnfn smoothstep(half16, half16, half16);
+half2 __ovld __cnfn smoothstep(half, half, half2);
+half3 __ovld __cnfn smoothstep(half, half, half3);
+half4 __ovld __cnfn smoothstep(half, half, half4);
+half8 __ovld __cnfn smoothstep(half, half, half8);
+half16 __ovld __cnfn smoothstep(half, half, half16);
+#endif //cl_khr_fp16
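+
+/*
+ * The reference expansion above, written out as a plain float helper to
+ * make the Hermite shape explicit (a sketch, not part of the header):
+ *
+ *   float smoothstep_ref(float edge0, float edge1, float x) {
+ *       float t = clamp((x - edge0) / (edge1 - edge0), 0.0f, 1.0f);
+ *       return t * t * (3.0f - 2.0f * t); // 0 at edge0, 1 at edge1
+ *   }
+ */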
+
+/**
+ * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
+ * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
+ */
+float __ovld __cnfn sign(float);
+float2 __ovld __cnfn sign(float2);
+float3 __ovld __cnfn sign(float3);
+float4 __ovld __cnfn sign(float4);
+float8 __ovld __cnfn sign(float8);
+float16 __ovld __cnfn sign(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sign(double);
+double2 __ovld __cnfn sign(double2);
+double3 __ovld __cnfn sign(double3);
+double4 __ovld __cnfn sign(double4);
+double8 __ovld __cnfn sign(double8);
+double16 __ovld __cnfn sign(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sign(half);
+half2 __ovld __cnfn sign(half2);
+half3 __ovld __cnfn sign(half3);
+half4 __ovld __cnfn sign(half4);
+half8 __ovld __cnfn sign(half8);
+half16 __ovld __cnfn sign(half16);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
+
+/**
+ * Returns the cross product of p0.xyz and p1.xyz. The
+ * w component of the float4 result will be 0.0.
+ */
+float4 __ovld __cnfn cross(float4, float4);
+float3 __ovld __cnfn cross(float3, float3);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4, double4);
+double3 __ovld __cnfn cross(double3, double3);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4, half4);
+half3 __ovld __cnfn cross(half3, half3);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float, float);
+float __ovld __cnfn dot(float2, float2);
+float __ovld __cnfn dot(float3, float3);
+float __ovld __cnfn dot(float4, float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double, double);
+double __ovld __cnfn dot(double2, double2);
+double __ovld __cnfn dot(double3, double3);
+double __ovld __cnfn dot(double4, double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half, half);
+half __ovld __cnfn dot(half2, half2);
+half __ovld __cnfn dot(half3, half3);
+half __ovld __cnfn dot(half4, half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float, float);
+float __ovld __cnfn distance(float2, float2);
+float __ovld __cnfn distance(float3, float3);
+float __ovld __cnfn distance(float4, float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double, double);
+double __ovld __cnfn distance(double2, double2);
+double __ovld __cnfn distance(double3, double3);
+double __ovld __cnfn distance(double4, double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half, half);
+half __ovld __cnfn distance(half2, half2);
+half __ovld __cnfn distance(half3, half3);
+half __ovld __cnfn distance(half4, half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float);
+float __ovld __cnfn length(float2);
+float __ovld __cnfn length(float3);
+float __ovld __cnfn length(float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double);
+double __ovld __cnfn length(double2);
+double __ovld __cnfn length(double3);
+double __ovld __cnfn length(double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half);
+half __ovld __cnfn length(half2);
+half __ovld __cnfn length(half3);
+half __ovld __cnfn length(half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float);
+float2 __ovld __cnfn normalize(float2);
+float3 __ovld __cnfn normalize(float3);
+float4 __ovld __cnfn normalize(float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double);
+double2 __ovld __cnfn normalize(double2);
+double3 __ovld __cnfn normalize(double3);
+double4 __ovld __cnfn normalize(double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half);
+half2 __ovld __cnfn normalize(half2);
+half3 __ovld __cnfn normalize(half3);
+half4 __ovld __cnfn normalize(half4);
+#endif //cl_khr_fp16
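+
+/*
+ * Worked example: the geometric definitions compose. For
+ * p = (float3)(3.0f, 0.0f, 4.0f):
+ *   dot(p, p)    == 25.0f
+ *   length(p)    == sqrt(dot(p, p)) == 5.0f
+ *   normalize(p) == p / length(p)  == (float3)(0.6f, 0.0f, 0.8f)
+ *   distance(p, (float3)(0.0f)) == length(p) == 5.0f
+ */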
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float, float);
+float __ovld __cnfn fast_distance(float2, float2);
+float __ovld __cnfn fast_distance(float3, float3);
+float __ovld __cnfn fast_distance(float4, float4);
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float);
+float __ovld __cnfn fast_length(float2);
+float __ovld __cnfn fast_length(float3);
+float __ovld __cnfn fast_length(float4);
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps error from the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the values of the floating-point elements in
+ * the result vector are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may simply return p.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float);
+float2 __ovld __cnfn fast_normalize(float2);
+float3 __ovld __cnfn fast_normalize(float3);
+float4 __ovld __cnfn fast_normalize(float4);
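+
+/*
+ * In other words (per the definitions above): fast_length(p) is
+ * half_sqrt(dot(p, p)), fast_distance(a, b) is fast_length(a - b), and
+ * fast_normalize(p) is p * half_rsqrt(dot(p, p)) with an 8192-ulp error
+ * bound, where length/distance/normalize compute the same quantities at
+ * full precision.
+ */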
+
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float, float);
+int2 __ovld __cnfn isequal(float2, float2);
+int3 __ovld __cnfn isequal(float3, float3);
+int4 __ovld __cnfn isequal(float4, float4);
+int8 __ovld __cnfn isequal(float8, float8);
+int16 __ovld __cnfn isequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double, double);
+long2 __ovld __cnfn isequal(double2, double2);
+long3 __ovld __cnfn isequal(double3, double3);
+long4 __ovld __cnfn isequal(double4, double4);
+long8 __ovld __cnfn isequal(double8, double8);
+long16 __ovld __cnfn isequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half, half);
+short2 __ovld __cnfn isequal(half2, half2);
+short3 __ovld __cnfn isequal(half3, half3);
+short4 __ovld __cnfn isequal(half4, half4);
+short8 __ovld __cnfn isequal(half8, half8);
+short16 __ovld __cnfn isequal(half16, half16);
+#endif //cl_khr_fp16
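+
+/*
+ * Note on all the relational built-ins in this section: scalar forms
+ * return 1 for true and 0 for false, while vector forms return all-ones
+ * lanes (-1) for true and 0 for false, e.g.
+ * isequal((float2)(1.0f, 2.0f), (float2)(1.0f, 0.0f)) == (int2)(-1, 0).
+ */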
+
+/**
+ * Returns the component-wise compare of x != y.
+ */
+int __ovld __cnfn isnotequal(float, float);
+int2 __ovld __cnfn isnotequal(float2, float2);
+int3 __ovld __cnfn isnotequal(float3, float3);
+int4 __ovld __cnfn isnotequal(float4, float4);
+int8 __ovld __cnfn isnotequal(float8, float8);
+int16 __ovld __cnfn isnotequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnotequal(double, double);
+long2 __ovld __cnfn isnotequal(double2, double2);
+long3 __ovld __cnfn isnotequal(double3, double3);
+long4 __ovld __cnfn isnotequal(double4, double4);
+long8 __ovld __cnfn isnotequal(double8, double8);
+long16 __ovld __cnfn isnotequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnotequal(half, half);
+short2 __ovld __cnfn isnotequal(half2, half2);
+short3 __ovld __cnfn isnotequal(half3, half3);
+short4 __ovld __cnfn isnotequal(half4, half4);
+short8 __ovld __cnfn isnotequal(half8, half8);
+short16 __ovld __cnfn isnotequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x > y.
+ */
+int __ovld __cnfn isgreater(float, float);
+int2 __ovld __cnfn isgreater(float2, float2);
+int3 __ovld __cnfn isgreater(float3, float3);
+int4 __ovld __cnfn isgreater(float4, float4);
+int8 __ovld __cnfn isgreater(float8, float8);
+int16 __ovld __cnfn isgreater(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreater(double, double);
+long2 __ovld __cnfn isgreater(double2, double2);
+long3 __ovld __cnfn isgreater(double3, double3);
+long4 __ovld __cnfn isgreater(double4, double4);
+long8 __ovld __cnfn isgreater(double8, double8);
+long16 __ovld __cnfn isgreater(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreater(half, half);
+short2 __ovld __cnfn isgreater(half2, half2);
+short3 __ovld __cnfn isgreater(half3, half3);
+short4 __ovld __cnfn isgreater(half4, half4);
+short8 __ovld __cnfn isgreater(half8, half8);
+short16 __ovld __cnfn isgreater(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x >= y.
+ */
+int __ovld __cnfn isgreaterequal(float, float);
+int2 __ovld __cnfn isgreaterequal(float2, float2);
+int3 __ovld __cnfn isgreaterequal(float3, float3);
+int4 __ovld __cnfn isgreaterequal(float4, float4);
+int8 __ovld __cnfn isgreaterequal(float8, float8);
+int16 __ovld __cnfn isgreaterequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreaterequal(double, double);
+long2 __ovld __cnfn isgreaterequal(double2, double2);
+long3 __ovld __cnfn isgreaterequal(double3, double3);
+long4 __ovld __cnfn isgreaterequal(double4, double4);
+long8 __ovld __cnfn isgreaterequal(double8, double8);
+long16 __ovld __cnfn isgreaterequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreaterequal(half, half);
+short2 __ovld __cnfn isgreaterequal(half2, half2);
+short3 __ovld __cnfn isgreaterequal(half3, half3);
+short4 __ovld __cnfn isgreaterequal(half4, half4);
+short8 __ovld __cnfn isgreaterequal(half8, half8);
+short16 __ovld __cnfn isgreaterequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x < y.
+ */
+int __ovld __cnfn isless(float, float);
+int2 __ovld __cnfn isless(float2, float2);
+int3 __ovld __cnfn isless(float3, float3);
+int4 __ovld __cnfn isless(float4, float4);
+int8 __ovld __cnfn isless(float8, float8);
+int16 __ovld __cnfn isless(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isless(double, double);
+long2 __ovld __cnfn isless(double2, double2);
+long3 __ovld __cnfn isless(double3, double3);
+long4 __ovld __cnfn isless(double4, double4);
+long8 __ovld __cnfn isless(double8, double8);
+long16 __ovld __cnfn isless(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isless(half, half);
+short2 __ovld __cnfn isless(half2, half2);
+short3 __ovld __cnfn isless(half3, half3);
+short4 __ovld __cnfn isless(half4, half4);
+short8 __ovld __cnfn isless(half8, half8);
+short16 __ovld __cnfn isless(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x <= y.
+ */
+int __ovld __cnfn islessequal(float, float);
+int2 __ovld __cnfn islessequal(float2, float2);
+int3 __ovld __cnfn islessequal(float3, float3);
+int4 __ovld __cnfn islessequal(float4, float4);
+int8 __ovld __cnfn islessequal(float8, float8);
+int16 __ovld __cnfn islessequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessequal(double, double);
+long2 __ovld __cnfn islessequal(double2, double2);
+long3 __ovld __cnfn islessequal(double3, double3);
+long4 __ovld __cnfn islessequal(double4, double4);
+long8 __ovld __cnfn islessequal(double8, double8);
+long16 __ovld __cnfn islessequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessequal(half, half);
+short2 __ovld __cnfn islessequal(half2, half2);
+short3 __ovld __cnfn islessequal(half3, half3);
+short4 __ovld __cnfn islessequal(half4, half4);
+short8 __ovld __cnfn islessequal(half8, half8);
+short16 __ovld __cnfn islessequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of
+ * (x < y) || (x > y).
+ */
+int __ovld __cnfn islessgreater(float, float);
+int2 __ovld __cnfn islessgreater(float2, float2);
+int3 __ovld __cnfn islessgreater(float3, float3);
+int4 __ovld __cnfn islessgreater(float4, float4);
+int8 __ovld __cnfn islessgreater(float8, float8);
+int16 __ovld __cnfn islessgreater(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessgreater(double, double);
+long2 __ovld __cnfn islessgreater(double2, double2);
+long3 __ovld __cnfn islessgreater(double3, double3);
+long4 __ovld __cnfn islessgreater(double4, double4);
+long8 __ovld __cnfn islessgreater(double8, double8);
+long16 __ovld __cnfn islessgreater(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessgreater(half, half);
+short2 __ovld __cnfn islessgreater(half2, half2);
+short3 __ovld __cnfn islessgreater(half3, half3);
+short4 __ovld __cnfn islessgreater(half4, half4);
+short8 __ovld __cnfn islessgreater(half8, half8);
+short16 __ovld __cnfn islessgreater(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for finite value.
+ */
+int __ovld __cnfn isfinite(float);
+int2 __ovld __cnfn isfinite(float2);
+int3 __ovld __cnfn isfinite(float3);
+int4 __ovld __cnfn isfinite(float4);
+int8 __ovld __cnfn isfinite(float8);
+int16 __ovld __cnfn isfinite(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isfinite(double);
+long2 __ovld __cnfn isfinite(double2);
+long3 __ovld __cnfn isfinite(double3);
+long4 __ovld __cnfn isfinite(double4);
+long8 __ovld __cnfn isfinite(double8);
+long16 __ovld __cnfn isfinite(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isfinite(half);
+short2 __ovld __cnfn isfinite(half2);
+short3 __ovld __cnfn isfinite(half3);
+short4 __ovld __cnfn isfinite(half4);
+short8 __ovld __cnfn isfinite(half8);
+short16 __ovld __cnfn isfinite(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for an infinity value (positive or negative).
+ */
+int __ovld __cnfn isinf(float);
+int2 __ovld __cnfn isinf(float2);
+int3 __ovld __cnfn isinf(float3);
+int4 __ovld __cnfn isinf(float4);
+int8 __ovld __cnfn isinf(float8);
+int16 __ovld __cnfn isinf(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isinf(double);
+long2 __ovld __cnfn isinf(double2);
+long3 __ovld __cnfn isinf(double3);
+long4 __ovld __cnfn isinf(double4);
+long8 __ovld __cnfn isinf(double8);
+long16 __ovld __cnfn isinf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isinf(half);
+short2 __ovld __cnfn isinf(half2);
+short3 __ovld __cnfn isinf(half3);
+short4 __ovld __cnfn isinf(half4);
+short8 __ovld __cnfn isinf(half8);
+short16 __ovld __cnfn isinf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a NaN.
+ */
+int __ovld __cnfn isnan(float);
+int2 __ovld __cnfn isnan(float2);
+int3 __ovld __cnfn isnan(float3);
+int4 __ovld __cnfn isnan(float4);
+int8 __ovld __cnfn isnan(float8);
+int16 __ovld __cnfn isnan(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnan(double);
+long2 __ovld __cnfn isnan(double2);
+long3 __ovld __cnfn isnan(double3);
+long4 __ovld __cnfn isnan(double4);
+long8 __ovld __cnfn isnan(double8);
+long16 __ovld __cnfn isnan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnan(half);
+short2 __ovld __cnfn isnan(half2);
+short3 __ovld __cnfn isnan(half3);
+short4 __ovld __cnfn isnan(half4);
+short8 __ovld __cnfn isnan(half8);
+short16 __ovld __cnfn isnan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a normal value.
+ */
+int __ovld __cnfn isnormal(float);
+int2 __ovld __cnfn isnormal(float2);
+int3 __ovld __cnfn isnormal(float3);
+int4 __ovld __cnfn isnormal(float4);
+int8 __ovld __cnfn isnormal(float8);
+int16 __ovld __cnfn isnormal(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnormal(double);
+long2 __ovld __cnfn isnormal(double2);
+long3 __ovld __cnfn isnormal(double3);
+long4 __ovld __cnfn isnormal(double4);
+long8 __ovld __cnfn isnormal(double8);
+long16 __ovld __cnfn isnormal(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnormal(half);
+short2 __ovld __cnfn isnormal(half2);
+short3 __ovld __cnfn isnormal(half3);
+short4 __ovld __cnfn isnormal(half4);
+short8 __ovld __cnfn isnormal(half8);
+short16 __ovld __cnfn isnormal(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are ordered. isordered() takes
+ * arguments x and y, and returns the result
+ * isequal(x, x) && isequal(y, y).
+ */
+int __ovld __cnfn isordered(float, float);
+int2 __ovld __cnfn isordered(float2, float2);
+int3 __ovld __cnfn isordered(float3, float3);
+int4 __ovld __cnfn isordered(float4, float4);
+int8 __ovld __cnfn isordered(float8, float8);
+int16 __ovld __cnfn isordered(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isordered(double, double);
+long2 __ovld __cnfn isordered(double2, double2);
+long3 __ovld __cnfn isordered(double3, double3);
+long4 __ovld __cnfn isordered(double4, double4);
+long8 __ovld __cnfn isordered(double8, double8);
+long16 __ovld __cnfn isordered(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isordered(half, half);
+short2 __ovld __cnfn isordered(half2, half2);
+short3 __ovld __cnfn isordered(half3, half3);
+short4 __ovld __cnfn isordered(half4, half4);
+short8 __ovld __cnfn isordered(half8, half8);
+short16 __ovld __cnfn isordered(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are unordered. isunordered()
+ * takes arguments x and y, returning non-zero if x or y
+ * is NaN, and zero otherwise.
+ */
+int __ovld __cnfn isunordered(float, float);
+int2 __ovld __cnfn isunordered(float2, float2);
+int3 __ovld __cnfn isunordered(float3, float3);
+int4 __ovld __cnfn isunordered(float4, float4);
+int8 __ovld __cnfn isunordered(float8, float8);
+int16 __ovld __cnfn isunordered(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isunordered(double, double);
+long2 __ovld __cnfn isunordered(double2, double2);
+long3 __ovld __cnfn isunordered(double3, double3);
+long4 __ovld __cnfn isunordered(double4, double4);
+long8 __ovld __cnfn isunordered(double8, double8);
+long16 __ovld __cnfn isunordered(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isunordered(half, half);
+short2 __ovld __cnfn isunordered(half2, half2);
+short3 __ovld __cnfn isunordered(half3, half3);
+short4 __ovld __cnfn isunordered(half4, half4);
+short8 __ovld __cnfn isunordered(half8, half8);
+short16 __ovld __cnfn isunordered(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for sign bit. The scalar version of the function
+ * returns 1 if the sign bit in the float is set, else 0.
+ * The vector version returns, for each component in
+ * floatn, -1 if the sign bit in that float is set, else 0.
+ */
+int __ovld __cnfn signbit(float);
+int2 __ovld __cnfn signbit(float2);
+int3 __ovld __cnfn signbit(float3);
+int4 __ovld __cnfn signbit(float4);
+int8 __ovld __cnfn signbit(float8);
+int16 __ovld __cnfn signbit(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn signbit(double);
+long2 __ovld __cnfn signbit(double2);
+long3 __ovld __cnfn signbit(double3);
+long4 __ovld __cnfn signbit(double4);
+long8 __ovld __cnfn signbit(double8);
+long16 __ovld __cnfn signbit(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn signbit(half);
+short2 __ovld __cnfn signbit(half2);
+short3 __ovld __cnfn signbit(half3);
+short4 __ovld __cnfn signbit(half4);
+short8 __ovld __cnfn signbit(half8);
+short16 __ovld __cnfn signbit(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 1 if the most significant bit in any component
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn any(char);
+int __ovld __cnfn any(char2);
+int __ovld __cnfn any(char3);
+int __ovld __cnfn any(char4);
+int __ovld __cnfn any(char8);
+int __ovld __cnfn any(char16);
+int __ovld __cnfn any(short);
+int __ovld __cnfn any(short2);
+int __ovld __cnfn any(short3);
+int __ovld __cnfn any(short4);
+int __ovld __cnfn any(short8);
+int __ovld __cnfn any(short16);
+int __ovld __cnfn any(int);
+int __ovld __cnfn any(int2);
+int __ovld __cnfn any(int3);
+int __ovld __cnfn any(int4);
+int __ovld __cnfn any(int8);
+int __ovld __cnfn any(int16);
+int __ovld __cnfn any(long);
+int __ovld __cnfn any(long2);
+int __ovld __cnfn any(long3);
+int __ovld __cnfn any(long4);
+int __ovld __cnfn any(long8);
+int __ovld __cnfn any(long16);
+
+/**
+ * Returns 1 if the most significant bit in all components
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn all(char);
+int __ovld __cnfn all(char2);
+int __ovld __cnfn all(char3);
+int __ovld __cnfn all(char4);
+int __ovld __cnfn all(char8);
+int __ovld __cnfn all(char16);
+int __ovld __cnfn all(short);
+int __ovld __cnfn all(short2);
+int __ovld __cnfn all(short3);
+int __ovld __cnfn all(short4);
+int __ovld __cnfn all(short8);
+int __ovld __cnfn all(short16);
+int __ovld __cnfn all(int);
+int __ovld __cnfn all(int2);
+int __ovld __cnfn all(int3);
+int __ovld __cnfn all(int4);
+int __ovld __cnfn all(int8);
+int __ovld __cnfn all(int16);
+int __ovld __cnfn all(long);
+int __ovld __cnfn all(long2);
+int __ovld __cnfn all(long3);
+int __ovld __cnfn all(long4);
+int __ovld __cnfn all(long8);
+int __ovld __cnfn all(long16);
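+
+/*
+ * Worked example: any/all test the most significant (sign) bit per
+ * component, which matches the -1/0 lanes the relational built-ins
+ * produce, e.g.
+ * any(isless((float2)(1.0f, -2.0f), (float2)(0.0f))) == 1.
+ */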
+
+/**
+ * Each bit of the result is the corresponding bit of a if
+ * the corresponding bit of c is 0. Otherwise it is the
+ * corresponding bit of b.
+ */
+char __ovld __cnfn bitselect(char, char, char);
+uchar __ovld __cnfn bitselect(uchar, uchar, uchar);
+char2 __ovld __cnfn bitselect(char2, char2, char2);
+uchar2 __ovld __cnfn bitselect(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn bitselect(char3, char3, char3);
+uchar3 __ovld __cnfn bitselect(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn bitselect(char4, char4, char4);
+uchar4 __ovld __cnfn bitselect(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn bitselect(char8, char8, char8);
+uchar8 __ovld __cnfn bitselect(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn bitselect(char16, char16, char16);
+uchar16 __ovld __cnfn bitselect(uchar16, uchar16, uchar16);
+short __ovld __cnfn bitselect(short, short, short);
+ushort __ovld __cnfn bitselect(ushort, ushort, ushort);
+short2 __ovld __cnfn bitselect(short2, short2, short2);
+ushort2 __ovld __cnfn bitselect(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn bitselect(short3, short3, short3);
+ushort3 __ovld __cnfn bitselect(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn bitselect(short4, short4, short4);
+ushort4 __ovld __cnfn bitselect(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn bitselect(short8, short8, short8);
+ushort8 __ovld __cnfn bitselect(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn bitselect(short16, short16, short16);
+ushort16 __ovld __cnfn bitselect(ushort16, ushort16, ushort16);
+int __ovld __cnfn bitselect(int, int, int);
+uint __ovld __cnfn bitselect(uint, uint, uint);
+int2 __ovld __cnfn bitselect(int2, int2, int2);
+uint2 __ovld __cnfn bitselect(uint2, uint2, uint2);
+int3 __ovld __cnfn bitselect(int3, int3, int3);
+uint3 __ovld __cnfn bitselect(uint3, uint3, uint3);
+int4 __ovld __cnfn bitselect(int4, int4, int4);
+uint4 __ovld __cnfn bitselect(uint4, uint4, uint4);
+int8 __ovld __cnfn bitselect(int8, int8, int8);
+uint8 __ovld __cnfn bitselect(uint8, uint8, uint8);
+int16 __ovld __cnfn bitselect(int16, int16, int16);
+uint16 __ovld __cnfn bitselect(uint16, uint16, uint16);
+long __ovld __cnfn bitselect(long, long, long);
+ulong __ovld __cnfn bitselect(ulong, ulong, ulong);
+long2 __ovld __cnfn bitselect(long2, long2, long2);
+ulong2 __ovld __cnfn bitselect(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn bitselect(long3, long3, long3);
+ulong3 __ovld __cnfn bitselect(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn bitselect(long4, long4, long4);
+ulong4 __ovld __cnfn bitselect(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn bitselect(long8, long8, long8);
+ulong8 __ovld __cnfn bitselect(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn bitselect(long16, long16, long16);
+ulong16 __ovld __cnfn bitselect(ulong16, ulong16, ulong16);
+float __ovld __cnfn bitselect(float, float, float);
+float2 __ovld __cnfn bitselect(float2, float2, float2);
+float3 __ovld __cnfn bitselect(float3, float3, float3);
+float4 __ovld __cnfn bitselect(float4, float4, float4);
+float8 __ovld __cnfn bitselect(float8, float8, float8);
+float16 __ovld __cnfn bitselect(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn bitselect(double, double, double);
+double2 __ovld __cnfn bitselect(double2, double2, double2);
+double3 __ovld __cnfn bitselect(double3, double3, double3);
+double4 __ovld __cnfn bitselect(double4, double4, double4);
+double8 __ovld __cnfn bitselect(double8, double8, double8);
+double16 __ovld __cnfn bitselect(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn bitselect(half, half, half);
+half2 __ovld __cnfn bitselect(half2, half2, half2);
+half3 __ovld __cnfn bitselect(half3, half3, half3);
+half4 __ovld __cnfn bitselect(half4, half4, half4);
+half8 __ovld __cnfn bitselect(half8, half8, half8);
+half16 __ovld __cnfn bitselect(half16, half16, half16);
+#endif //cl_khr_fp16
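+
+/*
+ * Worked example: bitselect merges a and b bit by bit under mask c; for
+ * integer types it equals (a & ~c) | (b & c). For instance
+ * bitselect((uchar)0x0F, (uchar)0xF0, (uchar)0x3C) == 0x33.
+ */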
+
+/**
+ * For each component of a vector type,
+ * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
+ * For a scalar type, result = c ? b : a.
+ * b and a must have the same type.
+ * c must have the same number of elements and bits as a.
+ */
+char __ovld __cnfn select(char, char, char);
+uchar __ovld __cnfn select(uchar, uchar, char);
+char2 __ovld __cnfn select(char2, char2, char2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, char2);
+char3 __ovld __cnfn select(char3, char3, char3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, char3);
+char4 __ovld __cnfn select(char4, char4, char4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, char4);
+char8 __ovld __cnfn select(char8, char8, char8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, char8);
+char16 __ovld __cnfn select(char16, char16, char16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, char16);
+
+short __ovld __cnfn select(short, short, short);
+ushort __ovld __cnfn select(ushort, ushort, short);
+short2 __ovld __cnfn select(short2, short2, short2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, short2);
+short3 __ovld __cnfn select(short3, short3, short3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, short3);
+short4 __ovld __cnfn select(short4, short4, short4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, short4);
+short8 __ovld __cnfn select(short8, short8, short8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, short8);
+short16 __ovld __cnfn select(short16, short16, short16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, short16);
+
+int __ovld __cnfn select(int, int, int);
+uint __ovld __cnfn select(uint, uint, int);
+int2 __ovld __cnfn select(int2, int2, int2);
+uint2 __ovld __cnfn select(uint2, uint2, int2);
+int3 __ovld __cnfn select(int3, int3, int3);
+uint3 __ovld __cnfn select(uint3, uint3, int3);
+int4 __ovld __cnfn select(int4, int4, int4);
+uint4 __ovld __cnfn select(uint4, uint4, int4);
+int8 __ovld __cnfn select(int8, int8, int8);
+uint8 __ovld __cnfn select(uint8, uint8, int8);
+int16 __ovld __cnfn select(int16, int16, int16);
+uint16 __ovld __cnfn select(uint16, uint16, int16);
+float __ovld __cnfn select(float, float, int);
+float2 __ovld __cnfn select(float2, float2, int2);
+float3 __ovld __cnfn select(float3, float3, int3);
+float4 __ovld __cnfn select(float4, float4, int4);
+float8 __ovld __cnfn select(float8, float8, int8);
+float16 __ovld __cnfn select(float16, float16, int16);
+
+long __ovld __cnfn select(long, long, long);
+ulong __ovld __cnfn select(ulong, ulong, long);
+long2 __ovld __cnfn select(long2, long2, long2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, long2);
+long3 __ovld __cnfn select(long3, long3, long3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, long3);
+long4 __ovld __cnfn select(long4, long4, long4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, long4);
+long8 __ovld __cnfn select(long8, long8, long8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, long8);
+long16 __ovld __cnfn select(long16, long16, long16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, long16);
+
+char __ovld __cnfn select(char, char, uchar);
+uchar __ovld __cnfn select(uchar, uchar, uchar);
+char2 __ovld __cnfn select(char2, char2, uchar2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn select(char3, char3, uchar3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn select(char4, char4, uchar4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn select(char8, char8, uchar8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn select(char16, char16, uchar16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, uchar16);
+
+short __ovld __cnfn select(short, short, ushort);
+ushort __ovld __cnfn select(ushort, ushort, ushort);
+short2 __ovld __cnfn select(short2, short2, ushort2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn select(short3, short3, ushort3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn select(short4, short4, ushort4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn select(short8, short8, ushort8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn select(short16, short16, ushort16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, ushort16);
+
+int __ovld __cnfn select(int, int, uint);
+uint __ovld __cnfn select(uint, uint, uint);
+int2 __ovld __cnfn select(int2, int2, uint2);
+uint2 __ovld __cnfn select(uint2, uint2, uint2);
+int3 __ovld __cnfn select(int3, int3, uint3);
+uint3 __ovld __cnfn select(uint3, uint3, uint3);
+int4 __ovld __cnfn select(int4, int4, uint4);
+uint4 __ovld __cnfn select(uint4, uint4, uint4);
+int8 __ovld __cnfn select(int8, int8, uint8);
+uint8 __ovld __cnfn select(uint8, uint8, uint8);
+int16 __ovld __cnfn select(int16, int16, uint16);
+uint16 __ovld __cnfn select(uint16, uint16, uint16);
+float __ovld __cnfn select(float, float, uint);
+float2 __ovld __cnfn select(float2, float2, uint2);
+float3 __ovld __cnfn select(float3, float3, uint3);
+float4 __ovld __cnfn select(float4, float4, uint4);
+float8 __ovld __cnfn select(float8, float8, uint8);
+float16 __ovld __cnfn select(float16, float16, uint16);
+
+long __ovld __cnfn select(long, long, ulong);
+ulong __ovld __cnfn select(ulong, ulong, ulong);
+long2 __ovld __cnfn select(long2, long2, ulong2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn select(long3, long3, ulong3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn select(long4, long4, ulong4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn select(long8, long8, ulong8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn select(long16, long16, ulong16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, ulong16);
+
+#ifdef cl_khr_fp64
+double __ovld __cnfn select(double, double, long);
+double2 __ovld __cnfn select(double2, double2, long2);
+double3 __ovld __cnfn select(double3, double3, long3);
+double4 __ovld __cnfn select(double4, double4, long4);
+double8 __ovld __cnfn select(double8, double8, long8);
+double16 __ovld __cnfn select(double16, double16, long16);
+double __ovld __cnfn select(double, double, ulong);
+double2 __ovld __cnfn select(double2, double2, ulong2);
+double3 __ovld __cnfn select(double3, double3, ulong3);
+double4 __ovld __cnfn select(double4, double4, ulong4);
+double8 __ovld __cnfn select(double8, double8, ulong8);
+double16 __ovld __cnfn select(double16, double16, ulong16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn select(half, half, short);
+half2 __ovld __cnfn select(half2, half2, short2);
+half3 __ovld __cnfn select(half3, half3, short3);
+half4 __ovld __cnfn select(half4, half4, short4);
+half8 __ovld __cnfn select(half8, half8, short8);
+half16 __ovld __cnfn select(half16, half16, short16);
+half __ovld __cnfn select(half, half, ushort);
+half2 __ovld __cnfn select(half2, half2, ushort2);
+half3 __ovld __cnfn select(half3, half3, ushort3);
+half4 __ovld __cnfn select(half4, half4, ushort4);
+half8 __ovld __cnfn select(half8, half8, ushort8);
+half16 __ovld __cnfn select(half16, half16, ushort16);
+#endif //cl_khr_fp16
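+
+/*
+ * Worked example: for vectors the predicate is the MSB of each lane, so
+ * with c = (int2)(-1, 0) (MSB set, MSB clear),
+ * select((int2)(1, 2), (int2)(10, 20), c) == (int2)(10, 2).
+ * Unlike bitselect, select picks whole components, not individual bits.
+ */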
+
+// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
+// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
+/**
+ * Use generic type gentype to indicate the built-in data types
+ * char, uchar, short, ushort, int, uint, long, ulong, float,
+ * double or half.
+ *
+ * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
+ *
+ * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
+ *
+ * The address computed as (p + (offset * n)) must be
+ * 8-bit aligned if gentype is char, uchar;
+ * 16-bit aligned if gentype is short, ushort, half;
+ * 32-bit aligned if gentype is int, uint, float;
+ * 64-bit aligned if gentype is long, ulong, double.
+ */
+
+char2 __ovld __purefn vload2(size_t, const __constant char *);
+uchar2 __ovld __purefn vload2(size_t, const __constant uchar *);
+short2 __ovld __purefn vload2(size_t, const __constant short *);
+ushort2 __ovld __purefn vload2(size_t, const __constant ushort *);
+int2 __ovld __purefn vload2(size_t, const __constant int *);
+uint2 __ovld __purefn vload2(size_t, const __constant uint *);
+long2 __ovld __purefn vload2(size_t, const __constant long *);
+ulong2 __ovld __purefn vload2(size_t, const __constant ulong *);
+float2 __ovld __purefn vload2(size_t, const __constant float *);
+char3 __ovld __purefn vload3(size_t, const __constant char *);
+uchar3 __ovld __purefn vload3(size_t, const __constant uchar *);
+short3 __ovld __purefn vload3(size_t, const __constant short *);
+ushort3 __ovld __purefn vload3(size_t, const __constant ushort *);
+int3 __ovld __purefn vload3(size_t, const __constant int *);
+uint3 __ovld __purefn vload3(size_t, const __constant uint *);
+long3 __ovld __purefn vload3(size_t, const __constant long *);
+ulong3 __ovld __purefn vload3(size_t, const __constant ulong *);
+float3 __ovld __purefn vload3(size_t, const __constant float *);
+char4 __ovld __purefn vload4(size_t, const __constant char *);
+uchar4 __ovld __purefn vload4(size_t, const __constant uchar *);
+short4 __ovld __purefn vload4(size_t, const __constant short *);
+ushort4 __ovld __purefn vload4(size_t, const __constant ushort *);
+int4 __ovld __purefn vload4(size_t, const __constant int *);
+uint4 __ovld __purefn vload4(size_t, const __constant uint *);
+long4 __ovld __purefn vload4(size_t, const __constant long *);
+ulong4 __ovld __purefn vload4(size_t, const __constant ulong *);
+float4 __ovld __purefn vload4(size_t, const __constant float *);
+char8 __ovld __purefn vload8(size_t, const __constant char *);
+uchar8 __ovld __purefn vload8(size_t, const __constant uchar *);
+short8 __ovld __purefn vload8(size_t, const __constant short *);
+ushort8 __ovld __purefn vload8(size_t, const __constant ushort *);
+int8 __ovld __purefn vload8(size_t, const __constant int *);
+uint8 __ovld __purefn vload8(size_t, const __constant uint *);
+long8 __ovld __purefn vload8(size_t, const __constant long *);
+ulong8 __ovld __purefn vload8(size_t, const __constant ulong *);
+float8 __ovld __purefn vload8(size_t, const __constant float *);
+char16 __ovld __purefn vload16(size_t, const __constant char *);
+uchar16 __ovld __purefn vload16(size_t, const __constant uchar *);
+short16 __ovld __purefn vload16(size_t, const __constant short *);
+ushort16 __ovld __purefn vload16(size_t, const __constant ushort *);
+int16 __ovld __purefn vload16(size_t, const __constant int *);
+uint16 __ovld __purefn vload16(size_t, const __constant uint *);
+long16 __ovld __purefn vload16(size_t, const __constant long *);
+ulong16 __ovld __purefn vload16(size_t, const __constant ulong *);
+float16 __ovld __purefn vload16(size_t, const __constant float *);
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const __constant double *);
+double3 __ovld __purefn vload3(size_t, const __constant double *);
+double4 __ovld __purefn vload4(size_t, const __constant double *);
+double8 __ovld __purefn vload8(size_t, const __constant double *);
+double16 __ovld __purefn vload16(size_t, const __constant double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const __constant half *);
+half3 __ovld __purefn vload3(size_t, const __constant half *);
+half4 __ovld __purefn vload4(size_t, const __constant half *);
+half8 __ovld __purefn vload8(size_t, const __constant half *);
+half16 __ovld __purefn vload16(size_t, const __constant half *);
+#endif //cl_khr_fp16
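+
+/*
+ * A minimal usage sketch (not part of the header): the offset is in units
+ * of the vector width, not bytes, so vload4(i, p) reads p[4*i]..p[4*i+3].
+ * The kernel name and buffers are assumptions for illustration only:
+ *
+ *   __kernel void sum4(__global const float *in, __global float *out) {
+ *       size_t i = get_global_id(0);
+ *       float4 v = vload4(i, in);   // reads in[4*i] .. in[4*i + 3]
+ *       out[i] = v.x + v.y + v.z + v.w;
+ *   }
+ */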
+
+#if defined(__opencl_c_generic_address_space)
+char2 __ovld __purefn vload2(size_t, const char *);
+uchar2 __ovld __purefn vload2(size_t, const uchar *);
+short2 __ovld __purefn vload2(size_t, const short *);
+ushort2 __ovld __purefn vload2(size_t, const ushort *);
+int2 __ovld __purefn vload2(size_t, const int *);
+uint2 __ovld __purefn vload2(size_t, const uint *);
+long2 __ovld __purefn vload2(size_t, const long *);
+ulong2 __ovld __purefn vload2(size_t, const ulong *);
+float2 __ovld __purefn vload2(size_t, const float *);
+char3 __ovld __purefn vload3(size_t, const char *);
+uchar3 __ovld __purefn vload3(size_t, const uchar *);
+short3 __ovld __purefn vload3(size_t, const short *);
+ushort3 __ovld __purefn vload3(size_t, const ushort *);
+int3 __ovld __purefn vload3(size_t, const int *);
+uint3 __ovld __purefn vload3(size_t, const uint *);
+long3 __ovld __purefn vload3(size_t, const long *);
+ulong3 __ovld __purefn vload3(size_t, const ulong *);
+float3 __ovld __purefn vload3(size_t, const float *);
+char4 __ovld __purefn vload4(size_t, const char *);
+uchar4 __ovld __purefn vload4(size_t, const uchar *);
+short4 __ovld __purefn vload4(size_t, const short *);
+ushort4 __ovld __purefn vload4(size_t, const ushort *);
+int4 __ovld __purefn vload4(size_t, const int *);
+uint4 __ovld __purefn vload4(size_t, const uint *);
+long4 __ovld __purefn vload4(size_t, const long *);
+ulong4 __ovld __purefn vload4(size_t, const ulong *);
+float4 __ovld __purefn vload4(size_t, const float *);
+char8 __ovld __purefn vload8(size_t, const char *);
+uchar8 __ovld __purefn vload8(size_t, const uchar *);
+short8 __ovld __purefn vload8(size_t, const short *);
+ushort8 __ovld __purefn vload8(size_t, const ushort *);
+int8 __ovld __purefn vload8(size_t, const int *);
+uint8 __ovld __purefn vload8(size_t, const uint *);
+long8 __ovld __purefn vload8(size_t, const long *);
+ulong8 __ovld __purefn vload8(size_t, const ulong *);
+float8 __ovld __purefn vload8(size_t, const float *);
+char16 __ovld __purefn vload16(size_t, const char *);
+uchar16 __ovld __purefn vload16(size_t, const uchar *);
+short16 __ovld __purefn vload16(size_t, const short *);
+ushort16 __ovld __purefn vload16(size_t, const ushort *);
+int16 __ovld __purefn vload16(size_t, const int *);
+uint16 __ovld __purefn vload16(size_t, const uint *);
+long16 __ovld __purefn vload16(size_t, const long *);
+ulong16 __ovld __purefn vload16(size_t, const ulong *);
+float16 __ovld __purefn vload16(size_t, const float *);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const double *);
+double3 __ovld __purefn vload3(size_t, const double *);
+double4 __ovld __purefn vload4(size_t, const double *);
+double8 __ovld __purefn vload8(size_t, const double *);
+double16 __ovld __purefn vload16(size_t, const double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const half *);
+half3 __ovld __purefn vload3(size_t, const half *);
+half4 __ovld __purefn vload4(size_t, const half *);
+half8 __ovld __purefn vload8(size_t, const half *);
+half16 __ovld __purefn vload16(size_t, const half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+char2 __ovld __purefn vload2(size_t, const __global char *);
+uchar2 __ovld __purefn vload2(size_t, const __global uchar *);
+short2 __ovld __purefn vload2(size_t, const __global short *);
+ushort2 __ovld __purefn vload2(size_t, const __global ushort *);
+int2 __ovld __purefn vload2(size_t, const __global int *);
+uint2 __ovld __purefn vload2(size_t, const __global uint *);
+long2 __ovld __purefn vload2(size_t, const __global long *);
+ulong2 __ovld __purefn vload2(size_t, const __global ulong *);
+float2 __ovld __purefn vload2(size_t, const __global float *);
+char3 __ovld __purefn vload3(size_t, const __global char *);
+uchar3 __ovld __purefn vload3(size_t, const __global uchar *);
+short3 __ovld __purefn vload3(size_t, const __global short *);
+ushort3 __ovld __purefn vload3(size_t, const __global ushort *);
+int3 __ovld __purefn vload3(size_t, const __global int *);
+uint3 __ovld __purefn vload3(size_t, const __global uint *);
+long3 __ovld __purefn vload3(size_t, const __global long *);
+ulong3 __ovld __purefn vload3(size_t, const __global ulong *);
+float3 __ovld __purefn vload3(size_t, const __global float *);
+char4 __ovld __purefn vload4(size_t, const __global char *);
+uchar4 __ovld __purefn vload4(size_t, const __global uchar *);
+short4 __ovld __purefn vload4(size_t, const __global short *);
+ushort4 __ovld __purefn vload4(size_t, const __global ushort *);
+int4 __ovld __purefn vload4(size_t, const __global int *);
+uint4 __ovld __purefn vload4(size_t, const __global uint *);
+long4 __ovld __purefn vload4(size_t, const __global long *);
+ulong4 __ovld __purefn vload4(size_t, const __global ulong *);
+float4 __ovld __purefn vload4(size_t, const __global float *);
+char8 __ovld __purefn vload8(size_t, const __global char *);
+uchar8 __ovld __purefn vload8(size_t, const __global uchar *);
+short8 __ovld __purefn vload8(size_t, const __global short *);
+ushort8 __ovld __purefn vload8(size_t, const __global ushort *);
+int8 __ovld __purefn vload8(size_t, const __global int *);
+uint8 __ovld __purefn vload8(size_t, const __global uint *);
+long8 __ovld __purefn vload8(size_t, const __global long *);
+ulong8 __ovld __purefn vload8(size_t, const __global ulong *);
+float8 __ovld __purefn vload8(size_t, const __global float *);
+char16 __ovld __purefn vload16(size_t, const __global char *);
+uchar16 __ovld __purefn vload16(size_t, const __global uchar *);
+short16 __ovld __purefn vload16(size_t, const __global short *);
+ushort16 __ovld __purefn vload16(size_t, const __global ushort *);
+int16 __ovld __purefn vload16(size_t, const __global int *);
+uint16 __ovld __purefn vload16(size_t, const __global uint *);
+long16 __ovld __purefn vload16(size_t, const __global long *);
+ulong16 __ovld __purefn vload16(size_t, const __global ulong *);
+float16 __ovld __purefn vload16(size_t, const __global float *);
+char2 __ovld __purefn vload2(size_t, const __local char *);
+uchar2 __ovld __purefn vload2(size_t, const __local uchar *);
+short2 __ovld __purefn vload2(size_t, const __local short *);
+ushort2 __ovld __purefn vload2(size_t, const __local ushort *);
+int2 __ovld __purefn vload2(size_t, const __local int *);
+uint2 __ovld __purefn vload2(size_t, const __local uint *);
+long2 __ovld __purefn vload2(size_t, const __local long *);
+ulong2 __ovld __purefn vload2(size_t, const __local ulong *);
+float2 __ovld __purefn vload2(size_t, const __local float *);
+char3 __ovld __purefn vload3(size_t, const __local char *);
+uchar3 __ovld __purefn vload3(size_t, const __local uchar *);
+short3 __ovld __purefn vload3(size_t, const __local short *);
+ushort3 __ovld __purefn vload3(size_t, const __local ushort *);
+int3 __ovld __purefn vload3(size_t, const __local int *);
+uint3 __ovld __purefn vload3(size_t, const __local uint *);
+long3 __ovld __purefn vload3(size_t, const __local long *);
+ulong3 __ovld __purefn vload3(size_t, const __local ulong *);
+float3 __ovld __purefn vload3(size_t, const __local float *);
+char4 __ovld __purefn vload4(size_t, const __local char *);
+uchar4 __ovld __purefn vload4(size_t, const __local uchar *);
+short4 __ovld __purefn vload4(size_t, const __local short *);
+ushort4 __ovld __purefn vload4(size_t, const __local ushort *);
+int4 __ovld __purefn vload4(size_t, const __local int *);
+uint4 __ovld __purefn vload4(size_t, const __local uint *);
+long4 __ovld __purefn vload4(size_t, const __local long *);
+ulong4 __ovld __purefn vload4(size_t, const __local ulong *);
+float4 __ovld __purefn vload4(size_t, const __local float *);
+char8 __ovld __purefn vload8(size_t, const __local char *);
+uchar8 __ovld __purefn vload8(size_t, const __local uchar *);
+short8 __ovld __purefn vload8(size_t, const __local short *);
+ushort8 __ovld __purefn vload8(size_t, const __local ushort *);
+int8 __ovld __purefn vload8(size_t, const __local int *);
+uint8 __ovld __purefn vload8(size_t, const __local uint *);
+long8 __ovld __purefn vload8(size_t, const __local long *);
+ulong8 __ovld __purefn vload8(size_t, const __local ulong *);
+float8 __ovld __purefn vload8(size_t, const __local float *);
+char16 __ovld __purefn vload16(size_t, const __local char *);
+uchar16 __ovld __purefn vload16(size_t, const __local uchar *);
+short16 __ovld __purefn vload16(size_t, const __local short *);
+ushort16 __ovld __purefn vload16(size_t, const __local ushort *);
+int16 __ovld __purefn vload16(size_t, const __local int *);
+uint16 __ovld __purefn vload16(size_t, const __local uint *);
+long16 __ovld __purefn vload16(size_t, const __local long *);
+ulong16 __ovld __purefn vload16(size_t, const __local ulong *);
+float16 __ovld __purefn vload16(size_t, const __local float *);
+char2 __ovld __purefn vload2(size_t, const __private char *);
+uchar2 __ovld __purefn vload2(size_t, const __private uchar *);
+short2 __ovld __purefn vload2(size_t, const __private short *);
+ushort2 __ovld __purefn vload2(size_t, const __private ushort *);
+int2 __ovld __purefn vload2(size_t, const __private int *);
+uint2 __ovld __purefn vload2(size_t, const __private uint *);
+long2 __ovld __purefn vload2(size_t, const __private long *);
+ulong2 __ovld __purefn vload2(size_t, const __private ulong *);
+float2 __ovld __purefn vload2(size_t, const __private float *);
+char3 __ovld __purefn vload3(size_t, const __private char *);
+uchar3 __ovld __purefn vload3(size_t, const __private uchar *);
+short3 __ovld __purefn vload3(size_t, const __private short *);
+ushort3 __ovld __purefn vload3(size_t, const __private ushort *);
+int3 __ovld __purefn vload3(size_t, const __private int *);
+uint3 __ovld __purefn vload3(size_t, const __private uint *);
+long3 __ovld __purefn vload3(size_t, const __private long *);
+ulong3 __ovld __purefn vload3(size_t, const __private ulong *);
+float3 __ovld __purefn vload3(size_t, const __private float *);
+char4 __ovld __purefn vload4(size_t, const __private char *);
+uchar4 __ovld __purefn vload4(size_t, const __private uchar *);
+short4 __ovld __purefn vload4(size_t, const __private short *);
+ushort4 __ovld __purefn vload4(size_t, const __private ushort *);
+int4 __ovld __purefn vload4(size_t, const __private int *);
+uint4 __ovld __purefn vload4(size_t, const __private uint *);
+long4 __ovld __purefn vload4(size_t, const __private long *);
+ulong4 __ovld __purefn vload4(size_t, const __private ulong *);
+float4 __ovld __purefn vload4(size_t, const __private float *);
+char8 __ovld __purefn vload8(size_t, const __private char *);
+uchar8 __ovld __purefn vload8(size_t, const __private uchar *);
+short8 __ovld __purefn vload8(size_t, const __private short *);
+ushort8 __ovld __purefn vload8(size_t, const __private ushort *);
+int8 __ovld __purefn vload8(size_t, const __private int *);
+uint8 __ovld __purefn vload8(size_t, const __private uint *);
+long8 __ovld __purefn vload8(size_t, const __private long *);
+ulong8 __ovld __purefn vload8(size_t, const __private ulong *);
+float8 __ovld __purefn vload8(size_t, const __private float *);
+char16 __ovld __purefn vload16(size_t, const __private char *);
+uchar16 __ovld __purefn vload16(size_t, const __private uchar *);
+short16 __ovld __purefn vload16(size_t, const __private short *);
+ushort16 __ovld __purefn vload16(size_t, const __private ushort *);
+int16 __ovld __purefn vload16(size_t, const __private int *);
+uint16 __ovld __purefn vload16(size_t, const __private uint *);
+long16 __ovld __purefn vload16(size_t, const __private long *);
+ulong16 __ovld __purefn vload16(size_t, const __private ulong *);
+float16 __ovld __purefn vload16(size_t, const __private float *);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const __global double *);
+double3 __ovld __purefn vload3(size_t, const __global double *);
+double4 __ovld __purefn vload4(size_t, const __global double *);
+double8 __ovld __purefn vload8(size_t, const __global double *);
+double16 __ovld __purefn vload16(size_t, const __global double *);
+double2 __ovld __purefn vload2(size_t, const __local double *);
+double3 __ovld __purefn vload3(size_t, const __local double *);
+double4 __ovld __purefn vload4(size_t, const __local double *);
+double8 __ovld __purefn vload8(size_t, const __local double *);
+double16 __ovld __purefn vload16(size_t, const __local double *);
+double2 __ovld __purefn vload2(size_t, const __private double *);
+double3 __ovld __purefn vload3(size_t, const __private double *);
+double4 __ovld __purefn vload4(size_t, const __private double *);
+double8 __ovld __purefn vload8(size_t, const __private double *);
+double16 __ovld __purefn vload16(size_t, const __private double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const __global half *);
+half3 __ovld __purefn vload3(size_t, const __global half *);
+half4 __ovld __purefn vload4(size_t, const __global half *);
+half8 __ovld __purefn vload8(size_t, const __global half *);
+half16 __ovld __purefn vload16(size_t, const __global half *);
+half2 __ovld __purefn vload2(size_t, const __local half *);
+half3 __ovld __purefn vload3(size_t, const __local half *);
+half4 __ovld __purefn vload4(size_t, const __local half *);
+half8 __ovld __purefn vload8(size_t, const __local half *);
+half16 __ovld __purefn vload16(size_t, const __local half *);
+half2 __ovld __purefn vload2(size_t, const __private half *);
+half3 __ovld __purefn vload3(size_t, const __private half *);
+half4 __ovld __purefn vload4(size_t, const __private half *);
+half8 __ovld __purefn vload8(size_t, const __private half *);
+half16 __ovld __purefn vload16(size_t, const __private half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
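+/**
+ * Write sizeof (gentypen) bytes given by data to address
+ * (p + (offset * n)). The address computed as
+ * (p + (offset * n)) must be 8-bit aligned if gentype is
+ * char, uchar; 16-bit aligned if gentype is short,
+ * ushort; 32-bit aligned if gentype is int, uint, float;
+ * 64-bit aligned if gentype is long, ulong.
+ */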
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore2(char2, size_t, char *);
+void __ovld vstore2(uchar2, size_t, uchar *);
+void __ovld vstore2(short2, size_t, short *);
+void __ovld vstore2(ushort2, size_t, ushort *);
+void __ovld vstore2(int2, size_t, int *);
+void __ovld vstore2(uint2, size_t, uint *);
+void __ovld vstore2(long2, size_t, long *);
+void __ovld vstore2(ulong2, size_t, ulong *);
+void __ovld vstore2(float2, size_t, float *);
+void __ovld vstore3(char3, size_t, char *);
+void __ovld vstore3(uchar3, size_t, uchar *);
+void __ovld vstore3(short3, size_t, short *);
+void __ovld vstore3(ushort3, size_t, ushort *);
+void __ovld vstore3(int3, size_t, int *);
+void __ovld vstore3(uint3, size_t, uint *);
+void __ovld vstore3(long3, size_t, long *);
+void __ovld vstore3(ulong3, size_t, ulong *);
+void __ovld vstore3(float3, size_t, float *);
+void __ovld vstore4(char4, size_t, char *);
+void __ovld vstore4(uchar4, size_t, uchar *);
+void __ovld vstore4(short4, size_t, short *);
+void __ovld vstore4(ushort4, size_t, ushort *);
+void __ovld vstore4(int4, size_t, int *);
+void __ovld vstore4(uint4, size_t, uint *);
+void __ovld vstore4(long4, size_t, long *);
+void __ovld vstore4(ulong4, size_t, ulong *);
+void __ovld vstore4(float4, size_t, float *);
+void __ovld vstore8(char8, size_t, char *);
+void __ovld vstore8(uchar8, size_t, uchar *);
+void __ovld vstore8(short8, size_t, short *);
+void __ovld vstore8(ushort8, size_t, ushort *);
+void __ovld vstore8(int8, size_t, int *);
+void __ovld vstore8(uint8, size_t, uint *);
+void __ovld vstore8(long8, size_t, long *);
+void __ovld vstore8(ulong8, size_t, ulong *);
+void __ovld vstore8(float8, size_t, float *);
+void __ovld vstore16(char16, size_t, char *);
+void __ovld vstore16(uchar16, size_t, uchar *);
+void __ovld vstore16(short16, size_t, short *);
+void __ovld vstore16(ushort16, size_t, ushort *);
+void __ovld vstore16(int16, size_t, int *);
+void __ovld vstore16(uint16, size_t, uint *);
+void __ovld vstore16(long16, size_t, long *);
+void __ovld vstore16(ulong16, size_t, ulong *);
+void __ovld vstore16(float16, size_t, float *);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2, size_t, double *);
+void __ovld vstore3(double3, size_t, double *);
+void __ovld vstore4(double4, size_t, double *);
+void __ovld vstore8(double8, size_t, double *);
+void __ovld vstore16(double16, size_t, double *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore2(half2, size_t, half *);
+void __ovld vstore3(half3, size_t, half *);
+void __ovld vstore4(half4, size_t, half *);
+void __ovld vstore8(half8, size_t, half *);
+void __ovld vstore16(half16, size_t, half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore2(char2, size_t, __global char *);
+void __ovld vstore2(uchar2, size_t, __global uchar *);
+void __ovld vstore2(short2, size_t, __global short *);
+void __ovld vstore2(ushort2, size_t, __global ushort *);
+void __ovld vstore2(int2, size_t, __global int *);
+void __ovld vstore2(uint2, size_t, __global uint *);
+void __ovld vstore2(long2, size_t, __global long *);
+void __ovld vstore2(ulong2, size_t, __global ulong *);
+void __ovld vstore2(float2, size_t, __global float *);
+void __ovld vstore3(char3, size_t, __global char *);
+void __ovld vstore3(uchar3, size_t, __global uchar *);
+void __ovld vstore3(short3, size_t, __global short *);
+void __ovld vstore3(ushort3, size_t, __global ushort *);
+void __ovld vstore3(int3, size_t, __global int *);
+void __ovld vstore3(uint3, size_t, __global uint *);
+void __ovld vstore3(long3, size_t, __global long *);
+void __ovld vstore3(ulong3, size_t, __global ulong *);
+void __ovld vstore3(float3, size_t, __global float *);
+void __ovld vstore4(char4, size_t, __global char *);
+void __ovld vstore4(uchar4, size_t, __global uchar *);
+void __ovld vstore4(short4, size_t, __global short *);
+void __ovld vstore4(ushort4, size_t, __global ushort *);
+void __ovld vstore4(int4, size_t, __global int *);
+void __ovld vstore4(uint4, size_t, __global uint *);
+void __ovld vstore4(long4, size_t, __global long *);
+void __ovld vstore4(ulong4, size_t, __global ulong *);
+void __ovld vstore4(float4, size_t, __global float *);
+void __ovld vstore8(char8, size_t, __global char *);
+void __ovld vstore8(uchar8, size_t, __global uchar *);
+void __ovld vstore8(short8, size_t, __global short *);
+void __ovld vstore8(ushort8, size_t, __global ushort *);
+void __ovld vstore8(int8, size_t, __global int *);
+void __ovld vstore8(uint8, size_t, __global uint *);
+void __ovld vstore8(long8, size_t, __global long *);
+void __ovld vstore8(ulong8, size_t, __global ulong *);
+void __ovld vstore8(float8, size_t, __global float *);
+void __ovld vstore16(char16, size_t, __global char *);
+void __ovld vstore16(uchar16, size_t, __global uchar *);
+void __ovld vstore16(short16, size_t, __global short *);
+void __ovld vstore16(ushort16, size_t, __global ushort *);
+void __ovld vstore16(int16, size_t, __global int *);
+void __ovld vstore16(uint16, size_t, __global uint *);
+void __ovld vstore16(long16, size_t, __global long *);
+void __ovld vstore16(ulong16, size_t, __global ulong *);
+void __ovld vstore16(float16, size_t, __global float *);
+void __ovld vstore2(char2, size_t, __local char *);
+void __ovld vstore2(uchar2, size_t, __local uchar *);
+void __ovld vstore2(short2, size_t, __local short *);
+void __ovld vstore2(ushort2, size_t, __local ushort *);
+void __ovld vstore2(int2, size_t, __local int *);
+void __ovld vstore2(uint2, size_t, __local uint *);
+void __ovld vstore2(long2, size_t, __local long *);
+void __ovld vstore2(ulong2, size_t, __local ulong *);
+void __ovld vstore2(float2, size_t, __local float *);
+void __ovld vstore3(char3, size_t, __local char *);
+void __ovld vstore3(uchar3, size_t, __local uchar *);
+void __ovld vstore3(short3, size_t, __local short *);
+void __ovld vstore3(ushort3, size_t, __local ushort *);
+void __ovld vstore3(int3, size_t, __local int *);
+void __ovld vstore3(uint3, size_t, __local uint *);
+void __ovld vstore3(long3, size_t, __local long *);
+void __ovld vstore3(ulong3, size_t, __local ulong *);
+void __ovld vstore3(float3, size_t, __local float *);
+void __ovld vstore4(char4, size_t, __local char *);
+void __ovld vstore4(uchar4, size_t, __local uchar *);
+void __ovld vstore4(short4, size_t, __local short *);
+void __ovld vstore4(ushort4, size_t, __local ushort *);
+void __ovld vstore4(int4, size_t, __local int *);
+void __ovld vstore4(uint4, size_t, __local uint *);
+void __ovld vstore4(long4, size_t, __local long *);
+void __ovld vstore4(ulong4, size_t, __local ulong *);
+void __ovld vstore4(float4, size_t, __local float *);
+void __ovld vstore8(char8, size_t, __local char *);
+void __ovld vstore8(uchar8, size_t, __local uchar *);
+void __ovld vstore8(short8, size_t, __local short *);
+void __ovld vstore8(ushort8, size_t, __local ushort *);
+void __ovld vstore8(int8, size_t, __local int *);
+void __ovld vstore8(uint8, size_t, __local uint *);
+void __ovld vstore8(long8, size_t, __local long *);
+void __ovld vstore8(ulong8, size_t, __local ulong *);
+void __ovld vstore8(float8, size_t, __local float *);
+void __ovld vstore16(char16, size_t, __local char *);
+void __ovld vstore16(uchar16, size_t, __local uchar *);
+void __ovld vstore16(short16, size_t, __local short *);
+void __ovld vstore16(ushort16, size_t, __local ushort *);
+void __ovld vstore16(int16, size_t, __local int *);
+void __ovld vstore16(uint16, size_t, __local uint *);
+void __ovld vstore16(long16, size_t, __local long *);
+void __ovld vstore16(ulong16, size_t, __local ulong *);
+void __ovld vstore16(float16, size_t, __local float *);
+void __ovld vstore2(char2, size_t, __private char *);
+void __ovld vstore2(uchar2, size_t, __private uchar *);
+void __ovld vstore2(short2, size_t, __private short *);
+void __ovld vstore2(ushort2, size_t, __private ushort *);
+void __ovld vstore2(int2, size_t, __private int *);
+void __ovld vstore2(uint2, size_t, __private uint *);
+void __ovld vstore2(long2, size_t, __private long *);
+void __ovld vstore2(ulong2, size_t, __private ulong *);
+void __ovld vstore2(float2, size_t, __private float *);
+void __ovld vstore3(char3, size_t, __private char *);
+void __ovld vstore3(uchar3, size_t, __private uchar *);
+void __ovld vstore3(short3, size_t, __private short *);
+void __ovld vstore3(ushort3, size_t, __private ushort *);
+void __ovld vstore3(int3, size_t, __private int *);
+void __ovld vstore3(uint3, size_t, __private uint *);
+void __ovld vstore3(long3, size_t, __private long *);
+void __ovld vstore3(ulong3, size_t, __private ulong *);
+void __ovld vstore3(float3, size_t, __private float *);
+void __ovld vstore4(char4, size_t, __private char *);
+void __ovld vstore4(uchar4, size_t, __private uchar *);
+void __ovld vstore4(short4, size_t, __private short *);
+void __ovld vstore4(ushort4, size_t, __private ushort *);
+void __ovld vstore4(int4, size_t, __private int *);
+void __ovld vstore4(uint4, size_t, __private uint *);
+void __ovld vstore4(long4, size_t, __private long *);
+void __ovld vstore4(ulong4, size_t, __private ulong *);
+void __ovld vstore4(float4, size_t, __private float *);
+void __ovld vstore8(char8, size_t, __private char *);
+void __ovld vstore8(uchar8, size_t, __private uchar *);
+void __ovld vstore8(short8, size_t, __private short *);
+void __ovld vstore8(ushort8, size_t, __private ushort *);
+void __ovld vstore8(int8, size_t, __private int *);
+void __ovld vstore8(uint8, size_t, __private uint *);
+void __ovld vstore8(long8, size_t, __private long *);
+void __ovld vstore8(ulong8, size_t, __private ulong *);
+void __ovld vstore8(float8, size_t, __private float *);
+void __ovld vstore16(char16, size_t, __private char *);
+void __ovld vstore16(uchar16, size_t, __private uchar *);
+void __ovld vstore16(short16, size_t, __private short *);
+void __ovld vstore16(ushort16, size_t, __private ushort *);
+void __ovld vstore16(int16, size_t, __private int *);
+void __ovld vstore16(uint16, size_t, __private uint *);
+void __ovld vstore16(long16, size_t, __private long *);
+void __ovld vstore16(ulong16, size_t, __private ulong *);
+void __ovld vstore16(float16, size_t, __private float *);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2, size_t, __global double *);
+void __ovld vstore3(double3, size_t, __global double *);
+void __ovld vstore4(double4, size_t, __global double *);
+void __ovld vstore8(double8, size_t, __global double *);
+void __ovld vstore16(double16, size_t, __global double *);
+void __ovld vstore2(double2, size_t, __local double *);
+void __ovld vstore3(double3, size_t, __local double *);
+void __ovld vstore4(double4, size_t, __local double *);
+void __ovld vstore8(double8, size_t, __local double *);
+void __ovld vstore16(double16, size_t, __local double *);
+void __ovld vstore2(double2, size_t, __private double *);
+void __ovld vstore3(double3, size_t, __private double *);
+void __ovld vstore4(double4, size_t, __private double *);
+void __ovld vstore8(double8, size_t, __private double *);
+void __ovld vstore16(double16, size_t, __private double *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore2(half2, size_t, __global half *);
+void __ovld vstore3(half3, size_t, __global half *);
+void __ovld vstore4(half4, size_t, __global half *);
+void __ovld vstore8(half8, size_t, __global half *);
+void __ovld vstore16(half16, size_t, __global half *);
+void __ovld vstore2(half2, size_t, __local half *);
+void __ovld vstore3(half3, size_t, __local half *);
+void __ovld vstore4(half4, size_t, __local half *);
+void __ovld vstore8(half8, size_t, __local half *);
+void __ovld vstore16(half16, size_t, __local half *);
+void __ovld vstore2(half2, size_t, __private half *);
+void __ovld vstore3(half3, size_t, __private half *);
+void __ovld vstore4(half4, size_t, __private half *);
+void __ovld vstore8(half8, size_t, __private half *);
+void __ovld vstore16(half16, size_t, __private half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
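+/*
+ * A minimal usage sketch (kernel and buffer names are illustrative, not part
+ * of this header): load a float4, scale it, and store it back.
+ *
+ *   __kernel void scale4(__global float *buf, float s) {
+ *     size_t i = get_global_id(0);
+ *     float4 v = vload4(i, buf);   // reads buf[4*i .. 4*i+3]
+ *     vstore4(v * s, i, buf);      // writes the scaled vector back
+ *   }
+ */
+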
+/**
+ * Read sizeof (half) bytes of data from address
+ * (p + offset). The data read is interpreted as a
+ * half value. The half value is converted to a
+ * float value and the float value is returned.
+ * The read address computed as (p + offset)
+ * must be 16-bit aligned.
+ */
+float __ovld __purefn vload_half(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float __ovld __purefn vload_half(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld __purefn vload_half(size_t, const __global half *);
+float __ovld __purefn vload_half(size_t, const __local half *);
+float __ovld __purefn vload_half(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
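+/*
+ * A minimal usage sketch (names are illustrative): widen one half to float.
+ *
+ *   __kernel void widen(__global const half *src, __global float *dst) {
+ *     size_t i = get_global_id(0);
+ *     dst[i] = vload_half(i, src);  // reads src[i], returns it as float
+ *   }
+ */
+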
+/**
+ * Read sizeof (halfn) bytes of data from address
+ * (p + (offset * n)). The data read is interpreted
+ * as a halfn value. The halfn value read is
+ * converted to a floatn value and the floatn
+ * value is returned. The read address computed
+ * as (p + (offset * n)) must be 16-bit aligned.
+ */
+float2 __ovld __purefn vload_half2(size_t, const __constant half *);
+float3 __ovld __purefn vload_half3(size_t, const __constant half *);
+float4 __ovld __purefn vload_half4(size_t, const __constant half *);
+float8 __ovld __purefn vload_half8(size_t, const __constant half *);
+float16 __ovld __purefn vload_half16(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float2 __ovld __purefn vload_half2(size_t, const half *);
+float3 __ovld __purefn vload_half3(size_t, const half *);
+float4 __ovld __purefn vload_half4(size_t, const half *);
+float8 __ovld __purefn vload_half8(size_t, const half *);
+float16 __ovld __purefn vload_half16(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vload_half2(size_t, const __global half *);
+float3 __ovld __purefn vload_half3(size_t, const __global half *);
+float4 __ovld __purefn vload_half4(size_t, const __global half *);
+float8 __ovld __purefn vload_half8(size_t, const __global half *);
+float16 __ovld __purefn vload_half16(size_t, const __global half *);
+float2 __ovld __purefn vload_half2(size_t, const __local half *);
+float3 __ovld __purefn vload_half3(size_t, const __local half *);
+float4 __ovld __purefn vload_half4(size_t, const __local half *);
+float8 __ovld __purefn vload_half8(size_t, const __local half *);
+float16 __ovld __purefn vload_half16(size_t, const __local half *);
+float2 __ovld __purefn vload_half2(size_t, const __private half *);
+float3 __ovld __purefn vload_half3(size_t, const __private half *);
+float4 __ovld __purefn vload_half4(size_t, const __private half *);
+float8 __ovld __purefn vload_half8(size_t, const __private half *);
+float16 __ovld __purefn vload_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
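+/*
+ * A minimal usage sketch (names are illustrative): widen four halfs at once.
+ *
+ *   float4 v = vload_half4(i, src);  // reads src[4*i .. 4*i+3] as a float4
+ */
+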
+/**
+ * The float value given by data is first
+ * converted to a half value using the appropriate
+ * rounding mode. The half value is then written
+ * to the address computed as (p + offset). The
+ * address computed as (p + offset) must be
+ * 16-bit aligned.
+ * vstore_half uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half(float, size_t, half *);
+void __ovld vstore_half_rte(float, size_t, half *);
+void __ovld vstore_half_rtz(float, size_t, half *);
+void __ovld vstore_half_rtp(float, size_t, half *);
+void __ovld vstore_half_rtn(float, size_t, half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double, size_t, half *);
+void __ovld vstore_half_rte(double, size_t, half *);
+void __ovld vstore_half_rtz(double, size_t, half *);
+void __ovld vstore_half_rtp(double, size_t, half *);
+void __ovld vstore_half_rtn(double, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half(float, size_t, __global half *);
+void __ovld vstore_half_rte(float, size_t, __global half *);
+void __ovld vstore_half_rtz(float, size_t, __global half *);
+void __ovld vstore_half_rtp(float, size_t, __global half *);
+void __ovld vstore_half_rtn(float, size_t, __global half *);
+void __ovld vstore_half(float, size_t, __local half *);
+void __ovld vstore_half_rte(float, size_t, __local half *);
+void __ovld vstore_half_rtz(float, size_t, __local half *);
+void __ovld vstore_half_rtp(float, size_t, __local half *);
+void __ovld vstore_half_rtn(float, size_t, __local half *);
+void __ovld vstore_half(float, size_t, __private half *);
+void __ovld vstore_half_rte(float, size_t, __private half *);
+void __ovld vstore_half_rtz(float, size_t, __private half *);
+void __ovld vstore_half_rtp(float, size_t, __private half *);
+void __ovld vstore_half_rtn(float, size_t, __private half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double, size_t, __global half *);
+void __ovld vstore_half_rte(double, size_t, __global half *);
+void __ovld vstore_half_rtz(double, size_t, __global half *);
+void __ovld vstore_half_rtp(double, size_t, __global half *);
+void __ovld vstore_half_rtn(double, size_t, __global half *);
+void __ovld vstore_half(double, size_t, __local half *);
+void __ovld vstore_half_rte(double, size_t, __local half *);
+void __ovld vstore_half_rtz(double, size_t, __local half *);
+void __ovld vstore_half_rtp(double, size_t, __local half *);
+void __ovld vstore_half_rtn(double, size_t, __local half *);
+void __ovld vstore_half(double, size_t, __private half *);
+void __ovld vstore_half_rte(double, size_t, __private half *);
+void __ovld vstore_half_rtz(double, size_t, __private half *);
+void __ovld vstore_half_rtp(double, size_t, __private half *);
+void __ovld vstore_half_rtn(double, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
+
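+/*
+ * A minimal usage sketch (names are illustrative): narrow a float to half.
+ *
+ *   vstore_half(x, i, dst);      // rounds with the current mode (default RTE)
+ *   vstore_half_rtz(x, i, dst);  // rounds toward zero instead
+ */
+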
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode. The halfn value is then written to
+ * address computed as (p + (offset * n)). The
+ * address computed as (p + (offset * n)) must be
+ * 16-bit aligned.
+ * vstore_halfn uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half2(float2, size_t, half *);
+void __ovld vstore_half3(float3, size_t, half *);
+void __ovld vstore_half4(float4, size_t, half *);
+void __ovld vstore_half8(float8, size_t, half *);
+void __ovld vstore_half16(float16, size_t, half *);
+void __ovld vstore_half2_rte(float2, size_t, half *);
+void __ovld vstore_half3_rte(float3, size_t, half *);
+void __ovld vstore_half4_rte(float4, size_t, half *);
+void __ovld vstore_half8_rte(float8, size_t, half *);
+void __ovld vstore_half16_rte(float16, size_t, half *);
+void __ovld vstore_half2_rtz(float2, size_t, half *);
+void __ovld vstore_half3_rtz(float3, size_t, half *);
+void __ovld vstore_half4_rtz(float4, size_t, half *);
+void __ovld vstore_half8_rtz(float8, size_t, half *);
+void __ovld vstore_half16_rtz(float16, size_t, half *);
+void __ovld vstore_half2_rtp(float2, size_t, half *);
+void __ovld vstore_half3_rtp(float3, size_t, half *);
+void __ovld vstore_half4_rtp(float4, size_t, half *);
+void __ovld vstore_half8_rtp(float8, size_t, half *);
+void __ovld vstore_half16_rtp(float16, size_t, half *);
+void __ovld vstore_half2_rtn(float2, size_t, half *);
+void __ovld vstore_half3_rtn(float3, size_t, half *);
+void __ovld vstore_half4_rtn(float4, size_t, half *);
+void __ovld vstore_half8_rtn(float8, size_t, half *);
+void __ovld vstore_half16_rtn(float16, size_t, half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2, size_t, half *);
+void __ovld vstore_half3(double3, size_t, half *);
+void __ovld vstore_half4(double4, size_t, half *);
+void __ovld vstore_half8(double8, size_t, half *);
+void __ovld vstore_half16(double16, size_t, half *);
+void __ovld vstore_half2_rte(double2, size_t, half *);
+void __ovld vstore_half3_rte(double3, size_t, half *);
+void __ovld vstore_half4_rte(double4, size_t, half *);
+void __ovld vstore_half8_rte(double8, size_t, half *);
+void __ovld vstore_half16_rte(double16, size_t, half *);
+void __ovld vstore_half2_rtz(double2, size_t, half *);
+void __ovld vstore_half3_rtz(double3, size_t, half *);
+void __ovld vstore_half4_rtz(double4, size_t, half *);
+void __ovld vstore_half8_rtz(double8, size_t, half *);
+void __ovld vstore_half16_rtz(double16, size_t, half *);
+void __ovld vstore_half2_rtp(double2, size_t, half *);
+void __ovld vstore_half3_rtp(double3, size_t, half *);
+void __ovld vstore_half4_rtp(double4, size_t, half *);
+void __ovld vstore_half8_rtp(double8, size_t, half *);
+void __ovld vstore_half16_rtp(double16, size_t, half *);
+void __ovld vstore_half2_rtn(double2, size_t, half *);
+void __ovld vstore_half3_rtn(double3, size_t, half *);
+void __ovld vstore_half4_rtn(double4, size_t, half *);
+void __ovld vstore_half8_rtn(double8, size_t, half *);
+void __ovld vstore_half16_rtn(double16, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half2(float2, size_t, __global half *);
+void __ovld vstore_half3(float3, size_t, __global half *);
+void __ovld vstore_half4(float4, size_t, __global half *);
+void __ovld vstore_half8(float8, size_t, __global half *);
+void __ovld vstore_half16(float16, size_t, __global half *);
+void __ovld vstore_half2_rte(float2, size_t, __global half *);
+void __ovld vstore_half3_rte(float3, size_t, __global half *);
+void __ovld vstore_half4_rte(float4, size_t, __global half *);
+void __ovld vstore_half8_rte(float8, size_t, __global half *);
+void __ovld vstore_half16_rte(float16, size_t, __global half *);
+void __ovld vstore_half2_rtz(float2, size_t, __global half *);
+void __ovld vstore_half3_rtz(float3, size_t, __global half *);
+void __ovld vstore_half4_rtz(float4, size_t, __global half *);
+void __ovld vstore_half8_rtz(float8, size_t, __global half *);
+void __ovld vstore_half16_rtz(float16, size_t, __global half *);
+void __ovld vstore_half2_rtp(float2, size_t, __global half *);
+void __ovld vstore_half3_rtp(float3, size_t, __global half *);
+void __ovld vstore_half4_rtp(float4, size_t, __global half *);
+void __ovld vstore_half8_rtp(float8, size_t, __global half *);
+void __ovld vstore_half16_rtp(float16, size_t, __global half *);
+void __ovld vstore_half2_rtn(float2, size_t, __global half *);
+void __ovld vstore_half3_rtn(float3, size_t, __global half *);
+void __ovld vstore_half4_rtn(float4, size_t, __global half *);
+void __ovld vstore_half8_rtn(float8, size_t, __global half *);
+void __ovld vstore_half16_rtn(float16, size_t, __global half *);
+void __ovld vstore_half2(float2, size_t, __local half *);
+void __ovld vstore_half3(float3, size_t, __local half *);
+void __ovld vstore_half4(float4, size_t, __local half *);
+void __ovld vstore_half8(float8, size_t, __local half *);
+void __ovld vstore_half16(float16, size_t, __local half *);
+void __ovld vstore_half2_rte(float2, size_t, __local half *);
+void __ovld vstore_half3_rte(float3, size_t, __local half *);
+void __ovld vstore_half4_rte(float4, size_t, __local half *);
+void __ovld vstore_half8_rte(float8, size_t, __local half *);
+void __ovld vstore_half16_rte(float16, size_t, __local half *);
+void __ovld vstore_half2_rtz(float2, size_t, __local half *);
+void __ovld vstore_half3_rtz(float3, size_t, __local half *);
+void __ovld vstore_half4_rtz(float4, size_t, __local half *);
+void __ovld vstore_half8_rtz(float8, size_t, __local half *);
+void __ovld vstore_half16_rtz(float16, size_t, __local half *);
+void __ovld vstore_half2_rtp(float2, size_t, __local half *);
+void __ovld vstore_half3_rtp(float3, size_t, __local half *);
+void __ovld vstore_half4_rtp(float4, size_t, __local half *);
+void __ovld vstore_half8_rtp(float8, size_t, __local half *);
+void __ovld vstore_half16_rtp(float16, size_t, __local half *);
+void __ovld vstore_half2_rtn(float2, size_t, __local half *);
+void __ovld vstore_half3_rtn(float3, size_t, __local half *);
+void __ovld vstore_half4_rtn(float4, size_t, __local half *);
+void __ovld vstore_half8_rtn(float8, size_t, __local half *);
+void __ovld vstore_half16_rtn(float16, size_t, __local half *);
+void __ovld vstore_half2(float2, size_t, __private half *);
+void __ovld vstore_half3(float3, size_t, __private half *);
+void __ovld vstore_half4(float4, size_t, __private half *);
+void __ovld vstore_half8(float8, size_t, __private half *);
+void __ovld vstore_half16(float16, size_t, __private half *);
+void __ovld vstore_half2_rte(float2, size_t, __private half *);
+void __ovld vstore_half3_rte(float3, size_t, __private half *);
+void __ovld vstore_half4_rte(float4, size_t, __private half *);
+void __ovld vstore_half8_rte(float8, size_t, __private half *);
+void __ovld vstore_half16_rte(float16, size_t, __private half *);
+void __ovld vstore_half2_rtz(float2, size_t, __private half *);
+void __ovld vstore_half3_rtz(float3, size_t, __private half *);
+void __ovld vstore_half4_rtz(float4, size_t, __private half *);
+void __ovld vstore_half8_rtz(float8, size_t, __private half *);
+void __ovld vstore_half16_rtz(float16, size_t, __private half *);
+void __ovld vstore_half2_rtp(float2, size_t, __private half *);
+void __ovld vstore_half3_rtp(float3, size_t, __private half *);
+void __ovld vstore_half4_rtp(float4, size_t, __private half *);
+void __ovld vstore_half8_rtp(float8, size_t, __private half *);
+void __ovld vstore_half16_rtp(float16, size_t, __private half *);
+void __ovld vstore_half2_rtn(float2, size_t, __private half *);
+void __ovld vstore_half3_rtn(float3, size_t, __private half *);
+void __ovld vstore_half4_rtn(float4, size_t, __private half *);
+void __ovld vstore_half8_rtn(float8, size_t, __private half *);
+void __ovld vstore_half16_rtn(float16, size_t, __private half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2, size_t, __global half *);
+void __ovld vstore_half3(double3, size_t, __global half *);
+void __ovld vstore_half4(double4, size_t, __global half *);
+void __ovld vstore_half8(double8, size_t, __global half *);
+void __ovld vstore_half16(double16, size_t, __global half *);
+void __ovld vstore_half2_rte(double2, size_t, __global half *);
+void __ovld vstore_half3_rte(double3, size_t, __global half *);
+void __ovld vstore_half4_rte(double4, size_t, __global half *);
+void __ovld vstore_half8_rte(double8, size_t, __global half *);
+void __ovld vstore_half16_rte(double16, size_t, __global half *);
+void __ovld vstore_half2_rtz(double2, size_t, __global half *);
+void __ovld vstore_half3_rtz(double3, size_t, __global half *);
+void __ovld vstore_half4_rtz(double4, size_t, __global half *);
+void __ovld vstore_half8_rtz(double8, size_t, __global half *);
+void __ovld vstore_half16_rtz(double16, size_t, __global half *);
+void __ovld vstore_half2_rtp(double2, size_t, __global half *);
+void __ovld vstore_half3_rtp(double3, size_t, __global half *);
+void __ovld vstore_half4_rtp(double4, size_t, __global half *);
+void __ovld vstore_half8_rtp(double8, size_t, __global half *);
+void __ovld vstore_half16_rtp(double16, size_t, __global half *);
+void __ovld vstore_half2_rtn(double2, size_t, __global half *);
+void __ovld vstore_half3_rtn(double3, size_t, __global half *);
+void __ovld vstore_half4_rtn(double4, size_t, __global half *);
+void __ovld vstore_half8_rtn(double8, size_t, __global half *);
+void __ovld vstore_half16_rtn(double16, size_t, __global half *);
+void __ovld vstore_half2(double2, size_t, __local half *);
+void __ovld vstore_half3(double3, size_t, __local half *);
+void __ovld vstore_half4(double4, size_t, __local half *);
+void __ovld vstore_half8(double8, size_t, __local half *);
+void __ovld vstore_half16(double16, size_t, __local half *);
+void __ovld vstore_half2_rte(double2, size_t, __local half *);
+void __ovld vstore_half3_rte(double3, size_t, __local half *);
+void __ovld vstore_half4_rte(double4, size_t, __local half *);
+void __ovld vstore_half8_rte(double8, size_t, __local half *);
+void __ovld vstore_half16_rte(double16, size_t, __local half *);
+void __ovld vstore_half2_rtz(double2, size_t, __local half *);
+void __ovld vstore_half3_rtz(double3, size_t, __local half *);
+void __ovld vstore_half4_rtz(double4, size_t, __local half *);
+void __ovld vstore_half8_rtz(double8, size_t, __local half *);
+void __ovld vstore_half16_rtz(double16, size_t, __local half *);
+void __ovld vstore_half2_rtp(double2, size_t, __local half *);
+void __ovld vstore_half3_rtp(double3, size_t, __local half *);
+void __ovld vstore_half4_rtp(double4, size_t, __local half *);
+void __ovld vstore_half8_rtp(double8, size_t, __local half *);
+void __ovld vstore_half16_rtp(double16, size_t, __local half *);
+void __ovld vstore_half2_rtn(double2, size_t, __local half *);
+void __ovld vstore_half3_rtn(double3, size_t, __local half *);
+void __ovld vstore_half4_rtn(double4, size_t, __local half *);
+void __ovld vstore_half8_rtn(double8, size_t, __local half *);
+void __ovld vstore_half16_rtn(double16, size_t, __local half *);
+void __ovld vstore_half2(double2, size_t, __private half *);
+void __ovld vstore_half3(double3, size_t, __private half *);
+void __ovld vstore_half4(double4, size_t, __private half *);
+void __ovld vstore_half8(double8, size_t, __private half *);
+void __ovld vstore_half16(double16, size_t, __private half *);
+void __ovld vstore_half2_rte(double2, size_t, __private half *);
+void __ovld vstore_half3_rte(double3, size_t, __private half *);
+void __ovld vstore_half4_rte(double4, size_t, __private half *);
+void __ovld vstore_half8_rte(double8, size_t, __private half *);
+void __ovld vstore_half16_rte(double16, size_t, __private half *);
+void __ovld vstore_half2_rtz(double2, size_t, __private half *);
+void __ovld vstore_half3_rtz(double3, size_t, __private half *);
+void __ovld vstore_half4_rtz(double4, size_t, __private half *);
+void __ovld vstore_half8_rtz(double8, size_t, __private half *);
+void __ovld vstore_half16_rtz(double16, size_t, __private half *);
+void __ovld vstore_half2_rtp(double2, size_t, __private half *);
+void __ovld vstore_half3_rtp(double3, size_t, __private half *);
+void __ovld vstore_half4_rtp(double4, size_t, __private half *);
+void __ovld vstore_half8_rtp(double8, size_t, __private half *);
+void __ovld vstore_half16_rtp(double16, size_t, __private half *);
+void __ovld vstore_half2_rtn(double2, size_t, __private half *);
+void __ovld vstore_half3_rtn(double3, size_t, __private half *);
+void __ovld vstore_half4_rtn(double4, size_t, __private half *);
+void __ovld vstore_half8_rtn(double8, size_t, __private half *);
+void __ovld vstore_half16_rtn(double16, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
+
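+/*
+ * A minimal usage sketch (names are illustrative): narrow a float4 to half4.
+ *
+ *   vstore_half4(v, i, dst);      // writes dst[4*i .. 4*i+3]
+ *   vstore_half4_rtp(v, i, dst);  // same, rounding toward +infinity
+ */
+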
+/**
+ * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
+ * bytes of data from address (p + (offset * n)).
+ * The data read is interpreted as a halfn value.
+ * The halfn value read is converted to a floatn
+ * value and the floatn value is returned.
+ * The address computed as (p + (offset * n))
+ * must be aligned to sizeof (halfn) bytes.
+ * For n = 3, vloada_half3 reads a half3 from
+ * address (p + (offset * 4)) and returns a float3.
+ * The address computed as (p + (offset * 4))
+ * must be aligned to sizeof (half) * 4 bytes.
+ */
+float2 __ovld __purefn vloada_half2(size_t, const __constant half *);
+float3 __ovld __purefn vloada_half3(size_t, const __constant half *);
+float4 __ovld __purefn vloada_half4(size_t, const __constant half *);
+float8 __ovld __purefn vloada_half8(size_t, const __constant half *);
+float16 __ovld __purefn vloada_half16(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float2 __ovld __purefn vloada_half2(size_t, const half *);
+float3 __ovld __purefn vloada_half3(size_t, const half *);
+float4 __ovld __purefn vloada_half4(size_t, const half *);
+float8 __ovld __purefn vloada_half8(size_t, const half *);
+float16 __ovld __purefn vloada_half16(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vloada_half2(size_t, const __global half *);
+float3 __ovld __purefn vloada_half3(size_t, const __global half *);
+float4 __ovld __purefn vloada_half4(size_t, const __global half *);
+float8 __ovld __purefn vloada_half8(size_t, const __global half *);
+float16 __ovld __purefn vloada_half16(size_t, const __global half *);
+float2 __ovld __purefn vloada_half2(size_t, const __local half *);
+float3 __ovld __purefn vloada_half3(size_t, const __local half *);
+float4 __ovld __purefn vloada_half4(size_t, const __local half *);
+float8 __ovld __purefn vloada_half8(size_t, const __local half *);
+float16 __ovld __purefn vloada_half16(size_t, const __local half *);
+float2 __ovld __purefn vloada_half2(size_t, const __private half *);
+float3 __ovld __purefn vloada_half3(size_t, const __private half *);
+float4 __ovld __purefn vloada_half4(size_t, const __private half *);
+float8 __ovld __purefn vloada_half8(size_t, const __private half *);
+float16 __ovld __purefn vloada_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
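+/*
+ * A minimal usage sketch (names are illustrative): the aligned variants index
+ * half3 data with a stride of 4 elements, unlike vload_half3.
+ *
+ *   float3 a = vloada_half3(i, src);  // reads src[4*i .. 4*i+2]
+ *   float3 b = vload_half3(i, src);   // reads src[3*i .. 3*i+2]
+ */
+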
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode.
+ * For n = 1, 2, 4, 8 and 16, the halfn value is
+ * written to the address computed as (p + (offset
+ * * n)). The address computed as (p + (offset *
+ * n)) must be aligned to sizeof (halfn) bytes.
+ * For n = 3, the half3 value is written to the
+ * address computed as (p + (offset * 4)). The
+ * address computed as (p + (offset * 4)) must be
+ * aligned to sizeof (half) * 4 bytes.
+ * vstorea_halfn uses the current rounding
+ * mode. The default current rounding mode is
+ * round to nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstorea_half2(float2, size_t, half *);
+void __ovld vstorea_half3(float3, size_t, half *);
+void __ovld vstorea_half4(float4, size_t, half *);
+void __ovld vstorea_half8(float8, size_t, half *);
+void __ovld vstorea_half16(float16, size_t, half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, half *);
+void __ovld vstorea_half3_rte(float3, size_t, half *);
+void __ovld vstorea_half4_rte(float4, size_t, half *);
+void __ovld vstorea_half8_rte(float8, size_t, half *);
+void __ovld vstorea_half16_rte(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, half *);
+void __ovld vstorea_half3_rtz(float3, size_t, half *);
+void __ovld vstorea_half4_rtz(float4, size_t, half *);
+void __ovld vstorea_half8_rtz(float8, size_t, half *);
+void __ovld vstorea_half16_rtz(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, half *);
+void __ovld vstorea_half3_rtp(float3, size_t, half *);
+void __ovld vstorea_half4_rtp(float4, size_t, half *);
+void __ovld vstorea_half8_rtp(float8, size_t, half *);
+void __ovld vstorea_half16_rtp(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, half *);
+void __ovld vstorea_half3_rtn(float3, size_t, half *);
+void __ovld vstorea_half4_rtn(float4, size_t, half *);
+void __ovld vstorea_half8_rtn(float8, size_t, half *);
+void __ovld vstorea_half16_rtn(float16, size_t, half *);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half2(double2, size_t, half *);
+void __ovld vstorea_half3(double3, size_t, half *);
+void __ovld vstorea_half4(double4, size_t, half *);
+void __ovld vstorea_half8(double8, size_t, half *);
+void __ovld vstorea_half16(double16, size_t, half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, half *);
+void __ovld vstorea_half3_rte(double3, size_t, half *);
+void __ovld vstorea_half4_rte(double4, size_t, half *);
+void __ovld vstorea_half8_rte(double8, size_t, half *);
+void __ovld vstorea_half16_rte(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, half *);
+void __ovld vstorea_half3_rtz(double3, size_t, half *);
+void __ovld vstorea_half4_rtz(double4, size_t, half *);
+void __ovld vstorea_half8_rtz(double8, size_t, half *);
+void __ovld vstorea_half16_rtz(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, half *);
+void __ovld vstorea_half3_rtp(double3, size_t, half *);
+void __ovld vstorea_half4_rtp(double4, size_t, half *);
+void __ovld vstorea_half8_rtp(double8, size_t, half *);
+void __ovld vstorea_half16_rtp(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, half *);
+void __ovld vstorea_half3_rtn(double3, size_t, half *);
+void __ovld vstorea_half4_rtn(double4, size_t, half *);
+void __ovld vstorea_half8_rtn(double8, size_t, half *);
+void __ovld vstorea_half16_rtn(double16, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstorea_half2(float2, size_t, __global half *);
+void __ovld vstorea_half3(float3, size_t, __global half *);
+void __ovld vstorea_half4(float4, size_t, __global half *);
+void __ovld vstorea_half8(float8, size_t, __global half *);
+void __ovld vstorea_half16(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __global half *);
+void __ovld vstorea_half3_rte(float3, size_t, __global half *);
+void __ovld vstorea_half4_rte(float4, size_t, __global half *);
+void __ovld vstorea_half8_rte(float8, size_t, __global half *);
+void __ovld vstorea_half16_rte(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __global half *);
+
+void __ovld vstorea_half2(float2, size_t, __local half *);
+void __ovld vstorea_half3(float3, size_t, __local half *);
+void __ovld vstorea_half4(float4, size_t, __local half *);
+void __ovld vstorea_half8(float8, size_t, __local half *);
+void __ovld vstorea_half16(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __local half *);
+void __ovld vstorea_half3_rte(float3, size_t, __local half *);
+void __ovld vstorea_half4_rte(float4, size_t, __local half *);
+void __ovld vstorea_half8_rte(float8, size_t, __local half *);
+void __ovld vstorea_half16_rte(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __local half *);
+
+void __ovld vstorea_half2(float2, size_t, __private half *);
+void __ovld vstorea_half3(float3, size_t, __private half *);
+void __ovld vstorea_half4(float4, size_t, __private half *);
+void __ovld vstorea_half8(float8, size_t, __private half *);
+void __ovld vstorea_half16(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __private half *);
+void __ovld vstorea_half3_rte(float3, size_t, __private half *);
+void __ovld vstorea_half4_rte(float4, size_t, __private half *);
+void __ovld vstorea_half8_rte(float8, size_t, __private half *);
+void __ovld vstorea_half16_rte(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __private half *);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half2(double2, size_t, __global half *);
+void __ovld vstorea_half3(double3, size_t, __global half *);
+void __ovld vstorea_half4(double4, size_t, __global half *);
+void __ovld vstorea_half8(double8, size_t, __global half *);
+void __ovld vstorea_half16(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __global half *);
+void __ovld vstorea_half3_rte(double3, size_t, __global half *);
+void __ovld vstorea_half4_rte(double4, size_t, __global half *);
+void __ovld vstorea_half8_rte(double8, size_t, __global half *);
+void __ovld vstorea_half16_rte(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __global half *);
+
+void __ovld vstorea_half2(double2, size_t, __local half *);
+void __ovld vstorea_half3(double3, size_t, __local half *);
+void __ovld vstorea_half4(double4, size_t, __local half *);
+void __ovld vstorea_half8(double8, size_t, __local half *);
+void __ovld vstorea_half16(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __local half *);
+void __ovld vstorea_half3_rte(double3, size_t, __local half *);
+void __ovld vstorea_half4_rte(double4, size_t, __local half *);
+void __ovld vstorea_half8_rte(double8, size_t, __local half *);
+void __ovld vstorea_half16_rte(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __local half *);
+
+void __ovld vstorea_half2(double2, size_t, __private half *);
+void __ovld vstorea_half3(double3, size_t, __private half *);
+void __ovld vstorea_half4(double4, size_t, __private half *);
+void __ovld vstorea_half8(double8, size_t, __private half *);
+void __ovld vstorea_half16(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __private half *);
+void __ovld vstorea_half3_rte(double3, size_t, __private half *);
+void __ovld vstorea_half4_rte(double4, size_t, __private half *);
+void __ovld vstorea_half8_rte(double8, size_t, __private half *);
+void __ovld vstorea_half16_rte(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
+
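+/*
+ * A minimal usage sketch (names are illustrative): aligned half3 store, which
+ * writes three halfs at a stride of 4 elements.
+ *
+ *   vstorea_half3(v, i, dst);  // writes dst[4*i .. 4*i+2]
+ */
+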
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+/**
+ * All work-items in a work-group executing the kernel
+ * on a processor must execute this function before any
+ * are allowed to continue execution beyond the barrier.
+ * This function must be encountered by all work-items in
+ * a work-group executing the kernel.
+ * If barrier is inside a conditional statement, then all
+ * work-items must enter the conditional if any work-item
+ * enters the conditional statement and executes the
+ * barrier.
+ * If barrier is inside a loop, all work-items must execute
+ * the barrier for each iteration of the loop before any are
+ * allowed to continue execution beyond the barrier.
+ * The barrier function also queues a memory fence
+ * (reads and writes) to ensure correct ordering of
+ * memory operations to local or global memory.
+ * The flags argument specifies the memory address space
+ * and can be set to a combination of the following literal
+ * values.
+ * CLK_LOCAL_MEM_FENCE - The barrier function
+ * will either flush any variables stored in local memory
+ * or queue a memory fence to ensure correct ordering of
+ * memory operations to local memory.
+ * CLK_GLOBAL_MEM_FENCE - The barrier function
+ * will queue a memory fence to ensure correct ordering
+ * of memory operations to global memory. This can be
+ * useful when work-items, for example, write to buffer or
+ * image objects and then want to read the updated data.
+ */
+
+void __ovld __conv barrier(cl_mem_fence_flags);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv work_group_barrier(cl_mem_fence_flags, memory_scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
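+/*
+ * A minimal usage sketch (names are illustrative): every work-item writes its
+ * value to local memory, then all of them wait before reading a neighbour's.
+ *
+ *   __kernel void rotate_left(__global int *buf, __local int *tmp) {
+ *     size_t l = get_local_id(0), n = get_local_size(0);
+ *     tmp[l] = buf[get_global_id(0)];
+ *     barrier(CLK_LOCAL_MEM_FENCE);   // all stores to tmp are visible below
+ *     buf[get_global_id(0)] = tmp[(l + 1) % n];
+ *   }
+ */
+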
+// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
+
+/**
+ * Orders loads and stores of a work-item
+ * executing a kernel. This means that loads
+ * and stores preceding the mem_fence will
+ * be committed to memory before any loads
+ * and stores following the mem_fence.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld mem_fence(cl_mem_fence_flags);
+
+/**
+ * Read memory barrier that orders only
+ * loads.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld read_mem_fence(cl_mem_fence_flags);
+
+/**
+ * Write memory barrier that orders only
+ * stores.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld write_mem_fence(cl_mem_fence_flags);
+
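+/*
+ * A minimal usage sketch (names are illustrative): within a single work-item,
+ * force the first store to be committed before the second.
+ *
+ *   out[2 * i] = x;
+ *   write_mem_fence(CLK_GLOBAL_MEM_FENCE);  // orders this work-item's stores;
+ *                                           // does not synchronize work-items
+ *   out[2 * i + 1] = y;
+ */
+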
+// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
+
+#if defined(__opencl_c_generic_address_space)
+cl_mem_fence_flags __ovld get_fence(const void *ptr);
+cl_mem_fence_flags __ovld get_fence(void *ptr);
+
+/**
+ * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
+ * and checked in Sema since they should be declared as
+ *   addr gentype* to_addr (gentype*);
+ * where gentype is builtin type or user defined type.
+ */
+
+#endif //defined(__opencl_c_generic_address_space)
+
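+/*
+ * A minimal usage sketch (names are illustrative): recover the address space
+ * of a generic pointer at run time.
+ *
+ *   void touch(int *p) {               // p is in the generic address space
+ *     __global int *g = to_global(p);  // NULL if p is not a global pointer
+ *     if (g) *g += 1;
+ *   }
+ */
+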
+// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
+
+/**
+ * event_t async_work_group_copy (
+ * __local gentype *dst,
+ * const __global gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * and
+ * event_t async_work_group_copy (
+ * __global gentype *dst,
+ * const __local gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * Perform an async copy of num_elements
+ * gentype elements from src to dst; overloads
+ * are provided for both copy directions. The
+ * async copy is performed by all work-items in
+ * a work-group and this built-in function must
+ * therefore be encountered by all work-items in
+ * a work-group executing the kernel with the same
+ * argument values; otherwise the results are
+ * undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the async_work_group_copy with
+ * a previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event
+ * object supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_copy(__local char *, const __global char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar *, const __global uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short *, const __global short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort *, const __global ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int *, const __global int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint *, const __global uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long *, const __global long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong *, const __global ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float *, const __global float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char2 *, const __global char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar2 *, const __global uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short2 *, const __global short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort2 *, const __global ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int2 *, const __global int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint2 *, const __global uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long2 *, const __global long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong2 *, const __global ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float2 *, const __global float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char3 *, const __global char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar3 *, const __global uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short3 *, const __global short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort3 *, const __global ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int3 *, const __global int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint3 *, const __global uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long3 *, const __global long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong3 *, const __global ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float3 *, const __global float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char4 *, const __global char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar4 *, const __global uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short4 *, const __global short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort4 *, const __global ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int4 *, const __global int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint4 *, const __global uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long4 *, const __global long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong4 *, const __global ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float4 *, const __global float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char8 *, const __global char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar8 *, const __global uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short8 *, const __global short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort8 *, const __global ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int8 *, const __global int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint8 *, const __global uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long8 *, const __global long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong8 *, const __global ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float8 *, const __global float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char16 *, const __global char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar16 *, const __global uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short16 *, const __global short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort16 *, const __global ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int16 *, const __global int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint16 *, const __global uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long16 *, const __global long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong16 *, const __global ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float16 *, const __global float16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char *, const __local char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar *, const __local uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short *, const __local short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort *, const __local ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int *, const __local int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint *, const __local uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long *, const __local long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong *, const __local ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float *, const __local float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char2 *, const __local char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar2 *, const __local uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short2 *, const __local short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort2 *, const __local ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int2 *, const __local int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint2 *, const __local uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long2 *, const __local long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong2 *, const __local ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float2 *, const __local float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char3 *, const __local char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar3 *, const __local uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short3 *, const __local short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort3 *, const __local ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int3 *, const __local int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint3 *, const __local uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long3 *, const __local long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong3 *, const __local ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float3 *, const __local float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char4 *, const __local char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar4 *, const __local uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short4 *, const __local short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort4 *, const __local ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int4 *, const __local int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint4 *, const __local uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long4 *, const __local long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong4 *, const __local ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float4 *, const __local float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char8 *, const __local char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar8 *, const __local uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short8 *, const __local short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort8 *, const __local ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int8 *, const __local int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint8 *, const __local uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long8 *, const __local long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong8 *, const __local ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float8 *, const __local float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char16 *, const __local char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar16 *, const __local uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short16 *, const __local short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort16 *, const __local ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int16 *, const __local int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint16 *, const __local uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long16 *, const __local long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong16 *, const __local ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float16 *, const __local float16 *, size_t, event_t);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_copy(__local double *, const __global double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double2 *, const __global double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double3 *, const __global double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double4 *, const __global double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double8 *, const __global double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double16 *, const __global double16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double *, const __local double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double2 *, const __local double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double3 *, const __local double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double4 *, const __local double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double8 *, const __local double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double16 *, const __local double16 *, size_t, event_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_copy(__local half *, const __global half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half2 *, const __global half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half3 *, const __global half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half4 *, const __global half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half8 *, const __global half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half16 *, const __global half16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half *, const __local half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half2 *, const __local half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half3 *, const __local half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half4 *, const __local half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half8 *, const __local half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half16 *, const __local half16 *, size_t, event_t);
+#endif //cl_khr_fp16
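+
+/*
+ * Usage sketch (illustrative only): stage one tile per work-group into local
+ * memory, then block on the returned event with wait_group_events(). Every
+ * work-item in the group executes both calls with identical arguments.
+ *
+ *   __kernel void tile_scale(__global const float *in, __global float *out,
+ *                            __local float *tile) {
+ *     size_t n = get_local_size(0);
+ *     event_t e = async_work_group_copy(tile, in + get_group_id(0) * n, n, 0);
+ *     wait_group_events(1, &e);
+ *     out[get_global_id(0)] = 2.0f * tile[get_local_id(0)];
+ *   }
+ */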
+
+/**
+ * Perform an async gather or scatter of
+ * num_elements gentype elements between src and
+ * dst. Each overload takes a single stride: for
+ * the __global-to-__local form, src_stride is the
+ * stride in elements between gentype elements
+ * read from src; for the __local-to-__global
+ * form, dst_stride is the stride in elements
+ * between gentype elements written to dst. The
+ * async copy is performed by all work-items in a
+ * work-group.
+ * This built-in function must therefore be
+ * encountered by all work-items in a work-group
+ * executing the kernel with the same argument
+ * values; otherwise the results are undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the
+ * async_work_group_strided_copy with a
+ * previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event
+ * object supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_strided_copy(__local char *, const __global char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar *, const __global uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short *, const __global short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort *, const __global ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int *, const __global int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint *, const __global uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long *, const __global long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong *, const __global ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float *, const __global float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char2 *, const __global char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *, const __global uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short2 *, const __global short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *, const __global ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int2 *, const __global int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint2 *, const __global uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long2 *, const __global long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *, const __global ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float2 *, const __global float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char3 *, const __global char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *, const __global uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short3 *, const __global short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *, const __global ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int3 *, const __global int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint3 *, const __global uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long3 *, const __global long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *, const __global ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float3 *, const __global float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char4 *, const __global char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *, const __global uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short4 *, const __global short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *, const __global ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int4 *, const __global int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint4 *, const __global uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long4 *, const __global long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *, const __global ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float4 *, const __global float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char8 *, const __global char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *, const __global uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short8 *, const __global short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *, const __global ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int8 *, const __global int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint8 *, const __global uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long8 *, const __global long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *, const __global ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float8 *, const __global float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char16 *, const __global char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *, const __global uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short16 *, const __global short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *, const __global ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int16 *, const __global int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint16 *, const __global uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long16 *, const __global long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *, const __global ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float16 *, const __global float16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char *, const __local char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar *, const __local uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short *, const __local short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort *, const __local ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int *, const __local int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint *, const __local uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long *, const __local long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong *, const __local ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float *, const __local float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char2 *, const __local char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *, const __local uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short2 *, const __local short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *, const __local ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int2 *, const __local int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint2 *, const __local uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long2 *, const __local long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *, const __local ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float2 *, const __local float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char3 *, const __local char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *, const __local uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short3 *, const __local short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *, const __local ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int3 *, const __local int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint3 *, const __local uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long3 *, const __local long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *, const __local ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float3 *, const __local float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char4 *, const __local char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *, const __local uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short4 *, const __local short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *, const __local ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int4 *, const __local int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint4 *, const __local uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long4 *, const __local long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *, const __local ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float4 *, const __local float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char8 *, const __local char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *, const __local uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short8 *, const __local short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *, const __local ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int8 *, const __local int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint8 *, const __local uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long8 *, const __local long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *, const __local ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float8 *, const __local float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char16 *, const __local char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *, const __local uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short16 *, const __local short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *, const __local ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int16 *, const __local int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint16 *, const __local uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long16 *, const __local long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *, const __local ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float16 *, const __local float16 *, size_t, size_t, event_t);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_strided_copy(__local double *, const __global double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double2 *, const __global double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double3 *, const __global double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double4 *, const __global double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double8 *, const __global double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double16 *, const __global double16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double *, const __local double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double2 *, const __local double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double3 *, const __local double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double4 *, const __local double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double8 *, const __local double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double16 *, const __local double16 *, size_t, size_t, event_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_strided_copy(__local half *, const __global half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half2 *, const __global half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half3 *, const __global half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half4 *, const __global half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half8 *, const __global half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half16 *, const __global half16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half *, const __local half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half2 *, const __local half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half3 *, const __local half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half4 *, const __local half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half8 *, const __local half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half16 *, const __local half16 *, size_t, size_t, event_t);
+#endif //cl_khr_fp16
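+
+/*
+ * Usage sketch (illustrative only): gather one column of a row-major
+ * width x height matrix into local memory; for the __global-to-__local
+ * form, the stride argument applies on the __global side.
+ *
+ *   // inside a kernel, executed by all work-items of the group:
+ *   event_t e = async_work_group_strided_copy(col_tile,   // __local float *
+ *                                             mat + col,  // __global base of column
+ *                                             height,     // num_elements
+ *                                             width,      // src_stride in elements
+ *                                             0);
+ *   wait_group_events(1, &e);
+ */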
+
+/**
+ * Wait for events that identify the
+ * async_work_group_copy operations to
+ * complete. The event objects specified in
+ * event_list will be released after the wait is
+ * performed.
+ * This function must be encountered by all work-items
+ * in a work-group executing the kernel with
+ * the same num_events and event objects specified
+ * in event_list; otherwise the results are undefined.
+ */
+void __ovld wait_group_events(int, event_t *);
+
+/**
+ * Prefetch num_elements * sizeof(gentype)
+ * bytes into the global cache. The prefetch
+ * instruction is applied to a work-item in a work-group
+ * and does not affect the functional
+ * behavior of the kernel.
+ */
+void __ovld prefetch(const __global char *, size_t);
+void __ovld prefetch(const __global uchar *, size_t);
+void __ovld prefetch(const __global short *, size_t);
+void __ovld prefetch(const __global ushort *, size_t);
+void __ovld prefetch(const __global int *, size_t);
+void __ovld prefetch(const __global uint *, size_t);
+void __ovld prefetch(const __global long *, size_t);
+void __ovld prefetch(const __global ulong *, size_t);
+void __ovld prefetch(const __global float *, size_t);
+void __ovld prefetch(const __global char2 *, size_t);
+void __ovld prefetch(const __global uchar2 *, size_t);
+void __ovld prefetch(const __global short2 *, size_t);
+void __ovld prefetch(const __global ushort2 *, size_t);
+void __ovld prefetch(const __global int2 *, size_t);
+void __ovld prefetch(const __global uint2 *, size_t);
+void __ovld prefetch(const __global long2 *, size_t);
+void __ovld prefetch(const __global ulong2 *, size_t);
+void __ovld prefetch(const __global float2 *, size_t);
+void __ovld prefetch(const __global char3 *, size_t);
+void __ovld prefetch(const __global uchar3 *, size_t);
+void __ovld prefetch(const __global short3 *, size_t);
+void __ovld prefetch(const __global ushort3 *, size_t);
+void __ovld prefetch(const __global int3 *, size_t);
+void __ovld prefetch(const __global uint3 *, size_t);
+void __ovld prefetch(const __global long3 *, size_t);
+void __ovld prefetch(const __global ulong3 *, size_t);
+void __ovld prefetch(const __global float3 *, size_t);
+void __ovld prefetch(const __global char4 *, size_t);
+void __ovld prefetch(const __global uchar4 *, size_t);
+void __ovld prefetch(const __global short4 *, size_t);
+void __ovld prefetch(const __global ushort4 *, size_t);
+void __ovld prefetch(const __global int4 *, size_t);
+void __ovld prefetch(const __global uint4 *, size_t);
+void __ovld prefetch(const __global long4 *, size_t);
+void __ovld prefetch(const __global ulong4 *, size_t);
+void __ovld prefetch(const __global float4 *, size_t);
+void __ovld prefetch(const __global char8 *, size_t);
+void __ovld prefetch(const __global uchar8 *, size_t);
+void __ovld prefetch(const __global short8 *, size_t);
+void __ovld prefetch(const __global ushort8 *, size_t);
+void __ovld prefetch(const __global int8 *, size_t);
+void __ovld prefetch(const __global uint8 *, size_t);
+void __ovld prefetch(const __global long8 *, size_t);
+void __ovld prefetch(const __global ulong8 *, size_t);
+void __ovld prefetch(const __global float8 *, size_t);
+void __ovld prefetch(const __global char16 *, size_t);
+void __ovld prefetch(const __global uchar16 *, size_t);
+void __ovld prefetch(const __global short16 *, size_t);
+void __ovld prefetch(const __global ushort16 *, size_t);
+void __ovld prefetch(const __global int16 *, size_t);
+void __ovld prefetch(const __global uint16 *, size_t);
+void __ovld prefetch(const __global long16 *, size_t);
+void __ovld prefetch(const __global ulong16 *, size_t);
+void __ovld prefetch(const __global float16 *, size_t);
+#ifdef cl_khr_fp64
+void __ovld prefetch(const __global double *, size_t);
+void __ovld prefetch(const __global double2 *, size_t);
+void __ovld prefetch(const __global double3 *, size_t);
+void __ovld prefetch(const __global double4 *, size_t);
+void __ovld prefetch(const __global double8 *, size_t);
+void __ovld prefetch(const __global double16 *, size_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld prefetch(const __global half *, size_t);
+void __ovld prefetch(const __global half2 *, size_t);
+void __ovld prefetch(const __global half3 *, size_t);
+void __ovld prefetch(const __global half4 *, size_t);
+void __ovld prefetch(const __global half8 *, size_t);
+void __ovld prefetch(const __global half16 *, size_t);
+#endif // cl_khr_fp16
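+
+/*
+ * Usage sketch (illustrative only): hint the next tile into the global cache
+ * while the current one is being processed; prefetch() never changes the
+ * kernel's results. tile_elems is a hypothetical per-group tile size.
+ *
+ *   // inside a kernel:
+ *   prefetch(in + (get_group_id(0) + 1) * tile_elems, tile_elems);
+ */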
+
+// OpenCL v1.1 s6.11.11, v1.2 s6.12.11 - Atomic Functions
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old + val) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_add(volatile __global int *, int);
+uint __ovld atomic_add(volatile __global uint *, uint);
+int __ovld atomic_add(volatile __local int *, int);
+uint __ovld atomic_add(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_add(volatile int *, int);
+uint __ovld atomic_add(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_add(volatile __global int *, int);
+uint __ovld atom_add(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_add(volatile __local int *, int);
+uint __ovld atom_add(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_add(volatile __global long *, long);
+ulong __ovld atom_add(volatile __global ulong *, ulong);
+long __ovld atom_add(volatile __local long *, long);
+ulong __ovld atom_add(volatile __local ulong *, ulong);
+#endif
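+
+/*
+ * Usage sketch (illustrative only): a byte histogram where many work-items
+ * may hit the same bin concurrently, so the increment must be atomic.
+ *
+ *   __kernel void hist256(__global const uchar *pixels,
+ *                         volatile __global uint *bins) {
+ *     atomic_add(&bins[pixels[get_global_id(0)]], 1u);
+ *   }
+ */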
+
+/**
+ * Read the 32-bit value (referred to as old) stored at the location pointed
+ * to by p. Compute (old - val) and store the result at the location pointed
+ * to by p. The function returns old.
+ */
+int __ovld atomic_sub(volatile __global int *, int);
+uint __ovld atomic_sub(volatile __global uint *, uint);
+int __ovld atomic_sub(volatile __local int *, int);
+uint __ovld atomic_sub(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_sub(volatile int *, int);
+uint __ovld atomic_sub(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_sub(volatile __global int *, int);
+uint __ovld atom_sub(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_sub(volatile __local int *, int);
+uint __ovld atom_sub(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_sub(volatile __global long *, long);
+ulong __ovld atom_sub(volatile __global ulong *, ulong);
+long __ovld atom_sub(volatile __local long *, long);
+ulong __ovld atom_sub(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Swaps the old value stored at the location
+ * pointed to by p with the new value given by
+ * val. Returns the old value.
+ */
+int __ovld atomic_xchg(volatile __global int *, int);
+uint __ovld atomic_xchg(volatile __global uint *, uint);
+int __ovld atomic_xchg(volatile __local int *, int);
+uint __ovld atomic_xchg(volatile __local uint *, uint);
+float __ovld atomic_xchg(volatile __global float *, float);
+float __ovld atomic_xchg(volatile __local float *, float);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xchg(volatile int *, int);
+uint __ovld atomic_xchg(volatile uint *, uint);
+float __ovld atomic_xchg(volatile float *, float);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_xchg(volatile __global int *, int);
+uint __ovld atom_xchg(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_xchg(volatile __local int *, int);
+uint __ovld atom_xchg(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_xchg(volatile __global long *, long);
+long __ovld atom_xchg(volatile __local long *, long);
+ulong __ovld atom_xchg(volatile __global ulong *, ulong);
+ulong __ovld atom_xchg(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old + 1) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_inc(volatile __global int *);
+uint __ovld atomic_inc(volatile __global uint *);
+int __ovld atomic_inc(volatile __local int *);
+uint __ovld atomic_inc(volatile __local uint *);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_inc(volatile int *);
+uint __ovld atomic_inc(volatile uint *);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_inc(volatile __global int *);
+uint __ovld atom_inc(volatile __global uint *);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_inc(volatile __local int *);
+uint __ovld atom_inc(volatile __local uint *);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_inc(volatile __global long *);
+ulong __ovld atom_inc(volatile __global ulong *);
+long __ovld atom_inc(volatile __local long *);
+ulong __ovld atom_inc(volatile __local ulong *);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old - 1) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_dec(volatile __global int *);
+uint __ovld atomic_dec(volatile __global uint *);
+int __ovld atomic_dec(volatile __local int *);
+uint __ovld atomic_dec(volatile __local uint *);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_dec(volatile int *);
+uint __ovld atomic_dec(volatile uint *);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_dec(volatile __global int *);
+uint __ovld atom_dec(volatile __global uint *);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_dec(volatile __local int *);
+uint __ovld atom_dec(volatile __local uint *);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_dec(volatile __global long *);
+ulong __ovld atom_dec(volatile __global ulong *);
+long __ovld atom_dec(volatile __local long *);
+ulong __ovld atom_dec(volatile __local ulong *);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old == cmp) ? val : old and store the result
+ * at the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_cmpxchg(volatile __global int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __global uint *, uint, uint);
+int __ovld atomic_cmpxchg(volatile __local int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __local uint *, uint, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_cmpxchg(volatile int *, int, int);
+uint __ovld atomic_cmpxchg(volatile uint *, uint, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __global int *, int, int);
+uint __ovld atom_cmpxchg(volatile __global uint *, uint, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __local int *, int, int);
+uint __ovld atom_cmpxchg(volatile __local uint *, uint, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_cmpxchg(volatile __global long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __global ulong *, ulong, ulong);
+long __ovld atom_cmpxchg(volatile __local long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __local ulong *, ulong, ulong);
+#endif
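+
+/*
+ * Usage sketch (illustrative only): a compare-exchange retry loop is the
+ * standard way to build read-modify-write operations these built-ins do not
+ * provide, e.g. a floating-point add via as_int/as_float bit reinterpretation.
+ *
+ *   void atomic_add_float(volatile __global int *p, float val) {
+ *     int old = *p, assumed;
+ *     do {
+ *       assumed = old;
+ *       old = atomic_cmpxchg(p, assumed, as_int(as_float(assumed) + val));
+ *     } while (old != assumed);   // retry if another work-item intervened
+ *   }
+ */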
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * min(old, val) and store the minimum value at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_min(volatile __global int *, int);
+uint __ovld atomic_min(volatile __global uint *, uint);
+int __ovld atomic_min(volatile __local int *, int);
+uint __ovld atomic_min(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_min(volatile int *, int);
+uint __ovld atomic_min(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_min(volatile __global int *, int);
+uint __ovld atom_min(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_min(volatile __local int *, int);
+uint __ovld atom_min(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_min(volatile __global long *, long);
+ulong __ovld atom_min(volatile __global ulong *, ulong);
+long __ovld atom_min(volatile __local long *, long);
+ulong __ovld atom_min(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * max(old, val) and store the maximum value at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_max(volatile __global int *, int);
+uint __ovld atomic_max(volatile __global uint *, uint);
+int __ovld atomic_max(volatile __local int *, int);
+uint __ovld atomic_max(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_max(volatile int *, int);
+uint __ovld atomic_max(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_max(volatile __global int *, int);
+uint __ovld atom_max(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_max(volatile __local int *, int);
+uint __ovld atom_max(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_max(volatile __global long *, long);
+ulong __ovld atom_max(volatile __global ulong *, ulong);
+long __ovld atom_max(volatile __local long *, long);
+ulong __ovld atom_max(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old & val) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_and(volatile __global int *, int);
+uint __ovld atomic_and(volatile __global uint *, uint);
+int __ovld atomic_and(volatile __local int *, int);
+uint __ovld atomic_and(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_and(volatile int *, int);
+uint __ovld atomic_and(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_and(volatile __global int *, int);
+uint __ovld atom_and(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_and(volatile __local int *, int);
+uint __ovld atom_and(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_and(volatile __global long *, long);
+ulong __ovld atom_and(volatile __global ulong *, ulong);
+long __ovld atom_and(volatile __local long *, long);
+ulong __ovld atom_and(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old | val) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_or(volatile __global int *, int);
+uint __ovld atomic_or(volatile __global uint *, uint);
+int __ovld atomic_or(volatile __local int *, int);
+uint __ovld atomic_or(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_or(volatile int *, int);
+uint __ovld atomic_or(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_or(volatile __global int *, int);
+uint __ovld atom_or(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_or(volatile __local int *, int);
+uint __ovld atom_or(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_or(volatile __global long *, long);
+ulong __ovld atom_or(volatile __global ulong *, ulong);
+long __ovld atom_or(volatile __local long *, long);
+ulong __ovld atom_or(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p. Compute
+ * (old ^ val) and store the result at the
+ * location pointed to by p. The function returns old.
+ */
+int __ovld atomic_xor(volatile __global int *, int);
+uint __ovld atomic_xor(volatile __global uint *, uint);
+int __ovld atomic_xor(volatile __local int *, int);
+uint __ovld atomic_xor(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xor(volatile int *, int);
+uint __ovld atomic_xor(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_xor(volatile __global int *, int);
+uint __ovld atom_xor(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_xor(volatile __local int *, int);
+uint __ovld atom_xor(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_xor(volatile __global long *, long);
+ulong __ovld atom_xor(volatile __global ulong *, ulong);
+long __ovld atom_xor(volatile __local long *, long);
+ulong __ovld atom_xor(volatile __local ulong *, ulong);
+#endif
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
+#endif
+
+// OpenCL v2.0 s6.13.11 - Atomic Functions
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+
+// atomic_init()
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_init(volatile atomic_int *, int);
+void __ovld atomic_init(volatile atomic_uint *, uint);
+void __ovld atomic_init(volatile atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile atomic_long *, long);
+void __ovld atomic_init(volatile atomic_ulong *, ulong);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_init(volatile __global atomic_int *, int);
+void __ovld atomic_init(volatile __local atomic_int *, int);
+void __ovld atomic_init(volatile __global atomic_uint *, uint);
+void __ovld atomic_init(volatile __local atomic_uint *, uint);
+void __ovld atomic_init(volatile __global atomic_float *, float);
+void __ovld atomic_init(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile __global atomic_long *, long);
+void __ovld atomic_init(volatile __local atomic_long *, long);
+void __ovld atomic_init(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_init(volatile __local atomic_ulong *, ulong);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile __global atomic_double *, double);
+void __ovld atomic_init(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+#endif
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
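+
+/*
+ * Usage sketch (illustrative only): atomic_init() performs a non-atomic
+ * store, so exactly one work-item should initialize the object, with
+ * synchronization before any concurrent use.
+ *
+ *   __kernel void reset(volatile __global atomic_uint *counter) {
+ *     if (get_global_id(0) == 0)
+ *       atomic_init(counter, 0u);
+ *   }
+ */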
+
+// atomic_work_item_fence()
+void __ovld atomic_work_item_fence(cl_mem_fence_flags, memory_order, memory_scope);
+
+// atomic_fetch()
+// OpenCL v2.0 s6.13.11.7.5:
+// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add(volatile atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile atomic_uint *, uint);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_sub(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_or(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_xor(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_and(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_min(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_max(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *, ptrdiff_t);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_add(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_add(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_sub(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_sub(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_or(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_or(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_xor(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_xor(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_and(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_and(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_min(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_min(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_max(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_max(volatile __local atomic_uint *, uint);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_add(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_sub(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_sub(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_or(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_or(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_xor(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_xor(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_and(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_and(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_min(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_min(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_max(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_max(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_max(volatile __global atomic_uintptr_t *, uintptr_t);
+uintptr_t __ovld atomic_fetch_max(volatile __local atomic_uintptr_t *, uintptr_t);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
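+// Usage sketch (editorial illustration; not part of the upstream header).
+// The non-_explicit fetch functions above behave like the _explicit forms
+// called with memory_order_seq_cst and memory_scope_device. A hypothetical
+// OpenCL C 3.0 kernel building a byte histogram (bins: 256 zero-initialized
+// counters):
+//
+//   __kernel void histogram(__global const uchar *in, uint n,
+//                           __global atomic_uint *bins) {
+//     size_t i = get_global_id(0);
+//     if (i < n)
+//       (void)atomic_fetch_add(&bins[in[i]], 1u); // returns the old value
+//   }
+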
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
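+// Usage sketch (editorial illustration; not part of the upstream header).
+// The _explicit forms that take only a memory_order still synchronize at
+// memory_scope_device. For a plain event counter, relaxed ordering is
+// typically sufficient (hypothetical kernel):
+//
+//   __kernel void count_hits(__global const int *hit,
+//                            __global atomic_uint *counter) {
+//     if (hit[get_global_id(0)])
+//       atomic_fetch_add_explicit(counter, 1u, memory_order_relaxed);
+//   }
+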
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
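+// Usage sketch (editorial illustration; not part of the upstream header).
+// The memory_scope overloads let atomics that only synchronize within a
+// work-group avoid device-wide ordering. Hypothetical per-group counter
+// kept in __local memory:
+//
+//   __kernel void count_per_group(__global const int *flags,
+//                                 __global uint *out) {
+//     __local atomic_uint wg_count;
+//     if (get_local_id(0) == 0)
+//       atomic_store_explicit(&wg_count, 0u, memory_order_relaxed,
+//                             memory_scope_work_group);
+//     barrier(CLK_LOCAL_MEM_FENCE);
+//     if (flags[get_global_id(0)])
+//       atomic_fetch_add_explicit(&wg_count, 1u, memory_order_relaxed,
+//                                 memory_scope_work_group);
+//     barrier(CLK_LOCAL_MEM_FENCE);
+//     if (get_local_id(0) == 0)
+//       out[get_group_id(0)] =
+//           atomic_load_explicit(&wg_count, memory_order_relaxed,
+//                                memory_scope_work_group);
+//   }
+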
+// Functionality added by the cl_ext_float_atomics extension
+#if defined(cl_ext_float_atomics)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
+void __ovld atomic_store(volatile __global atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+                                  half, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+                                  half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __global atomic_half *);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __global atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+                                     half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+                                     half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile __local atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+                                  half, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+                                  half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __local atomic_half *);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __local atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+                                     half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+                                     half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store) &&                   \
+    defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile atomic_half *, half);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+                                  memory_order);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+                                  memory_order, memory_scope);
+half __ovld atomic_load(volatile atomic_half *);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+                                     memory_order);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+                                     memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
+       // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile atomic_half *, half);
+half __ovld atomic_fetch_max(volatile atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) &&
+       // defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile atomic_float *, float);
+float __ovld atomic_fetch_max(volatile atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) &&
+       // defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile atomic_double *, double);
+double __ovld atomic_fetch_max(volatile atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) &&
+       // defined(__opencl_c_ext_fp64_local_atomic_min_max)
+#endif // defined(cl_khr_int64_base_atomics) &&
+       // defined(cl_khr_int64_extended_atomics)
+
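+// Usage sketch (editorial illustration; not part of the upstream header).
+// With __opencl_c_ext_fp32_global_atomic_min_max, a running maximum can be
+// maintained directly in floating point (hypothetical kernel; *global_max
+// is assumed pre-initialized to -INFINITY):
+//
+//   __kernel void track_max(__global const float *x,
+//                           __global atomic_float *global_max) {
+//     (void)atomic_fetch_max(global_max, x[get_global_id(0)]);
+//   }
+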
+#if defined(__opencl_c_ext_fp16_global_atomic_add)
+half __ovld atomic_fetch_add(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add) &&
+       // defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add)
+float __ovld atomic_fetch_add(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add) &&
+       // defined(__opencl_c_ext_fp32_local_atomic_add)
+
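+// Usage sketch (editorial illustration; not part of the upstream header).
+// With __opencl_c_ext_fp32_global_atomic_add, work-items can accumulate
+// into a single float. Floating-point addition is not associative, so the
+// result may vary between runs (hypothetical kernel):
+//
+//   __kernel void sum(__global const float *x, __global atomic_float *acc) {
+//     atomic_fetch_add_explicit(acc, x[get_global_id(0)],
+//                               memory_order_relaxed);
+//   }
+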
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(__opencl_c_ext_fp64_global_atomic_add)
+double __ovld atomic_fetch_add(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add) &&
+       // defined(__opencl_c_ext_fp64_local_atomic_add)
+#endif // defined(cl_khr_int64_base_atomics) &&
+       // defined(cl_khr_int64_extended_atomics)
+
+#endif // cl_ext_float_atomics
+
+// atomic_store()
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store(volatile atomic_int *, int);
+void __ovld atomic_store(volatile atomic_uint *, uint);
+void __ovld atomic_store(volatile atomic_float *, float);
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile atomic_long *, long);
+void __ovld atomic_store(volatile atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store(volatile __global atomic_int *, int);
+void __ovld atomic_store(volatile __local atomic_int *, int);
+void __ovld atomic_store(volatile __global atomic_uint *, uint);
+void __ovld atomic_store(volatile __local atomic_uint *, uint);
+void __ovld atomic_store(volatile __global atomic_float *, float);
+void __ovld atomic_store(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_double *, double);
+void __ovld atomic_store(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_long *, long);
+void __ovld atomic_store(volatile __local atomic_long *, long);
+void __ovld atomic_store(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_store(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
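+// Usage sketch (editorial illustration; not part of the upstream header).
+// A release store publishes earlier plain writes to any work-item that
+// later reads the flag with acquire ordering (hypothetical kernel; see the
+// matching atomic_load() sketch below):
+//
+//   __kernel void publish(__global int *data, __global atomic_int *flag) {
+//     data[0] = 42;                                       // payload
+//     atomic_store_explicit(flag, 1, memory_order_release);
+//   }
+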
+// atomic_load()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load(volatile atomic_int *);
+uint __ovld atomic_load(volatile atomic_uint *);
+float __ovld atomic_load(volatile atomic_float *);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile atomic_double *);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile atomic_long *);
+ulong __ovld atomic_load(volatile atomic_ulong *);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load(volatile __global atomic_int *);
+int __ovld atomic_load(volatile __local atomic_int *);
+uint __ovld atomic_load(volatile __global atomic_uint *);
+uint __ovld atomic_load(volatile __local atomic_uint *);
+float __ovld atomic_load(volatile __global atomic_float *);
+float __ovld atomic_load(volatile __local atomic_float *);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile __global atomic_double *);
+double __ovld atomic_load(volatile __local atomic_double *);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile __global atomic_long *);
+long __ovld atomic_load(volatile __local atomic_long *);
+ulong __ovld atomic_load(volatile __global atomic_ulong *);
+ulong __ovld atomic_load(volatile __local atomic_ulong *);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order, memory_scope);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order, memory_scope);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order, memory_scope);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
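+// Usage sketch (editorial illustration; not part of the upstream header).
+// The consumer side of the atomic_store() sketch above: an acquire load of
+// the flag makes the producer's payload write visible (hypothetical kernel):
+//
+//   __kernel void consume(__global const int *data,
+//                         __global atomic_int *flag, __global int *out) {
+//     if (atomic_load_explicit(flag, memory_order_acquire) == 1)
+//       out[0] = data[0];
+//   }
+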
+// atomic_exchange()
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange(volatile atomic_int *, int);
+uint __ovld atomic_exchange(volatile atomic_uint *, uint);
+float __ovld atomic_exchange(volatile atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile atomic_long *, long);
+ulong __ovld atomic_exchange(volatile atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange(volatile __global atomic_int *, int);
+int __ovld atomic_exchange(volatile __local atomic_int *, int);
+uint __ovld atomic_exchange(volatile __global atomic_uint *, uint);
+uint __ovld atomic_exchange(volatile __local atomic_uint *, uint);
+float __ovld atomic_exchange(volatile __global atomic_float *, float);
+float __ovld atomic_exchange(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile __global atomic_double *, double);
+double __ovld atomic_exchange(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile __global atomic_long *, long);
+long __ovld atomic_exchange(volatile __local atomic_long *, long);
+ulong __ovld atomic_exchange(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_exchange(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
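+
+/**
+ * A minimal usage sketch (assumptions as above): claiming a slot with
+ * atomic_exchange_explicit. The exchange returns the previous value, so a
+ * zero result means this work-item won the slot. Names are illustrative.
+ *
+ *   kernel void claim(volatile __global atomic_uint *slot,
+ *                     __global uint *winner) {
+ *     uint prev = atomic_exchange_explicit(
+ *         slot, 1u, memory_order_acq_rel, memory_scope_device);
+ *     if (prev == 0u)
+ *       *winner = get_global_id(0);
+ *   }
+ */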
+
+// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *, float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *, float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *, double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *, double *, double);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *, ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *, ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __private float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __private double *, double);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __private ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
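+
+/**
+ * A minimal usage sketch (assumptions as above): the canonical
+ * compare-exchange retry loop. On failure, the weak form stores the value
+ * it observed in *v back into `expected` (a __private pointer, matching
+ * the overloads above), so the loop retries with the fresh value. The
+ * helper name is illustrative.
+ *
+ *   void atomic_increment_capped(volatile __global atomic_uint *v,
+ *                                uint cap) {
+ *     uint expected = atomic_load_explicit(v, memory_order_relaxed,
+ *                                          memory_scope_device);
+ *     while (expected < cap &&
+ *            !atomic_compare_exchange_weak_explicit(
+ *                v, &expected, expected + 1u,
+ *                memory_order_acq_rel, memory_order_relaxed,
+ *                memory_scope_device)) {
+ *       // `expected` now holds the latest observed value; retry.
+ *     }
+ *   }
+ */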
+
+// atomic_flag_test_and_set() and atomic_flag_clear()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *);
+void __ovld atomic_flag_clear(volatile atomic_flag *);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *);
+bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *);
+void __ovld atomic_flag_clear(volatile __global atomic_flag *);
+void __ovld atomic_flag_clear(volatile __local atomic_flag *);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order, memory_scope);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
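+
+/**
+ * A minimal usage sketch (assumptions as above): a simple spin lock built
+ * from atomic_flag. Test-and-set with acquire ordering takes the lock;
+ * clear with release ordering publishes the critical section's writes.
+ * Spinning across work-groups can deadlock on devices without independent
+ * forward progress, so this illustrates the calling convention rather than
+ * a portable locking scheme. Function names are illustrative.
+ *
+ *   void lock(volatile __global atomic_flag *f) {
+ *     while (atomic_flag_test_and_set_explicit(
+ *         f, memory_order_acquire, memory_scope_device)) {
+ *       // spin until the previous holder clears the flag
+ *     }
+ *   }
+ *
+ *   void unlock(volatile __global atomic_flag *f) {
+ *     atomic_flag_clear_explicit(f, memory_order_release,
+ *                                memory_scope_device);
+ *   }
+ */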
+
+// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
+
+/**
+ * The shuffle and shuffle2 built-in functions construct a permutation of
+ * elements from one or two input vectors respectively that are of the same
+ * type, returning a vector with the same element type as the input and a
+ * length that is the same as the shuffle mask. The size of each element in
+ * the mask must match the size of each element in the result. For shuffle,
+ * only the ilogb(2m-1) least significant bits of each mask element are
+ * considered, where m is the number of elements in the input vector. For
+ * shuffle2, only the ilogb(2m-1)+1 least significant bits of each mask
+ * element are considered. Other bits in the mask shall be ignored.
+ *
+ * The elements of the input vectors are numbered from left to right across
+ * one or both of the vectors. For this purpose, the number of elements in
+ * a vector is given by vec_step(gentypem). The shuffle mask operand
+ * specifies, for each element of the result vector, which element of the
+ * one or two input vectors the result element takes its value from.
+ *
+ * Examples:
+ *   uint4 mask = (uint4)(3, 2, 1, 0);
+ *   float4 a;
+ *   float4 r = shuffle(a, mask);      // r.s0123 = a.wzyx
+ *
+ *   uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
+ *   float4 a, b;
+ *   float8 r = shuffle2(a, b, mask);  // r.s0123 = a.xyzw
+ *                                     // r.s4567 = b.xyzw
+ *
+ *   uint4 mask;
+ *   float8 a;
+ *   float4 b;
+ *   b = shuffle(a, mask);             // valid
+ *
+ * Examples that are not valid:
+ *   uint8 mask;
+ *   short16 a;
+ *   short8 b;
+ *   b = shuffle(a, mask);             // not valid: the mask element size
+ *                                     // (uint) does not match the result
+ *                                     // element size (short)
+ */
+char2 __ovld __cnfn shuffle(char2, uchar2);
+char2 __ovld __cnfn shuffle(char4, uchar2);
+char2 __ovld __cnfn shuffle(char8, uchar2);
+char2 __ovld __cnfn shuffle(char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle(uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle(short2, ushort2);
+short2 __ovld __cnfn shuffle(short4, ushort2);
+short2 __ovld __cnfn shuffle(short8, ushort2);
+short2 __ovld __cnfn shuffle(short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle(ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle(int2, uint2);
+int2 __ovld __cnfn shuffle(int4, uint2);
+int2 __ovld __cnfn shuffle(int8, uint2);
+int2 __ovld __cnfn shuffle(int16, uint2);
+
+uint2 __ovld __cnfn shuffle(uint2, uint2);
+uint2 __ovld __cnfn shuffle(uint4, uint2);
+uint2 __ovld __cnfn shuffle(uint8, uint2);
+uint2 __ovld __cnfn shuffle(uint16, uint2);
+
+long2 __ovld __cnfn shuffle(long2, ulong2);
+long2 __ovld __cnfn shuffle(long4, ulong2);
+long2 __ovld __cnfn shuffle(long8, ulong2);
+long2 __ovld __cnfn shuffle(long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle(ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle(float2, uint2);
+float2 __ovld __cnfn shuffle(float4, uint2);
+float2 __ovld __cnfn shuffle(float8, uint2);
+float2 __ovld __cnfn shuffle(float16, uint2);
+
+char4 __ovld __cnfn shuffle(char2, uchar4);
+char4 __ovld __cnfn shuffle(char4, uchar4);
+char4 __ovld __cnfn shuffle(char8, uchar4);
+char4 __ovld __cnfn shuffle(char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle(uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle(short2, ushort4);
+short4 __ovld __cnfn shuffle(short4, ushort4);
+short4 __ovld __cnfn shuffle(short8, ushort4);
+short4 __ovld __cnfn shuffle(short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle(ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle(int2, uint4);
+int4 __ovld __cnfn shuffle(int4, uint4);
+int4 __ovld __cnfn shuffle(int8, uint4);
+int4 __ovld __cnfn shuffle(int16, uint4);
+
+uint4 __ovld __cnfn shuffle(uint2, uint4);
+uint4 __ovld __cnfn shuffle(uint4, uint4);
+uint4 __ovld __cnfn shuffle(uint8, uint4);
+uint4 __ovld __cnfn shuffle(uint16, uint4);
+
+long4 __ovld __cnfn shuffle(long2, ulong4);
+long4 __ovld __cnfn shuffle(long4, ulong4);
+long4 __ovld __cnfn shuffle(long8, ulong4);
+long4 __ovld __cnfn shuffle(long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle(ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle(float2, uint4);
+float4 __ovld __cnfn shuffle(float4, uint4);
+float4 __ovld __cnfn shuffle(float8, uint4);
+float4 __ovld __cnfn shuffle(float16, uint4);
+
+char8 __ovld __cnfn shuffle(char2, uchar8);
+char8 __ovld __cnfn shuffle(char4, uchar8);
+char8 __ovld __cnfn shuffle(char8, uchar8);
+char8 __ovld __cnfn shuffle(char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle(uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle(short2, ushort8);
+short8 __ovld __cnfn shuffle(short4, ushort8);
+short8 __ovld __cnfn shuffle(short8, ushort8);
+short8 __ovld __cnfn shuffle(short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle(ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle(int2, uint8);
+int8 __ovld __cnfn shuffle(int4, uint8);
+int8 __ovld __cnfn shuffle(int8, uint8);
+int8 __ovld __cnfn shuffle(int16, uint8);
+
+uint8 __ovld __cnfn shuffle(uint2, uint8);
+uint8 __ovld __cnfn shuffle(uint4, uint8);
+uint8 __ovld __cnfn shuffle(uint8, uint8);
+uint8 __ovld __cnfn shuffle(uint16, uint8);
+
+long8 __ovld __cnfn shuffle(long2, ulong8);
+long8 __ovld __cnfn shuffle(long4, ulong8);
+long8 __ovld __cnfn shuffle(long8, ulong8);
+long8 __ovld __cnfn shuffle(long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle(ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle(float2, uint8);
+float8 __ovld __cnfn shuffle(float4, uint8);
+float8 __ovld __cnfn shuffle(float8, uint8);
+float8 __ovld __cnfn shuffle(float16, uint8);
+
+char16 __ovld __cnfn shuffle(char2, uchar16);
+char16 __ovld __cnfn shuffle(char4, uchar16);
+char16 __ovld __cnfn shuffle(char8, uchar16);
+char16 __ovld __cnfn shuffle(char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle(uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle(short2, ushort16);
+short16 __ovld __cnfn shuffle(short4, ushort16);
+short16 __ovld __cnfn shuffle(short8, ushort16);
+short16 __ovld __cnfn shuffle(short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle(ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle(int2, uint16);
+int16 __ovld __cnfn shuffle(int4, uint16);
+int16 __ovld __cnfn shuffle(int8, uint16);
+int16 __ovld __cnfn shuffle(int16, uint16);
+
+uint16 __ovld __cnfn shuffle(uint2, uint16);
+uint16 __ovld __cnfn shuffle(uint4, uint16);
+uint16 __ovld __cnfn shuffle(uint8, uint16);
+uint16 __ovld __cnfn shuffle(uint16, uint16);
+
+long16 __ovld __cnfn shuffle(long2, ulong16);
+long16 __ovld __cnfn shuffle(long4, ulong16);
+long16 __ovld __cnfn shuffle(long8, ulong16);
+long16 __ovld __cnfn shuffle(long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle(ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle(float2, uint16);
+float16 __ovld __cnfn shuffle(float4, uint16);
+float16 __ovld __cnfn shuffle(float8, uint16);
+float16 __ovld __cnfn shuffle(float16, uint16);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle(double2, ulong2);
+double2 __ovld __cnfn shuffle(double4, ulong2);
+double2 __ovld __cnfn shuffle(double8, ulong2);
+double2 __ovld __cnfn shuffle(double16, ulong2);
+
+double4 __ovld __cnfn shuffle(double2, ulong4);
+double4 __ovld __cnfn shuffle(double4, ulong4);
+double4 __ovld __cnfn shuffle(double8, ulong4);
+double4 __ovld __cnfn shuffle(double16, ulong4);
+
+double8 __ovld __cnfn shuffle(double2, ulong8);
+double8 __ovld __cnfn shuffle(double4, ulong8);
+double8 __ovld __cnfn shuffle(double8, ulong8);
+double8 __ovld __cnfn shuffle(double16, ulong8);
+
+double16 __ovld __cnfn shuffle(double2, ulong16);
+double16 __ovld __cnfn shuffle(double4, ulong16);
+double16 __ovld __cnfn shuffle(double8, ulong16);
+double16 __ovld __cnfn shuffle(double16, ulong16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle(half2, ushort2);
+half2 __ovld __cnfn shuffle(half4, ushort2);
+half2 __ovld __cnfn shuffle(half8, ushort2);
+half2 __ovld __cnfn shuffle(half16, ushort2);
+
+half4 __ovld __cnfn shuffle(half2, ushort4);
+half4 __ovld __cnfn shuffle(half4, ushort4);
+half4 __ovld __cnfn shuffle(half8, ushort4);
+half4 __ovld __cnfn shuffle(half16, ushort4);
+
+half8 __ovld __cnfn shuffle(half2, ushort8);
+half8 __ovld __cnfn shuffle(half4, ushort8);
+half8 __ovld __cnfn shuffle(half8, ushort8);
+half8 __ovld __cnfn shuffle(half16, ushort8);
+
+half16 __ovld __cnfn shuffle(half2, ushort16);
+half16 __ovld __cnfn shuffle(half4, ushort16);
+half16 __ovld __cnfn shuffle(half8, ushort16);
+half16 __ovld __cnfn shuffle(half16, ushort16);
+#endif //cl_khr_fp16
+
+char2 __ovld __cnfn shuffle2(char2, char2, uchar2);
+char2 __ovld __cnfn shuffle2(char4, char4, uchar2);
+char2 __ovld __cnfn shuffle2(char8, char8, uchar2);
+char2 __ovld __cnfn shuffle2(char16, char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle2(uchar2, uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar4, uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar8, uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar16, uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle2(short2, short2, ushort2);
+short2 __ovld __cnfn shuffle2(short4, short4, ushort2);
+short2 __ovld __cnfn shuffle2(short8, short8, ushort2);
+short2 __ovld __cnfn shuffle2(short16, short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle2(ushort2, ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort4, ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort8, ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort16, ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle2(int2, int2, uint2);
+int2 __ovld __cnfn shuffle2(int4, int4, uint2);
+int2 __ovld __cnfn shuffle2(int8, int8, uint2);
+int2 __ovld __cnfn shuffle2(int16, int16, uint2);
+
+uint2 __ovld __cnfn shuffle2(uint2, uint2, uint2);
+uint2 __ovld __cnfn shuffle2(uint4, uint4, uint2);
+uint2 __ovld __cnfn shuffle2(uint8, uint8, uint2);
+uint2 __ovld __cnfn shuffle2(uint16, uint16, uint2);
+
+long2 __ovld __cnfn shuffle2(long2, long2, ulong2);
+long2 __ovld __cnfn shuffle2(long4, long4, ulong2);
+long2 __ovld __cnfn shuffle2(long8, long8, ulong2);
+long2 __ovld __cnfn shuffle2(long16, long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle2(ulong2, ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong4, ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong8, ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong16, ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle2(float2, float2, uint2);
+float2 __ovld __cnfn shuffle2(float4, float4, uint2);
+float2 __ovld __cnfn shuffle2(float8, float8, uint2);
+float2 __ovld __cnfn shuffle2(float16, float16, uint2);
+
+char4 __ovld __cnfn shuffle2(char2, char2, uchar4);
+char4 __ovld __cnfn shuffle2(char4, char4, uchar4);
+char4 __ovld __cnfn shuffle2(char8, char8, uchar4);
+char4 __ovld __cnfn shuffle2(char16, char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle2(uchar2, uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar4, uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar8, uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar16, uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle2(short2, short2, ushort4);
+short4 __ovld __cnfn shuffle2(short4, short4, ushort4);
+short4 __ovld __cnfn shuffle2(short8, short8, ushort4);
+short4 __ovld __cnfn shuffle2(short16, short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle2(ushort2, ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort4, ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort8, ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort16, ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle2(int2, int2, uint4);
+int4 __ovld __cnfn shuffle2(int4, int4, uint4);
+int4 __ovld __cnfn shuffle2(int8, int8, uint4);
+int4 __ovld __cnfn shuffle2(int16, int16, uint4);
+
+uint4 __ovld __cnfn shuffle2(uint2, uint2, uint4);
+uint4 __ovld __cnfn shuffle2(uint4, uint4, uint4);
+uint4 __ovld __cnfn shuffle2(uint8, uint8, uint4);
+uint4 __ovld __cnfn shuffle2(uint16, uint16, uint4);
+
+long4 __ovld __cnfn shuffle2(long2, long2, ulong4);
+long4 __ovld __cnfn shuffle2(long4, long4, ulong4);
+long4 __ovld __cnfn shuffle2(long8, long8, ulong4);
+long4 __ovld __cnfn shuffle2(long16, long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle2(ulong2, ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong4, ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong8, ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong16, ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle2(float2, float2, uint4);
+float4 __ovld __cnfn shuffle2(float4, float4, uint4);
+float4 __ovld __cnfn shuffle2(float8, float8, uint4);
+float4 __ovld __cnfn shuffle2(float16, float16, uint4);
+
+char8 __ovld __cnfn shuffle2(char2, char2, uchar8);
+char8 __ovld __cnfn shuffle2(char4, char4, uchar8);
+char8 __ovld __cnfn shuffle2(char8, char8, uchar8);
+char8 __ovld __cnfn shuffle2(char16, char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle2(uchar2, uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar4, uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar8, uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar16, uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle2(short2, short2, ushort8);
+short8 __ovld __cnfn shuffle2(short4, short4, ushort8);
+short8 __ovld __cnfn shuffle2(short8, short8, ushort8);
+short8 __ovld __cnfn shuffle2(short16, short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle2(ushort2, ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort4, ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort8, ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort16, ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle2(int2, int2, uint8);
+int8 __ovld __cnfn shuffle2(int4, int4, uint8);
+int8 __ovld __cnfn shuffle2(int8, int8, uint8);
+int8 __ovld __cnfn shuffle2(int16, int16, uint8);
+
+uint8 __ovld __cnfn shuffle2(uint2, uint2, uint8);
+uint8 __ovld __cnfn shuffle2(uint4, uint4, uint8);
+uint8 __ovld __cnfn shuffle2(uint8, uint8, uint8);
+uint8 __ovld __cnfn shuffle2(uint16, uint16, uint8);
+
+long8 __ovld __cnfn shuffle2(long2, long2, ulong8);
+long8 __ovld __cnfn shuffle2(long4, long4, ulong8);
+long8 __ovld __cnfn shuffle2(long8, long8, ulong8);
+long8 __ovld __cnfn shuffle2(long16, long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle2(ulong2, ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong4, ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong8, ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong16, ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle2(float2, float2, uint8);
+float8 __ovld __cnfn shuffle2(float4, float4, uint8);
+float8 __ovld __cnfn shuffle2(float8, float8, uint8);
+float8 __ovld __cnfn shuffle2(float16, float16, uint8);
+
+char16 __ovld __cnfn shuffle2(char2, char2, uchar16);
+char16 __ovld __cnfn shuffle2(char4, char4, uchar16);
+char16 __ovld __cnfn shuffle2(char8, char8, uchar16);
+char16 __ovld __cnfn shuffle2(char16, char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle2(uchar2, uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar4, uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar8, uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar16, uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle2(short2, short2, ushort16);
+short16 __ovld __cnfn shuffle2(short4, short4, ushort16);
+short16 __ovld __cnfn shuffle2(short8, short8, ushort16);
+short16 __ovld __cnfn shuffle2(short16, short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle2(ushort2, ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort4, ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort8, ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort16, ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle2(int2, int2, uint16);
+int16 __ovld __cnfn shuffle2(int4, int4, uint16);
+int16 __ovld __cnfn shuffle2(int8, int8, uint16);
+int16 __ovld __cnfn shuffle2(int16, int16, uint16);
+
+uint16 __ovld __cnfn shuffle2(uint2, uint2, uint16);
+uint16 __ovld __cnfn shuffle2(uint4, uint4, uint16);
+uint16 __ovld __cnfn shuffle2(uint8, uint8, uint16);
+uint16 __ovld __cnfn shuffle2(uint16, uint16, uint16);
+
+long16 __ovld __cnfn shuffle2(long2, long2, ulong16);
+long16 __ovld __cnfn shuffle2(long4, long4, ulong16);
+long16 __ovld __cnfn shuffle2(long8, long8, ulong16);
+long16 __ovld __cnfn shuffle2(long16, long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle2(ulong2, ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong4, ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong8, ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong16, ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle2(float2, float2, uint16);
+float16 __ovld __cnfn shuffle2(float4, float4, uint16);
+float16 __ovld __cnfn shuffle2(float8, float8, uint16);
+float16 __ovld __cnfn shuffle2(float16, float16, uint16);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle2(double2, double2, ulong2);
+double2 __ovld __cnfn shuffle2(double4, double4, ulong2);
+double2 __ovld __cnfn shuffle2(double8, double8, ulong2);
+double2 __ovld __cnfn shuffle2(double16, double16, ulong2);
+
+double4 __ovld __cnfn shuffle2(double2, double2, ulong4);
+double4 __ovld __cnfn shuffle2(double4, double4, ulong4);
+double4 __ovld __cnfn shuffle2(double8, double8, ulong4);
+double4 __ovld __cnfn shuffle2(double16, double16, ulong4);
+
+double8 __ovld __cnfn shuffle2(double2, double2, ulong8);
+double8 __ovld __cnfn shuffle2(double4, double4, ulong8);
+double8 __ovld __cnfn shuffle2(double8, double8, ulong8);
+double8 __ovld __cnfn shuffle2(double16, double16, ulong8);
+
+double16 __ovld __cnfn shuffle2(double2, double2, ulong16);
+double16 __ovld __cnfn shuffle2(double4, double4, ulong16);
+double16 __ovld __cnfn shuffle2(double8, double8, ulong16);
+double16 __ovld __cnfn shuffle2(double16, double16, ulong16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle2(half2, half2, ushort2);
+half2 __ovld __cnfn shuffle2(half4, half4, ushort2);
+half2 __ovld __cnfn shuffle2(half8, half8, ushort2);
+half2 __ovld __cnfn shuffle2(half16, half16, ushort2);
+
+half4 __ovld __cnfn shuffle2(half2, half2, ushort4);
+half4 __ovld __cnfn shuffle2(half4, half4, ushort4);
+half4 __ovld __cnfn shuffle2(half8, half8, ushort4);
+half4 __ovld __cnfn shuffle2(half16, half16, ushort4);
+
+half8 __ovld __cnfn shuffle2(half2, half2, ushort8);
+half8 __ovld __cnfn shuffle2(half4, half4, ushort8);
+half8 __ovld __cnfn shuffle2(half8, half8, ushort8);
+half8 __ovld __cnfn shuffle2(half16, half16, ushort8);
+
+half16 __ovld __cnfn shuffle2(half2, half2, ushort16);
+half16 __ovld __cnfn shuffle2(half4, half4, ushort16);
+half16 __ovld __cnfn shuffle2(half8, half8, ushort16);
+half16 __ovld __cnfn shuffle2(half16, half16, ushort16);
+#endif //cl_khr_fp16
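+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): shuffle2(x, y, mask) selects from the concatenation of x and
+// y, so for 4-element inputs mask values 0-3 pick from x and 4-7 pick from
+// y. The function and variable names below are hypothetical.
+#if 0
+void shuffle2_example(void) {
+    char4 a = (char4)(1, 2, 3, 4);
+    char4 b = (char4)(5, 6, 7, 8);
+    uchar4 mask = (uchar4)(0, 3, 4, 7);
+    char4 r = shuffle2(a, b, mask); // r = (1, 4, 5, 8)
+}
+#endif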
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+#ifdef cl_khr_gl_msaa_sharing
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#endif //cl_khr_gl_msaa_sharing
+
+/**
+ * Use the coordinate (coord.xy) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (coord.x, coord.y, coord.z) to do
+ * an element lookup in the 3D image object specified
+ * by image. coord.w is ignored.
+ *
+ * Use the coordinate (coord.z) to index into the
+ * 2D image array object specified by image_array
+ * and (coord.x, coord.y) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.y) to index into the
+ * 1D image array object specified by image_array
+ * and (coord.x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.xy) and sample to do an
+ * element lookup in the 2D multi-sample image specified
+ * by image.
+ *
+ * Use coord.xy and sample to do an element
+ * lookup in the 2D multi-sample image layer
+ * identified by index coord.z in the 2D multi-sample
+ * image array specified by image.
+ *
+ * For mipmap images, use the mip-level specified by
+ * the Level-of-Detail (lod) or use gradients for LOD
+ * computation.
+ *
+ * read_imagef returns floating-point values in the
+ * range [0.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to one of the predefined
+ * packed formats, CL_UNORM_INT8, or CL_UNORM_INT16.
+ *
+ * read_imagef returns floating-point values in the
+ * range [-1.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to CL_SNORM_INT8
+ * or CL_SNORM_INT16.
+ *
+ * read_imagef returns floating-point values for image
+ * objects created with image_channel_data_type set to
+ * CL_HALF_FLOAT or CL_FLOAT.
+ *
+ * read_imagei and read_imageui return
+ * unnormalized signed integer and unsigned integer
+ * values respectively. Each channel will be stored in a
+ * 32-bit integer.
+ *
+ * read_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imagei
+ * are undefined.
+ *
+ * read_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imageui
+ * are undefined.
+ *
+ * The read_image{i|ui} calls support a nearest filter
+ * only. The filter_mode specified in sampler
+ * must be set to CLK_FILTER_NEAREST; otherwise
+ * the values returned are undefined.
+ *
+ * The read_image{f|i|ui} calls that take
+ * integer coordinates must use a sampler with
+ * normalized coordinates set to
+ * CLK_NORMALIZED_COORDS_FALSE and
+ * addressing mode set to
+ * CLK_ADDRESS_CLAMP_TO_EDGE,
+ * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
+ * otherwise the values returned are undefined.
+ *
+ * Values returned by read_imagef for image objects
+ * with image_channel_data_type values not specified
+ * in the description above are undefined.
+ */
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2);
+
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2);
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4);
+
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4);
+
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, int);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float);
+
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2);
+
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, int2);
+
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, int4);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __ovld __purefn read_imagef(read_only image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_only image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_msaa_t, int2, int);
+
+float __ovld __purefn read_imagef(read_only image2d_msaa_depth_t, int2, int);
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_only image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_msaa_t, int4, int);
+
+float __ovld __purefn read_imagef(read_only image2d_array_msaa_depth_t, int4, int);
+#endif //cl_khr_gl_msaa_sharing
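+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): a sampler-based read. Per the note above, integer-coordinate
+// reads require unnormalized coordinates, a clamp/none addressing mode, and
+// nearest filtering. Kernel and argument names are hypothetical.
+#if 0
+__constant sampler_t example_smp = CLK_NORMALIZED_COORDS_FALSE |
+                                   CLK_ADDRESS_CLAMP_TO_EDGE |
+                                   CLK_FILTER_NEAREST;
+
+__kernel void copy_texel(read_only image2d_t src, __global float4 *dst) {
+    int2 pos = (int2)(get_global_id(0), get_global_id(1));
+    dst[pos.y * get_image_width(src) + pos.x] =
+        read_imagef(src, example_smp, pos);
+}
+#endif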
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef cl_khr_mipmap_image
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float2, float2);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float4, float4);
+
+#endif //cl_khr_mipmap_image
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
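+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): sampling an explicit mip level, assuming cl_khr_mipmap_image
+// is supported and smp uses CLK_NORMALIZED_COORDS_TRUE as the extension
+// requires. Kernel and argument names are hypothetical.
+#if 0
+__kernel void sample_lod(read_only image2d_t src, sampler_t smp,
+                         float lod, __global float4 *out) {
+    int2 dim = get_image_dim(src);
+    float2 uv = (float2)((get_global_id(0) + 0.5f) / dim.x,
+                         (get_global_id(1) + 0.5f) / dim.y);
+    out[get_global_id(1) * get_global_size(0) + get_global_id(0)] =
+        read_imagef(src, smp, uv, lod); // read from mip level 'lod'
+}
+#endif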
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+/**
+ * Sampler-less Image Access
+ */
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, int);
+
+float4 __ovld __purefn read_imagef(read_only image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_buffer_t, int);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, int2);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, int2);
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, int4);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, int4);
+#endif //cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, int4);
+
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
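+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): sampler-less reads take integer coordinates and behave as if
+// an unnormalized-coordinate, nearest-filter sampler were used. Kernel and
+// argument names are hypothetical.
+#if 0
+__kernel void sum_rows(read_only image2d_t src, __global float4 *row_sum) {
+    int y = get_global_id(0);
+    float4 acc = (float4)(0.0f);
+    for (int x = 0; x < get_image_width(src); ++x)
+        acc += read_imagef(src, (int2)(x, y)); // no sampler argument
+    row_sum[y] = acc;
+}
+#endif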
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, int);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, float);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, float4);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, float4);
+/**
+ * Sampler-less Image Access
+ */
+half4 __ovld __purefn read_imageh(read_only image1d_t, int);
+half4 __ovld __purefn read_imageh(read_only image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_buffer_t, int);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+#endif //cl_khr_fp16
+
+// Image read functions for read_write images
+#if defined(__opencl_c_read_write_images)
+float4 __ovld __purefn read_imagef(read_write image1d_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, int);
+
+float4 __ovld __purefn read_imagef(read_write image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_buffer_t, int);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, int2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, int2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, int4);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_write image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, int4);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __ovld __purefn read_imagef(read_write image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_write image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_msaa_t, int2, int);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_write image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_msaa_t, int4, int);
+
+float __ovld __purefn read_imagef(read_write image2d_msaa_depth_t, int2, int);
+float __ovld __purefn read_imagef(read_write image2d_array_msaa_depth_t, int4, int);
+#endif //cl_khr_gl_msaa_sharing
+
+#ifdef cl_khr_mipmap_image
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float);
+
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float);
+
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float2, float2);
+
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float2, float2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float2, float2);
+
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4);
+
+#endif //cl_khr_mipmap_image
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __ovld __purefn read_imageh(read_write image1d_t, int);
+half4 __ovld __purefn read_imageh(read_write image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_write image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_read_write_images)
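+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): an in-place update through a read_write image. Reading back a
+// texel written earlier in the same kernel would additionally require
+// atomic_work_item_fence with CLK_IMAGE_MEM_FENCE. Names are hypothetical.
+#if 0
+__kernel void scale_in_place(read_write image2d_t img, float gain) {
+    int2 pos = (int2)(get_global_id(0), get_global_id(1));
+    float4 texel = read_imagef(img, pos);  // sampler-less read
+    write_imagef(img, pos, texel * gain);  // write-after-read is well-defined
+}
+#endif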
+
+/**
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by image.
+ * (coord.x, coord.y) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, and 0
+ * ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by index
+ * (coord.z) of the 2D image array object image_array.
+ * (coord.x, coord.y) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * and 0 ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord) in the 1D image (buffer) object specified by image.
+ * coord is considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x) in the 1D image object specified by index
+ * (coord.y) of the 1D image array object image_array.
+ * coord.x is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
+ * (coord.x, coord.y, coord.z) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * 0 ... image height - 1, and 0 ... image depth - 1.
+ *
+ * For mipmap images, use mip-level specified by lod.
+ *
+ * Appropriate data format conversion to the specified
+ * image format is done before writing the color value.
+ *
+ * write_imagef can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the pre-defined packed formats or set to
+ * CL_SNORM_INT8, CL_UNORM_INT8,
+ * CL_SNORM_INT16, CL_UNORM_INT16,
+ * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
+ * format conversion will be done to convert channel
+ * data from a floating-point value to actual data format
+ * in which the channels are stored.
+ *
+ * write_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ *
+ * write_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ *
+ * The behavior of write_imagef, write_imagei and
+ * write_imageui for image objects created with
+ * image_channel_data_type values not specified in
+ * the description above or with (x, y) coordinate
+ * values that are not in the range (0 ... image width -1,
+ * 0 ... image height - 1), respectively, is undefined.
+ */
+void __ovld write_imagef(write_only image2d_t, int2, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int4);
+void __ovld write_imageui(write_only image2d_t, int2, uint4);
+
+void __ovld write_imagef(write_only image2d_array_t, int4, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, uint4);
+
+void __ovld write_imagef(write_only image1d_t, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, uint4);
+
+void __ovld write_imagef(write_only image1d_buffer_t, int, float4);
+void __ovld write_imagei(write_only image1d_buffer_t, int, int4);
+void __ovld write_imageui(write_only image1d_buffer_t, int, uint4);
+
+void __ovld write_imagef(write_only image1d_array_t, int2, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, uint4);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(write_only image3d_t, int4, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int4);
+void __ovld write_imageui(write_only image3d_t, int4, uint4);
+#endif
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(write_only image2d_depth_t, int2, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, float);
+#endif //cl_khr_depth_images
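+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): write_imagef converts the float4 color to the image's channel
+// data type on write, per the description above. Names are hypothetical.
+#if 0
+__kernel void fill_red(write_only image2d_t dst) {
+    int2 pos = (int2)(get_global_id(0), get_global_id(1));
+    write_imagef(dst, pos, (float4)(1.0f, 0.0f, 0.0f, 1.0f));
+}
+#endif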
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(cl_khr_mipmap_image_writes)
+void __ovld write_imagef(write_only image1d_t, int, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, int, uint4);
+
+void __ovld write_imagef(write_only image1d_array_t, int2, int, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, int, uint4);
+
+void __ovld write_imagef(write_only image2d_t, int2, int, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int, int4);
+void __ovld write_imageui(write_only image2d_t, int2, int, uint4);
+
+void __ovld write_imagef(write_only image2d_array_t, int4, int, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, int, uint4);
+
+void __ovld write_imagef(write_only image2d_depth_t, int2, int, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, int, float);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(write_only image3d_t, int4, int, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int, int4);
+void __ovld write_imageui(write_only image3d_t, int4, int, uint4);
+#endif //cl_khr_3d_image_writes
+
+#endif //defined(cl_khr_mipmap_image_writes)
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(write_only image1d_t, int, half4);
+void __ovld write_imageh(write_only image2d_t, int2, half4);
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imageh(write_only image3d_t, int4, half4);
+#endif
+void __ovld write_imageh(write_only image1d_array_t, int2, half4);
+void __ovld write_imageh(write_only image2d_array_t, int4, half4);
+void __ovld write_imageh(write_only image1d_buffer_t, int, half4);
+#endif //cl_khr_fp16
+
+// Image write functions for read_write images
+#if defined(__opencl_c_read_write_images)
+void __ovld write_imagef(read_write image2d_t, int2, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int4);
+void __ovld write_imageui(read_write image2d_t, int2, uint4);
+
+void __ovld write_imagef(read_write image2d_array_t, int4, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, uint4);
+
+void __ovld write_imagef(read_write image1d_t, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, uint4);
+
+void __ovld write_imagef(read_write image1d_buffer_t, int, float4);
+void __ovld write_imagei(read_write image1d_buffer_t, int, int4);
+void __ovld write_imageui(read_write image1d_buffer_t, int, uint4);
+
+void __ovld write_imagef(read_write image1d_array_t, int2, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, uint4);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(read_write image3d_t, int4, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int4);
+void __ovld write_imageui(read_write image3d_t, int4, uint4);
+#endif
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(read_write image2d_depth_t, int2, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, float);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_mipmap_image_writes)
+void __ovld write_imagef(read_write image1d_t, int, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, int, uint4);
+
+void __ovld write_imagef(read_write image1d_array_t, int2, int, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, int, uint4);
+
+void __ovld write_imagef(read_write image2d_t, int2, int, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int, int4);
+void __ovld write_imageui(read_write image2d_t, int2, int, uint4);
+
+void __ovld write_imagef(read_write image2d_array_t, int4, int, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, int, uint4);
+
+void __ovld write_imagef(read_write image2d_depth_t, int2, int, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, int, float);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(read_write image3d_t, int4, int, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int, int4);
+void __ovld write_imageui(read_write image3d_t, int4, int, uint4);
+#endif //cl_khr_3d_image_writes
+
+#endif //cl_khr_mipmap_image_writes
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(read_write image1d_t, int, half4);
+void __ovld write_imageh(read_write image2d_t, int2, half4);
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imageh(read_write image3d_t, int4, half4);
+#endif
+void __ovld write_imageh(read_write image1d_array_t, int2, half4);
+void __ovld write_imageh(read_write image2d_array_t, int4, half4);
+void __ovld write_imageh(read_write image1d_buffer_t, int, half4);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_read_write_images)
+
+// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions has no
+// access qualifier and therefore defaults to read_only. Image query builtin functions taking a
+// write_only image argument must also be declared.
+
+/**
+ * Return the image width in pixels.
+ */
+int __ovld __cnfn get_image_width(read_only image1d_t);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_only image2d_t);
+int __ovld __cnfn get_image_width(read_only image3d_t);
+int __ovld __cnfn get_image_width(read_only image1d_array_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_only image2d_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_width(write_only image1d_t);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_width(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_width(write_only image1d_array_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(write_only image2d_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_width(read_write image1d_t);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_write image2d_t);
+int __ovld __cnfn get_image_width(read_write image3d_t);
+int __ovld __cnfn get_image_width(read_write image1d_array_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_write image2d_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image height in pixels.
+ */
+int __ovld __cnfn get_image_height(read_only image2d_t);
+int __ovld __cnfn get_image_height(read_only image3d_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_only image2d_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_height(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_height(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_height(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(write_only image2d_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_height(read_write image2d_t);
+int __ovld __cnfn get_image_height(read_write image3d_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_write image2d_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image depth in pixels.
+ */
+int __ovld __cnfn get_image_depth(read_only image3d_t);
+
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_depth(write_only image3d_t);
+#endif
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_depth(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef cl_khr_mipmap_image
+/**
+ * Return the number of mip-levels in the image.
+ */
+
+int __ovld get_image_num_mip_levels(read_only image1d_t);
+int __ovld get_image_num_mip_levels(read_only image2d_t);
+int __ovld get_image_num_mip_levels(read_only image3d_t);
+
+int __ovld get_image_num_mip_levels(write_only image1d_t);
+int __ovld get_image_num_mip_levels(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld get_image_num_mip_levels(write_only image3d_t);
+#endif
+
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_t);
+int __ovld get_image_num_mip_levels(read_write image2d_t);
+int __ovld get_image_num_mip_levels(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+
+int __ovld get_image_num_mip_levels(read_only image1d_array_t);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t);
+#endif // cl_khr_depth_images
+
+int __ovld get_image_num_mip_levels(write_only image1d_array_t);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t);
+#endif // cl_khr_depth_images
+
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_array_t);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t);
+#endif // cl_khr_depth_images
+#endif //defined(__opencl_c_read_write_images)
+
+#endif //cl_khr_mipmap_image
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+/**
+ * Return the channel data type. Valid values are:
+ * CLK_SNORM_INT8
+ * CLK_SNORM_INT16
+ * CLK_UNORM_INT8
+ * CLK_UNORM_INT16
+ * CLK_UNORM_SHORT_565
+ * CLK_UNORM_SHORT_555
+ * CLK_UNORM_SHORT_101010
+ * CLK_SIGNED_INT8
+ * CLK_SIGNED_INT16
+ * CLK_SIGNED_INT32
+ * CLK_UNSIGNED_INT8
+ * CLK_UNSIGNED_INT16
+ * CLK_UNSIGNED_INT32
+ * CLK_HALF_FLOAT
+ * CLK_FLOAT
+ */
+
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image channel order. Valid values are:
+ * CLK_A
+ * CLK_R
+ * CLK_Rx
+ * CLK_RG
+ * CLK_RGx
+ * CLK_RA
+ * CLK_RGB
+ * CLK_RGBx
+ * CLK_RGBA
+ * CLK_ARGB
+ * CLK_BGRA
+ * CLK_INTENSITY
+ * CLK_LUMINANCE
+ */
+
+int __ovld __cnfn get_image_channel_order(read_only image1d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_order(write_only image1d_t);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_order(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_order(read_write image1d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t);
+int __ovld __cnfn get_image_channel_order(read_write image3d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the 2D image width and height as an int2
+ * type. The width is returned in the x component, and
+ * the height in the y component.
+ */
+int2 __ovld __cnfn get_image_dim(read_only image2d_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int2 __ovld __cnfn get_image_dim(write_only image2d_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int2 __ovld __cnfn get_image_dim(read_write image2d_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the 3D image width, height, and depth as an
+ * int4 type. The width is returned in the x
+ * component, height in the y component, depth in the z
+ * component and the w component is 0.
+ */
+int4 __ovld __cnfn get_image_dim(read_only image3d_t);
+#ifdef cl_khr_3d_image_writes
+int4 __ovld __cnfn get_image_dim(write_only image3d_t);
+#endif
+#if defined(__opencl_c_read_write_images)
+int4 __ovld __cnfn get_image_dim(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
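+
+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): get_image_dim packs the per-axis queries into a single
+// vector. The helper name is hypothetical.
+#if 0
+void dim_example(image3d_t vol) { // image params default to read_only
+    int4 d = get_image_dim(vol);
+    // d.x == get_image_width(vol), d.y == get_image_height(vol),
+    // d.z == get_image_depth(vol), and d.w == 0
+}
+#endif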
+
+/**
+ * Return the image array size.
+ */
+
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the number of samples associated with the image.
+ */
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_depth_t);
+
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_depth_t);
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_depth_t);
+#endif //defined(__opencl_c_read_write_images)
+#endif //cl_khr_gl_msaa_sharing
+
+// OpenCL v2.0 s6.13.15 - Work-group Functions
+
+#if defined(__opencl_c_work_group_collective_functions)
+int __ovld __conv work_group_all(int predicate);
+int __ovld __conv work_group_any(int predicate);
+
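+// Illustrative usage sketch (not part of the original header, compiled out
+// via #if 0): work_group_all/any evaluate a predicate across the whole
+// work-group, and every work-item must reach the call. Names are
+// hypothetical.
+#if 0
+__kernel void check_range(__global const float *data, __global int *ok) {
+    int in_range = data[get_global_id(0)] <= 1.0f;
+    if (work_group_all(in_range))        // 1 iff true for every work-item
+        ok[get_group_id(0)] = 1;
+}
+#endif
+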
+#ifdef cl_khr_fp16
+half __ovld __conv work_group_broadcast(half, size_t local_id);
+half __ovld __conv work_group_broadcast(half, size_t, size_t);
+half __ovld __conv work_group_broadcast(half, size_t, size_t, size_t);
+#endif
+int __ovld __conv work_group_broadcast(int, size_t local_id);
+int __ovld __conv work_group_broadcast(int, size_t, size_t);
+int __ovld __conv work_group_broadcast(int, size_t, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t local_id);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t local_id);
+long __ovld __conv work_group_broadcast(long, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t local_id);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t local_id);
+float __ovld __conv work_group_broadcast(float, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t, size_t, size_t);
+#ifdef cl_khr_fp64
+double __ovld __conv work_group_broadcast(double, size_t local_id);
+double __ovld __conv work_group_broadcast(double, size_t, size_t);
+double __ovld __conv work_group_broadcast(double, size_t, size_t, size_t);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld __conv work_group_reduce_add(half);
+half __ovld __conv work_group_reduce_min(half);
+half __ovld __conv work_group_reduce_max(half);
+half __ovld __conv work_group_scan_exclusive_add(half);
+half __ovld __conv work_group_scan_exclusive_min(half);
+half __ovld __conv work_group_scan_exclusive_max(half);
+half __ovld __conv work_group_scan_inclusive_add(half);
+half __ovld __conv work_group_scan_inclusive_min(half);
+half __ovld __conv work_group_scan_inclusive_max(half);
+#endif
+int __ovld __conv work_group_reduce_add(int);
+int __ovld __conv work_group_reduce_min(int);
+int __ovld __conv work_group_reduce_max(int);
+int __ovld __conv work_group_scan_exclusive_add(int);
+int __ovld __conv work_group_scan_exclusive_min(int);
+int __ovld __conv work_group_scan_exclusive_max(int);
+int __ovld __conv work_group_scan_inclusive_add(int);
+int __ovld __conv work_group_scan_inclusive_min(int);
+int __ovld __conv work_group_scan_inclusive_max(int);
+uint __ovld __conv work_group_reduce_add(uint);
+uint __ovld __conv work_group_reduce_min(uint);
+uint __ovld __conv work_group_reduce_max(uint);
+uint __ovld __conv work_group_scan_exclusive_add(uint);
+uint __ovld __conv work_group_scan_exclusive_min(uint);
+uint __ovld __conv work_group_scan_exclusive_max(uint);
+uint __ovld __conv work_group_scan_inclusive_add(uint);
+uint __ovld __conv work_group_scan_inclusive_min(uint);
+uint __ovld __conv work_group_scan_inclusive_max(uint);
+long __ovld __conv work_group_reduce_add(long);
+long __ovld __conv work_group_reduce_min(long);
+long __ovld __conv work_group_reduce_max(long);
+long __ovld __conv work_group_scan_exclusive_add(long);
+long __ovld __conv work_group_scan_exclusive_min(long);
+long __ovld __conv work_group_scan_exclusive_max(long);
+long __ovld __conv work_group_scan_inclusive_add(long);
+long __ovld __conv work_group_scan_inclusive_min(long);
+long __ovld __conv work_group_scan_inclusive_max(long);
+ulong __ovld __conv work_group_reduce_add(ulong);
+ulong __ovld __conv work_group_reduce_min(ulong);
+ulong __ovld __conv work_group_reduce_max(ulong);
+ulong __ovld __conv work_group_scan_exclusive_add(ulong);
+ulong __ovld __conv work_group_scan_exclusive_min(ulong);
+ulong __ovld __conv work_group_scan_exclusive_max(ulong);
+ulong __ovld __conv work_group_scan_inclusive_add(ulong);
+ulong __ovld __conv work_group_scan_inclusive_min(ulong);
+ulong __ovld __conv work_group_scan_inclusive_max(ulong);
+float __ovld __conv work_group_reduce_add(float);
+float __ovld __conv work_group_reduce_min(float);
+float __ovld __conv work_group_reduce_max(float);
+float __ovld __conv work_group_scan_exclusive_add(float);
+float __ovld __conv work_group_scan_exclusive_min(float);
+float __ovld __conv work_group_scan_exclusive_max(float);
+float __ovld __conv work_group_scan_inclusive_add(float);
+float __ovld __conv work_group_scan_inclusive_min(float);
+float __ovld __conv work_group_scan_inclusive_max(float);
+#ifdef cl_khr_fp64
+double __ovld __conv work_group_reduce_add(double);
+double __ovld __conv work_group_reduce_min(double);
+double __ovld __conv work_group_reduce_max(double);
+double __ovld __conv work_group_scan_exclusive_add(double);
+double __ovld __conv work_group_scan_exclusive_min(double);
+double __ovld __conv work_group_scan_exclusive_max(double);
+double __ovld __conv work_group_scan_inclusive_add(double);
+double __ovld __conv work_group_scan_inclusive_min(double);
+double __ovld __conv work_group_scan_inclusive_max(double);
+#endif //cl_khr_fp64
+
+#endif //defined(__opencl_c_work_group_collective_functions)
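+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical). Work-group functions are convergent: every
+// work-item in the work-group must reach the call, and every work-item
+// receives the same reduction result.
+//
+//   kernel void wg_sums(global const int *in, global int *sums) {
+//     int total = work_group_reduce_add(in[get_global_id(0)]);
+//     if (get_local_id(0) == 0)
+//       sums[get_group_id(0)] = total;
+//   }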
+
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+#if defined(__opencl_c_pipes)
+bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
+#endif //defined(__opencl_c_pipes)
+
+
+// OpenCL v2.0 s6.13.17 - Enqueue Kernels
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+#ifdef __opencl_c_device_enqueue
+ndrange_t __ovld ndrange_1D(size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
+
+ndrange_t __ovld ndrange_2D(const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
+
+ndrange_t __ovld ndrange_3D(const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
+
+int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
+
+void __ovld retain_event(clk_event_t);
+
+void __ovld release_event(clk_event_t);
+
+clk_event_t __ovld create_user_event(void);
+
+void __ovld set_user_event_status(clk_event_t e, int state);
+
+bool __ovld is_valid_event(clk_event_t event);
+
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void*);
+
+queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
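+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): enqueuing a child kernel on the default
+// device queue. enqueue_kernel itself is handled as a compiler builtin
+// rather than declared in this header.
+//
+//   kernel void parent(global int *data) {
+//     ndrange_t r = ndrange_1D(1024, 64); // global size 1024, local size 64
+//     enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_NO_WAIT, r,
+//                    ^{ data[get_global_id(0)] *= 2; });
+//   }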
+
+// OpenCL Extension v2.0 s9.17 - Sub-groups
+
+#if defined(__opencl_subgroup_builtins)
+// Shared Sub-group Functions
+uint    __ovld get_sub_group_size(void);
+uint    __ovld get_max_sub_group_size(void);
+uint    __ovld get_num_sub_groups(void);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint    __ovld get_enqueued_num_sub_groups(void);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint    __ovld get_sub_group_id(void);
+uint    __ovld get_sub_group_local_id(void);
+
+void    __ovld __conv sub_group_barrier(cl_mem_fence_flags);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void    __ovld __conv sub_group_barrier(cl_mem_fence_flags, memory_scope);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+int     __ovld __conv sub_group_all(int predicate);
+int     __ovld __conv sub_group_any(int predicate);
+
+int     __ovld __conv sub_group_broadcast(int  , uint sub_group_local_id);
+uint    __ovld __conv sub_group_broadcast(uint , uint sub_group_local_id);
+long    __ovld __conv sub_group_broadcast(long , uint sub_group_local_id);
+ulong   __ovld __conv sub_group_broadcast(ulong, uint sub_group_local_id);
+float   __ovld __conv sub_group_broadcast(float, uint sub_group_local_id);
+
+int     __ovld __conv sub_group_reduce_add(int  );
+uint    __ovld __conv sub_group_reduce_add(uint );
+long    __ovld __conv sub_group_reduce_add(long );
+ulong   __ovld __conv sub_group_reduce_add(ulong);
+float   __ovld __conv sub_group_reduce_add(float);
+int     __ovld __conv sub_group_reduce_min(int  );
+uint    __ovld __conv sub_group_reduce_min(uint );
+long    __ovld __conv sub_group_reduce_min(long );
+ulong   __ovld __conv sub_group_reduce_min(ulong);
+float   __ovld __conv sub_group_reduce_min(float);
+int     __ovld __conv sub_group_reduce_max(int  );
+uint    __ovld __conv sub_group_reduce_max(uint );
+long    __ovld __conv sub_group_reduce_max(long );
+ulong   __ovld __conv sub_group_reduce_max(ulong);
+float   __ovld __conv sub_group_reduce_max(float);
+
+int     __ovld __conv sub_group_scan_exclusive_add(int  );
+uint    __ovld __conv sub_group_scan_exclusive_add(uint );
+long    __ovld __conv sub_group_scan_exclusive_add(long );
+ulong   __ovld __conv sub_group_scan_exclusive_add(ulong);
+float   __ovld __conv sub_group_scan_exclusive_add(float);
+int     __ovld __conv sub_group_scan_exclusive_min(int  );
+uint    __ovld __conv sub_group_scan_exclusive_min(uint );
+long    __ovld __conv sub_group_scan_exclusive_min(long );
+ulong   __ovld __conv sub_group_scan_exclusive_min(ulong);
+float   __ovld __conv sub_group_scan_exclusive_min(float);
+int     __ovld __conv sub_group_scan_exclusive_max(int  );
+uint    __ovld __conv sub_group_scan_exclusive_max(uint );
+long    __ovld __conv sub_group_scan_exclusive_max(long );
+ulong   __ovld __conv sub_group_scan_exclusive_max(ulong);
+float   __ovld __conv sub_group_scan_exclusive_max(float);
+
+int     __ovld __conv sub_group_scan_inclusive_add(int  );
+uint    __ovld __conv sub_group_scan_inclusive_add(uint );
+long    __ovld __conv sub_group_scan_inclusive_add(long );
+ulong   __ovld __conv sub_group_scan_inclusive_add(ulong);
+float   __ovld __conv sub_group_scan_inclusive_add(float);
+int     __ovld __conv sub_group_scan_inclusive_min(int  );
+uint    __ovld __conv sub_group_scan_inclusive_min(uint );
+long    __ovld __conv sub_group_scan_inclusive_min(long );
+ulong   __ovld __conv sub_group_scan_inclusive_min(ulong);
+float   __ovld __conv sub_group_scan_inclusive_min(float);
+int     __ovld __conv sub_group_scan_inclusive_max(int  );
+uint    __ovld __conv sub_group_scan_inclusive_max(uint );
+long    __ovld __conv sub_group_scan_inclusive_max(long );
+ulong   __ovld __conv sub_group_scan_inclusive_max(ulong);
+float   __ovld __conv sub_group_scan_inclusive_max(float);
+
+#ifdef cl_khr_fp16
+half    __ovld __conv sub_group_broadcast(half, uint sub_group_local_id);
+half    __ovld __conv sub_group_reduce_add(half);
+half    __ovld __conv sub_group_reduce_min(half);
+half    __ovld __conv sub_group_reduce_max(half);
+half    __ovld __conv sub_group_scan_exclusive_add(half);
+half    __ovld __conv sub_group_scan_exclusive_min(half);
+half    __ovld __conv sub_group_scan_exclusive_max(half);
+half    __ovld __conv sub_group_scan_inclusive_add(half);
+half    __ovld __conv sub_group_scan_inclusive_min(half);
+half    __ovld __conv sub_group_scan_inclusive_max(half);
+#endif //cl_khr_fp16
+
+#ifdef cl_khr_fp64
+double  __ovld __conv sub_group_broadcast(double, uint sub_group_local_id);
+double  __ovld __conv sub_group_reduce_add(double);
+double  __ovld __conv sub_group_reduce_min(double);
+double  __ovld __conv sub_group_reduce_max(double);
+double  __ovld __conv sub_group_scan_exclusive_add(double);
+double  __ovld __conv sub_group_scan_exclusive_min(double);
+double  __ovld __conv sub_group_scan_exclusive_max(double);
+double  __ovld __conv sub_group_scan_inclusive_add(double);
+double  __ovld __conv sub_group_scan_inclusive_min(double);
+double  __ovld __conv sub_group_scan_inclusive_max(double);
+#endif //cl_khr_fp64
+
+#endif // __opencl_subgroup_builtins
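+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): a two-level reduction that sums within each
+// sub-group, then lets lane 0 of each sub-group publish its partial.
+//
+//   kernel void sg_sums(global const float *in, global float *partials) {
+//     float s = sub_group_reduce_add(in[get_global_id(0)]);
+//     if (get_sub_group_local_id() == 0)
+//       partials[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = s;
+//   }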
+
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif  // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif  // cl_khr_fp64
+
+#endif  // cl_khr_subgroup_extended_types
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int     __ovld sub_group_elect(void);
+int     __ovld sub_group_non_uniform_all( int predicate );
+int     __ovld sub_group_non_uniform_any( int predicate );
+
+int     __ovld sub_group_non_uniform_all_equal( char value );
+int     __ovld sub_group_non_uniform_all_equal( uchar value );
+int     __ovld sub_group_non_uniform_all_equal( short value );
+int     __ovld sub_group_non_uniform_all_equal( ushort value );
+int     __ovld sub_group_non_uniform_all_equal( int value );
+int     __ovld sub_group_non_uniform_all_equal( uint value );
+int     __ovld sub_group_non_uniform_all_equal( long value );
+int     __ovld sub_group_non_uniform_all_equal( ulong value );
+int     __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int     __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int     __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
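+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): sub_group_elect() returns non-zero for
+// exactly one of the work-items that reach it, even under divergence.
+//
+//   kernel void mark_groups(global const int *flags, global int *hit) {
+//     if (flags[get_global_id(0)])
+//       if (sub_group_elect()) // one active lane per sub-group
+//         hit[get_group_id(0)] = 1;
+//   }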
+
+#if defined(cl_khr_subgroup_ballot)
+char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char    __ovld sub_group_broadcast_first( char value );
+uchar   __ovld sub_group_broadcast_first( uchar value );
+short   __ovld sub_group_broadcast_first( short value );
+ushort  __ovld sub_group_broadcast_first( ushort value );
+int     __ovld sub_group_broadcast_first( int value );
+uint    __ovld sub_group_broadcast_first( uint value );
+long    __ovld sub_group_broadcast_first( long value );
+ulong   __ovld sub_group_broadcast_first( ulong value );
+float   __ovld sub_group_broadcast_first( float value );
+
+uint4   __ovld sub_group_ballot( int predicate );
+int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_find_lsb( uint4 value );
+uint    __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4   __ovld __cnfn get_sub_group_eq_mask(void);
+uint4   __ovld __cnfn get_sub_group_ge_mask(void);
+uint4   __ovld __cnfn get_sub_group_gt_mask(void);
+uint4   __ovld __cnfn get_sub_group_le_mask(void);
+uint4   __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half    __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double   __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
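+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): counting the lanes of a sub-group that pass
+// a predicate by popcounting the ballot mask.
+//
+//   kernel void count_positive(global const float *x, global uint *counts) {
+//     uint4 mask = sub_group_ballot(x[get_global_id(0)] > 0.0f);
+//     uint n = sub_group_ballot_bit_count(mask);
+//     if (get_sub_group_local_id() == 0)
+//       counts[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = n;
+//   }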
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char    __ovld sub_group_non_uniform_reduce_add( char value );
+uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
+short   __ovld sub_group_non_uniform_reduce_add( short value );
+ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
+int     __ovld sub_group_non_uniform_reduce_add( int value );
+uint    __ovld sub_group_non_uniform_reduce_add( uint value );
+long    __ovld sub_group_non_uniform_reduce_add( long value );
+ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
+float   __ovld sub_group_non_uniform_reduce_add( float value );
+
+char    __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short   __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int     __ovld sub_group_non_uniform_reduce_mul( int value );
+uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
+long    __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float   __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char    __ovld sub_group_non_uniform_reduce_min( char value );
+uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
+short   __ovld sub_group_non_uniform_reduce_min( short value );
+ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
+int     __ovld sub_group_non_uniform_reduce_min( int value );
+uint    __ovld sub_group_non_uniform_reduce_min( uint value );
+long    __ovld sub_group_non_uniform_reduce_min( long value );
+ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
+float   __ovld sub_group_non_uniform_reduce_min( float value );
+
+char    __ovld sub_group_non_uniform_reduce_max( char value );
+uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
+short   __ovld sub_group_non_uniform_reduce_max( short value );
+ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
+int     __ovld sub_group_non_uniform_reduce_max( int value );
+uint    __ovld sub_group_non_uniform_reduce_max( uint value );
+long    __ovld sub_group_non_uniform_reduce_max( long value );
+ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
+float   __ovld sub_group_non_uniform_reduce_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_reduce_and( char value );
+uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
+short   __ovld sub_group_non_uniform_reduce_and( short value );
+ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
+int     __ovld sub_group_non_uniform_reduce_and( int value );
+uint    __ovld sub_group_non_uniform_reduce_and( uint value );
+long    __ovld sub_group_non_uniform_reduce_and( long value );
+ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_or( char value );
+uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
+short   __ovld sub_group_non_uniform_reduce_or( short value );
+ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
+int     __ovld sub_group_non_uniform_reduce_or( int value );
+uint    __ovld sub_group_non_uniform_reduce_or( uint value );
+long    __ovld sub_group_non_uniform_reduce_or( long value );
+ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short   __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int     __ovld sub_group_non_uniform_reduce_xor( int value );
+uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
+long    __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_reduce_add( half value );
+half    __ovld sub_group_non_uniform_reduce_mul( half value );
+half    __ovld sub_group_non_uniform_reduce_min( half value );
+half    __ovld sub_group_non_uniform_reduce_max( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_non_uniform_reduce_add( double value );
+double  __ovld sub_group_non_uniform_reduce_mul( double value );
+double  __ovld sub_group_non_uniform_reduce_min( double value );
+double  __ovld sub_group_non_uniform_reduce_max( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
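+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): unlike the uniform sub_group_reduce_* calls,
+// the non-uniform variants may be called from divergent control flow
+// and combine only the currently active lanes.
+//
+//   kernel void sum_positive(global const float *x, global float *out) {
+//     float v = x[get_global_id(0)];
+//     if (v > 0.0f) {
+//       float s = sub_group_non_uniform_reduce_add(v); // active lanes only
+//       if (sub_group_elect()) // needs cl_khr_subgroup_non_uniform_vote
+//         out[get_group_id(0)] = s;
+//     }
+//   }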
+
+#if defined(cl_khr_subgroup_shuffle)
+char    __ovld sub_group_shuffle( char value, uint index );
+uchar   __ovld sub_group_shuffle( uchar value, uint index );
+short   __ovld sub_group_shuffle( short value, uint index );
+ushort  __ovld sub_group_shuffle( ushort value, uint index );
+int     __ovld sub_group_shuffle( int value, uint index );
+uint    __ovld sub_group_shuffle( uint value, uint index );
+long    __ovld sub_group_shuffle( long value, uint index );
+ulong   __ovld sub_group_shuffle( ulong value, uint index );
+float   __ovld sub_group_shuffle( float value, uint index );
+
+char    __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short   __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int     __ovld sub_group_shuffle_xor( int value, uint mask );
+uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
+long    __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float   __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle( half value, uint index );
+half    __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle( double value, uint index );
+double  __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
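+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): a butterfly reduction built from
+// sub_group_shuffle_xor; assuming a power-of-two sub-group size, every
+// lane ends up holding the sub-group sum.
+//
+//   kernel void xor_reduce(global const float *in, global float *out) {
+//     float v = in[get_global_id(0)];
+//     for (uint m = get_sub_group_size() / 2; m > 0; m /= 2)
+//       v += sub_group_shuffle_xor(v, m);
+//     out[get_global_id(0)] = v;
+//   }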
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char    __ovld sub_group_shuffle_up( char value, uint delta );
+uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
+short   __ovld sub_group_shuffle_up( short value, uint delta );
+ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
+int     __ovld sub_group_shuffle_up( int value, uint delta );
+uint    __ovld sub_group_shuffle_up( uint value, uint delta );
+long    __ovld sub_group_shuffle_up( long value, uint delta );
+ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
+float   __ovld sub_group_shuffle_up( float value, uint delta );
+
+char    __ovld sub_group_shuffle_down( char value, uint delta );
+uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
+short   __ovld sub_group_shuffle_down( short value, uint delta );
+ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
+int     __ovld sub_group_shuffle_down( int value, uint delta );
+uint    __ovld sub_group_shuffle_down( uint value, uint delta );
+long    __ovld sub_group_shuffle_down( long value, uint delta );
+ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
+float   __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle_up( half value, uint delta );
+half    __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle_up( double value, uint delta );
+double  __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
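+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): fetching the previous lane's value with
+// sub_group_shuffle_up. The result is undefined for lane 0 (there is
+// no lane -1), so it is masked out.
+//
+//   kernel void lane_diff(global const float *in, global float *out) {
+//     float v = in[get_global_id(0)];
+//     float prev = sub_group_shuffle_up(v, 1u);
+//     out[get_global_id(0)] = (get_sub_group_local_id() == 0) ? 0.0f : v - prev;
+//   }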
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_clustered_reduce
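+
+// Illustrative usage (editor's sketch, not part of the upstream header;
+// names are hypothetical): reducing in clusters of four lanes. The
+// cluster size must be a power of two no larger than the sub-group
+// size, and every lane in a cluster receives that cluster's result.
+//
+//   kernel void quad_max(global const float *in, global float *out) {
+//     out[get_global_id(0)] = sub_group_clustered_reduce_max(in[get_global_id(0)], 4u);
+//   }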
+
+#if defined(cl_khr_extended_bit_ops)
+char __ovld __cnfn bitfield_insert(char, char, uint, uint);
+uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
+short __ovld __cnfn bitfield_insert(short, short, uint, uint);
+ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
+int __ovld __cnfn bitfield_insert(int, int, uint, uint);
+uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
+long __ovld __cnfn bitfield_insert(long, long, uint, uint);
+ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
+char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
+uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
+ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
+uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
+long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
+ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
+uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
+ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
+uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
+long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
+ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
+uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
+ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
+uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
+long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
+ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
+uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
+ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
+uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
+long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
+ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
+uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
+ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
+uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
+long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
+ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
+
+char __ovld __cnfn bit_reverse(char);
+uchar __ovld __cnfn bit_reverse(uchar);
+short __ovld __cnfn bit_reverse(short);
+ushort __ovld __cnfn bit_reverse(ushort);
+int __ovld __cnfn bit_reverse(int);
+uint __ovld __cnfn bit_reverse(uint);
+long __ovld __cnfn bit_reverse(long);
+ulong __ovld __cnfn bit_reverse(ulong);
+char2 __ovld __cnfn bit_reverse(char2);
+uchar2 __ovld __cnfn bit_reverse(uchar2);
+short2 __ovld __cnfn bit_reverse(short2);
+ushort2 __ovld __cnfn bit_reverse(ushort2);
+int2 __ovld __cnfn bit_reverse(int2);
+uint2 __ovld __cnfn bit_reverse(uint2);
+long2 __ovld __cnfn bit_reverse(long2);
+ulong2 __ovld __cnfn bit_reverse(ulong2);
+char3 __ovld __cnfn bit_reverse(char3);
+uchar3 __ovld __cnfn bit_reverse(uchar3);
+short3 __ovld __cnfn bit_reverse(short3);
+ushort3 __ovld __cnfn bit_reverse(ushort3);
+int3 __ovld __cnfn bit_reverse(int3);
+uint3 __ovld __cnfn bit_reverse(uint3);
+long3 __ovld __cnfn bit_reverse(long3);
+ulong3 __ovld __cnfn bit_reverse(ulong3);
+char4 __ovld __cnfn bit_reverse(char4);
+uchar4 __ovld __cnfn bit_reverse(uchar4);
+short4 __ovld __cnfn bit_reverse(short4);
+ushort4 __ovld __cnfn bit_reverse(ushort4);
+int4 __ovld __cnfn bit_reverse(int4);
+uint4 __ovld __cnfn bit_reverse(uint4);
+long4 __ovld __cnfn bit_reverse(long4);
+ulong4 __ovld __cnfn bit_reverse(ulong4);
+char8 __ovld __cnfn bit_reverse(char8);
+uchar8 __ovld __cnfn bit_reverse(uchar8);
+short8 __ovld __cnfn bit_reverse(short8);
+ushort8 __ovld __cnfn bit_reverse(ushort8);
+int8 __ovld __cnfn bit_reverse(int8);
+uint8 __ovld __cnfn bit_reverse(uint8);
+long8 __ovld __cnfn bit_reverse(long8);
+ulong8 __ovld __cnfn bit_reverse(ulong8);
+char16 __ovld __cnfn bit_reverse(char16);
+uchar16 __ovld __cnfn bit_reverse(uchar16);
+short16 __ovld __cnfn bit_reverse(short16);
+ushort16 __ovld __cnfn bit_reverse(ushort16);
+int16 __ovld __cnfn bit_reverse(int16);
+uint16 __ovld __cnfn bit_reverse(uint16);
+long16 __ovld __cnfn bit_reverse(long16);
+ulong16 __ovld __cnfn bit_reverse(ulong16);
+#endif // cl_khr_extended_bit_ops
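+
+// Editor's illustrative sketch, not part of the upstream header: one way the
+// extended bit ops above compose in a kernel, assuming the device reports
+// cl_khr_extended_bit_ops. Guarded out of compilation; names are made up.
+#if 0
+kernel void repack(global uint* dst, global const uint* src) {
+  size_t gid = get_global_id(0);
+  // Pull an unsigned 8-bit field starting at bit 4 of the source word...
+  uint field = bitfield_extract_unsigned(src[gid], 4, 8);
+  // ...then insert it at bit 16 of the bit-reversed word.
+  dst[gid] = bitfield_insert(bit_reverse(src[gid]), field, 16, 8);
+}
+#endif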
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit)
+uint __ovld __cnfn dot(uchar4, uchar4);
+int __ovld __cnfn dot(char4, char4);
+int __ovld __cnfn dot(uchar4, char4);
+int __ovld __cnfn dot(char4, uchar4);
+
+uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
+int __ovld __cnfn dot_acc_sat(char4, char4, int);
+int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
+int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit
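+
+// Editor's illustrative sketch, not part of the upstream header: a saturating
+// mixed-signedness 4x8-bit dot product accumulated into an int, using the
+// overloads above. Guarded out; the kernel and buffers are hypothetical.
+#if 0
+kernel void dot_rows(global int* acc, global const char4* a,
+                     global const uchar4* b) {
+  size_t gid = get_global_id(0);
+  // Accumulates the 4-byte dot product into acc[gid] with saturation.
+  acc[gid] = dot_acc_sat(a[gid], b[gid], acc[gid]);
+}
+#endif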
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
+uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
+int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
+
+uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
+int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
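+
+// Editor's illustrative sketch, not part of the upstream header: the packed
+// forms treat each uint as four packed 8-bit lanes; the _uu/_ss/_us/_su
+// suffix spells the signedness of the two operands. Guarded out.
+#if 0
+kernel void packed_dot(global int* out, global const uint* a,
+                       global const uint* b) {
+  size_t gid = get_global_id(0);
+  out[gid] = dot_4x8packed_ss_int(a[gid], b[gid]); // both operands signed
+}
+#endif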
+
+#if defined(cl_khr_subgroup_rotate)
+char __ovld __conv sub_group_rotate(char, int);
+uchar __ovld __conv sub_group_rotate(uchar, int);
+short __ovld __conv sub_group_rotate(short, int);
+ushort __ovld __conv sub_group_rotate(ushort, int);
+int __ovld __conv sub_group_rotate(int, int);
+uint __ovld __conv sub_group_rotate(uint, int);
+long __ovld __conv sub_group_rotate(long, int);
+ulong __ovld __conv sub_group_rotate(ulong, int);
+float __ovld __conv sub_group_rotate(float, int);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_rotate(double, int);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_rotate(half, int);
+#endif // cl_khr_fp16
+
+char __ovld __conv sub_group_clustered_rotate(char, int, uint);
+uchar __ovld __conv sub_group_clustered_rotate(uchar, int, uint);
+short __ovld __conv sub_group_clustered_rotate(short, int, uint);
+ushort __ovld __conv sub_group_clustered_rotate(ushort, int, uint);
+int __ovld __conv sub_group_clustered_rotate(int, int, uint);
+uint __ovld __conv sub_group_clustered_rotate(uint, int, uint);
+long __ovld __conv sub_group_clustered_rotate(long, int, uint);
+ulong __ovld __conv sub_group_clustered_rotate(ulong, int, uint);
+float __ovld __conv sub_group_clustered_rotate(float, int, uint);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_clustered_rotate(double, int, uint);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_clustered_rotate(half, int, uint);
+#endif // cl_khr_fp16
+#endif // cl_khr_subgroup_rotate
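+
+// Editor's illustrative sketch, not part of the upstream header:
+// sub_group_rotate circularly shifts a value across sub-group lanes by a
+// uniform delta. Guarded out; kernel name and buffer are hypothetical.
+#if 0
+kernel void rotate_lanes(global float* data) {
+  size_t gid = get_global_id(0);
+  // Each lane receives the value held by the lane one step ahead of it.
+  data[gid] = sub_group_rotate(data[gid], 1);
+}
+#endif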
+
+#if defined(cl_intel_subgroups)
+// Intel-Specific Sub Group Functions
+float   __ovld __conv intel_sub_group_shuffle( float , uint );
+float2  __ovld __conv intel_sub_group_shuffle( float2, uint );
+float3  __ovld __conv intel_sub_group_shuffle( float3, uint );
+float4  __ovld __conv intel_sub_group_shuffle( float4, uint );
+float8  __ovld __conv intel_sub_group_shuffle( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle( float16, uint );
+
+int     __ovld __conv intel_sub_group_shuffle( int , uint );
+int2    __ovld __conv intel_sub_group_shuffle( int2, uint );
+int3    __ovld __conv intel_sub_group_shuffle( int3, uint );
+int4    __ovld __conv intel_sub_group_shuffle( int4, uint );
+int8    __ovld __conv intel_sub_group_shuffle( int8, uint );
+int16   __ovld __conv intel_sub_group_shuffle( int16, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle( uint , uint );
+uint2   __ovld __conv intel_sub_group_shuffle( uint2, uint );
+uint3   __ovld __conv intel_sub_group_shuffle( uint3, uint );
+uint4   __ovld __conv intel_sub_group_shuffle( uint4, uint );
+uint8   __ovld __conv intel_sub_group_shuffle( uint8, uint );
+uint16  __ovld __conv intel_sub_group_shuffle( uint16, uint );
+
+long    __ovld __conv intel_sub_group_shuffle( long, uint );
+ulong   __ovld __conv intel_sub_group_shuffle( ulong, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint );
+float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint );
+float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint );
+float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint );
+float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint );
+float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint );
+int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint );
+int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint );
+int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint );
+int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint );
+int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint );
+uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_down( long cur, long next, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_down( ulong cur, ulong next, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint );
+float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint );
+float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint );
+float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint );
+float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint );
+float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint );
+int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint );
+int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint );
+int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint );
+int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint );
+int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint );
+uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_xor( float , uint );
+float2  __ovld __conv intel_sub_group_shuffle_xor( float2, uint );
+float3  __ovld __conv intel_sub_group_shuffle_xor( float3, uint );
+float4  __ovld __conv intel_sub_group_shuffle_xor( float4, uint );
+float8  __ovld __conv intel_sub_group_shuffle_xor( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle_xor( float16, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_xor( int , uint );
+int2    __ovld __conv intel_sub_group_shuffle_xor( int2, uint );
+int3    __ovld __conv intel_sub_group_shuffle_xor( int3, uint );
+int4    __ovld __conv intel_sub_group_shuffle_xor( int4, uint );
+int8    __ovld __conv intel_sub_group_shuffle_xor( int8, uint );
+int16   __ovld __conv intel_sub_group_shuffle_xor( int16, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_xor( uint , uint );
+uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_xor( long, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong, uint );
+
+#if defined(__opencl_c_images)
+uint    __ovld __conv intel_sub_group_block_read(read_only image2d_t, int2);
+uint2   __ovld __conv intel_sub_group_block_read2(read_only image2d_t, int2);
+uint4   __ovld __conv intel_sub_group_block_read4(read_only image2d_t, int2);
+uint8   __ovld __conv intel_sub_group_block_read8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t, int2);
+uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t, int2);
+uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t, int2);
+uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
+uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
+uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
+uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
+
+#if defined(__opencl_c_images)
+void    __ovld __conv intel_sub_group_block_write(write_only image2d_t, int2, uint);
+void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t, int2, uint2);
+void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t, int2, uint4);
+void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t, int2, uint8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void    __ovld __conv intel_sub_group_block_write(read_write image2d_t, int2, uint);
+void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t, int2, uint2);
+void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t, int2, uint4);
+void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
+
+void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
+void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
+void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
+void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
+
+#ifdef cl_khr_fp16
+half    __ovld __conv intel_sub_group_shuffle( half, uint );
+half    __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint );
+half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint );
+half    __ovld __conv intel_sub_group_shuffle_xor( half, uint );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld __conv intel_sub_group_shuffle( double, uint );
+double  __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint );
+double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint );
+double  __ovld __conv intel_sub_group_shuffle_xor( double, uint );
+#endif // cl_khr_fp64
+
+#endif // cl_intel_subgroups
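+
+// Editor's illustrative sketch, not part of the upstream header: a sub-group
+// block read, a butterfly exchange via shuffle_xor, and a block write using
+// the declarations above. Guarded out; pointer math assumes one sub-group.
+#if 0
+kernel void butterfly(global uint* buf) {
+  // Each lane reads one uint starting at buf, swaps it with the lane whose
+  // index differs in bit 0, and writes the result back in block form.
+  uint v = intel_sub_group_block_read(buf);
+  v = intel_sub_group_shuffle_xor(v, 1u);
+  intel_sub_group_block_write(buf, v);
+}
+#endif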
+
+#if defined(cl_intel_subgroups_short)
+short       __ovld __conv intel_sub_group_broadcast( short , uint sub_group_local_id );
+short2      __ovld __conv intel_sub_group_broadcast( short2, uint sub_group_local_id );
+short3      __ovld __conv intel_sub_group_broadcast( short3, uint sub_group_local_id );
+short4      __ovld __conv intel_sub_group_broadcast( short4, uint sub_group_local_id );
+short8      __ovld __conv intel_sub_group_broadcast( short8, uint sub_group_local_id );
+
+ushort      __ovld __conv intel_sub_group_broadcast( ushort , uint sub_group_local_id );
+ushort2     __ovld __conv intel_sub_group_broadcast( ushort2, uint sub_group_local_id );
+ushort3     __ovld __conv intel_sub_group_broadcast( ushort3, uint sub_group_local_id );
+ushort4     __ovld __conv intel_sub_group_broadcast( ushort4, uint sub_group_local_id );
+ushort8     __ovld __conv intel_sub_group_broadcast( ushort8, uint sub_group_local_id );
+
+short       __ovld __conv intel_sub_group_shuffle( short  , uint );
+short2      __ovld __conv intel_sub_group_shuffle( short2 , uint );
+short3      __ovld __conv intel_sub_group_shuffle( short3 , uint );
+short4      __ovld __conv intel_sub_group_shuffle( short4 , uint );
+short8      __ovld __conv intel_sub_group_shuffle( short8 , uint );
+short16     __ovld __conv intel_sub_group_shuffle( short16, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle( ushort  , uint );
+ushort2     __ovld __conv intel_sub_group_shuffle( ushort2 , uint );
+ushort3     __ovld __conv intel_sub_group_shuffle( ushort3 , uint );
+ushort4     __ovld __conv intel_sub_group_shuffle( ushort4 , uint );
+ushort8     __ovld __conv intel_sub_group_shuffle( ushort8 , uint );
+ushort16    __ovld __conv intel_sub_group_shuffle( ushort16, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint );
+short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint );
+short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint );
+short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint );
+short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint );
+short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_up( short   prev, short   cur, uint );
+short2      __ovld __conv intel_sub_group_shuffle_up( short2  prev, short2  cur, uint );
+short3      __ovld __conv intel_sub_group_shuffle_up( short3  prev, short3  cur, uint );
+short4      __ovld __conv intel_sub_group_shuffle_up( short4  prev, short4  cur, uint );
+short8      __ovld __conv intel_sub_group_shuffle_up( short8  prev, short8  cur, uint );
+short16     __ovld __conv intel_sub_group_shuffle_up( short16 prev, short16 cur, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   prev, ushort   cur, uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  prev, ushort2  cur, uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  prev, ushort3  cur, uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  prev, ushort4  cur, uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  prev, ushort8  cur, uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 prev, ushort16 cur, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_xor( short  , uint );
+short2      __ovld __conv intel_sub_group_shuffle_xor( short2 , uint );
+short3      __ovld __conv intel_sub_group_shuffle_xor( short3 , uint );
+short4      __ovld __conv intel_sub_group_shuffle_xor( short4 , uint );
+short8      __ovld __conv intel_sub_group_shuffle_xor( short8 , uint );
+short16     __ovld __conv intel_sub_group_shuffle_xor( short16, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort  , uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2 , uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3 , uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4 , uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8 , uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16, uint );
+
+short       __ovld __conv intel_sub_group_reduce_add( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
+short       __ovld __conv intel_sub_group_reduce_min( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
+short       __ovld __conv intel_sub_group_reduce_max( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
+
+short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
+short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
+short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
+
+short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
+short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
+short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
+
+#if defined(__opencl_c_images)
+uint       __ovld __conv intel_sub_group_block_read_ui(read_only image2d_t, int2);
+uint2      __ovld __conv intel_sub_group_block_read_ui2(read_only image2d_t, int2);
+uint4      __ovld __conv intel_sub_group_block_read_ui4(read_only image2d_t, int2);
+uint8      __ovld __conv intel_sub_group_block_read_ui8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+uint       __ovld __conv intel_sub_group_block_read_ui(read_write image2d_t, int2);
+uint2      __ovld __conv intel_sub_group_block_read_ui2(read_write image2d_t, int2);
+uint4      __ovld __conv intel_sub_group_block_read_ui4(read_write image2d_t, int2);
+uint8      __ovld __conv intel_sub_group_block_read_ui8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
+uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
+uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
+uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
+
+#if defined(__opencl_c_images)
+void       __ovld __conv intel_sub_group_block_write_ui(write_only image2d_t, int2, uint);
+void       __ovld __conv intel_sub_group_block_write_ui2(write_only image2d_t, int2, uint2);
+void       __ovld __conv intel_sub_group_block_write_ui4(write_only image2d_t, int2, uint4);
+void       __ovld __conv intel_sub_group_block_write_ui8(write_only image2d_t, int2, uint8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void       __ovld __conv intel_sub_group_block_write_ui(read_write image2d_t, int2, uint);
+void       __ovld __conv intel_sub_group_block_write_ui2(read_write image2d_t, int2, uint2);
+void       __ovld __conv intel_sub_group_block_write_ui4(read_write image2d_t, int2, uint4);
+void       __ovld __conv intel_sub_group_block_write_ui8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
+
+void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
+void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
+void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
+void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
+
+#if defined(__opencl_c_images)
+ushort      __ovld __conv intel_sub_group_block_read_us(read_only image2d_t, int2);
+ushort2     __ovld __conv intel_sub_group_block_read_us2(read_only image2d_t, int2);
+ushort4     __ovld __conv intel_sub_group_block_read_us4(read_only image2d_t, int2);
+ushort8     __ovld __conv intel_sub_group_block_read_us8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t, int2);
+ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t, int2);
+ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t, int2);
+ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
+ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
+ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
+ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
+
+#if defined(__opencl_c_images)
+void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t, int2, ushort);
+void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t, int2, ushort2);
+void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t, int2, ushort4);
+void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t, int2, ushort);
+void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t, int2, ushort2);
+void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t, int2, ushort4);
+void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_read_write_images)
+
+void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
+void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
+void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
+void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
+#endif // cl_intel_subgroups_short
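+
+// Editor's illustrative sketch, not part of the upstream header: the 16-bit
+// (_us) block I/O combined with a sub-group reduction from the list above.
+// Guarded out; names and layout are hypothetical.
+#if 0
+kernel void row_total(global ushort* src, global ushort* dst) {
+  // One ushort per lane; every lane then holds the sub-group-wide sum.
+  ushort v = intel_sub_group_block_read_us(src);
+  dst[get_global_id(0)] = intel_sub_group_reduce_add(v);
+}
+#endif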
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+// MCE built-in functions
+uchar __ovld
+intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
+    uchar slice_type, uchar qp);
+ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
+    uchar slice_type, uchar qp);
+uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
+    uchar slice_type, uchar qp);
+uint2 __ovld
+intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
+    uchar slice_type, uchar qp);
+
+uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
+uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
+uchar __ovld
+intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
+
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_shape_penalty(
+    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_ac_only_haar(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_mce_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
+    intel_sub_group_avc_mce_result_t result);
+uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld
+intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_mce_result_t result);
+
+// IME built-in functions
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_initialize(
+    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference(
+    short2 ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference(
+    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_max_motion_vector_count(
+    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_unidirectional_mix_disable(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_early_search_termination_threshold(
+    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_weighted_sad(
+    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
+
+__attribute__((deprecated("If you use the latest Intel driver, please use "
+                          "intel_sub_group_avc_ime_ref_window_size instead",
+                          "intel_sub_group_avc_ime_ref_window_size")))
+ushort2 __ovld
+intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
+ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
+    uchar search_window_config, char dual_ref);
+short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
+    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
+    ushort2 image_size);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif // defined(__opencl_c_images)
+
+intel_sub_group_avc_ime_single_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_single_reference_streamin(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_dual_reference_streamin(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_single_reference_streamout(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_dual_reference_streamout(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+
+uchar __ovld intel_sub_group_avc_ime_get_border_reached(
+    uchar image_select, intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
+    intel_sub_group_avc_ime_result_t result);
+
+// REF built-in functions
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_fme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar sad_adjustment);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_bme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar bidirectional_weight, uchar sad_adjustment);
+
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bidirectional_mix_disable(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bilinear_filter_enable(
+    intel_sub_group_avc_ref_payload_t payload);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+#endif // defined(__opencl_c_images)
+
+// SIC built-in functions
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_initialize(
+    ushort2 src_coord);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_skc(
+    uint skip_block_partition_type, uint skip_motion_vector_mask,
+    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
+    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+uint __ovld
+intel_sub_group_avc_sic_get_motion_vector_mask(
+    uint skip_block_partition_type, uchar direction);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
+    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
+    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
+    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
+    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_forward_transform_enable(
+    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
+    uchar block_based_skip_type,
+    intel_sub_group_avc_sic_payload_t payload);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_ipe(
+    read_only image2d_t src_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+#endif // defined(__opencl_c_images)
+
+uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
+    intel_sub_group_avc_sic_result_t result);
+uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
+    intel_sub_group_avc_sic_result_t result);
+uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
+    intel_sub_group_avc_sic_result_t result);
+
+// Wrappers
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_ac_only_haar(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_ac_only_haar(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_ac_only_haar(
+    intel_sub_group_avc_sic_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
+    intel_sub_group_avc_ime_result_t result);
+ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
+    intel_sub_group_avc_ref_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
+    intel_sub_group_avc_ref_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
+    intel_sub_group_avc_sic_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
+    intel_sub_group_avc_ref_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
+    intel_sub_group_avc_ref_result_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld
+intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ref_result_t result);
+
+// Type conversion functions
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_payload(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_payload(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_payload(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_result(
+    intel_sub_group_avc_ime_result_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_result(
+    intel_sub_group_avc_ref_result_t result);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_result(
+    intel_sub_group_avc_sic_result_t result);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_result(
+    intel_sub_group_avc_mce_result_t result);
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
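+
+// Editor's illustrative sketch, not part of the upstream header: the usual
+// device-side VME flow is initialize -> configure payload -> evaluate ->
+// query results. A minimal single-reference IME pass might look as follows;
+// the literal 0 arguments stand in for the CLK_AVC_ME_* enum values defined
+// elsewhere, and the kernel is hypothetical. Guarded out of compilation.
+#if 0
+kernel void ime_pass(read_only image2d_t src, read_only image2d_t ref,
+                     sampler_t vme_sampler, global ulong* mvs) {
+  // One 16x16 source macroblock per work-group.
+  ushort2 coord = (ushort2)((ushort)(get_group_id(0) * 16),
+                            (ushort)(get_group_id(1) * 16));
+  intel_sub_group_avc_ime_payload_t p =
+      intel_sub_group_avc_ime_initialize(coord, 0, 0);
+  p = intel_sub_group_avc_ime_set_single_reference((short2)(0, 0), 0, p);
+  intel_sub_group_avc_ime_result_t r =
+      intel_sub_group_avc_ime_evaluate_with_single_reference(src, ref,
+                                                             vme_sampler, p);
+  mvs[get_group_id(1) * get_num_groups(0) + get_group_id(0)] =
+      intel_sub_group_avc_ime_get_motion_vectors(r);
+}
+#endif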
+
+#ifdef cl_amd_media_ops
+uint __ovld amd_bitalign(uint, uint, uint);
+uint2 __ovld amd_bitalign(uint2, uint2, uint2);
+uint3 __ovld amd_bitalign(uint3, uint3, uint3);
+uint4 __ovld amd_bitalign(uint4, uint4, uint4);
+uint8 __ovld amd_bitalign(uint8, uint8, uint8);
+uint16 __ovld amd_bitalign(uint16, uint16, uint16);
+
+uint __ovld amd_bytealign(uint, uint, uint);
+uint2 __ovld amd_bytealign(uint2, uint2, uint2);
+uint3 __ovld amd_bytealign(uint3, uint3, uint3);
+uint4 __ovld amd_bytealign(uint4, uint4, uint4);
+uint8 __ovld amd_bytealign(uint8, uint8, uint8);
+uint16 __ovld amd_bytealign(uint16, uint16, uint16);
+
+uint __ovld amd_lerp(uint, uint, uint);
+uint2 __ovld amd_lerp(uint2, uint2, uint2);
+uint3 __ovld amd_lerp(uint3, uint3, uint3);
+uint4 __ovld amd_lerp(uint4, uint4, uint4);
+uint8 __ovld amd_lerp(uint8, uint8, uint8);
+uint16 __ovld amd_lerp(uint16, uint16, uint16);
+
+uint __ovld amd_pack(float4 v);
+
+uint __ovld amd_sad4(uint4, uint4, uint);
+
+uint __ovld amd_sadhi(uint, uint, uint);
+uint2 __ovld amd_sadhi(uint2, uint2, uint2);
+uint3 __ovld amd_sadhi(uint3, uint3, uint3);
+uint4 __ovld amd_sadhi(uint4, uint4, uint4);
+uint8 __ovld amd_sadhi(uint8, uint8, uint8);
+uint16 __ovld amd_sadhi(uint16, uint16, uint16);
+
+uint __ovld amd_sad(uint, uint, uint);
+uint2 __ovld amd_sad(uint2, uint2, uint2);
+uint3 __ovld amd_sad(uint3, uint3, uint3);
+uint4 __ovld amd_sad(uint4, uint4, uint4);
+uint8 __ovld amd_sad(uint8, uint8, uint8);
+uint16 __ovld amd_sad(uint16, uint16, uint16);
+
+float __ovld amd_unpack0(uint);
+float2 __ovld amd_unpack0(uint2);
+float3 __ovld amd_unpack0(uint3);
+float4 __ovld amd_unpack0(uint4);
+float8 __ovld amd_unpack0(uint8);
+float16 __ovld amd_unpack0(uint16);
+
+float __ovld amd_unpack1(uint);
+float2 __ovld amd_unpack1(uint2);
+float3 __ovld amd_unpack1(uint3);
+float4 __ovld amd_unpack1(uint4);
+float8 __ovld amd_unpack1(uint8);
+float16 __ovld amd_unpack1(uint16);
+
+float __ovld amd_unpack2(uint);
+float2 __ovld amd_unpack2(uint2);
+float3 __ovld amd_unpack2(uint3);
+float4 __ovld amd_unpack2(uint4);
+float8 __ovld amd_unpack2(uint8);
+float16 __ovld amd_unpack2(uint16);
+
+float __ovld amd_unpack3(uint);
+float2 __ovld amd_unpack3(uint2);
+float3 __ovld amd_unpack3(uint3);
+float4 __ovld amd_unpack3(uint4);
+float8 __ovld amd_unpack3(uint8);
+float16 __ovld amd_unpack3(uint16);
+#endif // cl_amd_media_ops
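+
+// Editor's illustrative sketch, not part of the upstream header: amd_bytealign
+// extracts a byte-shifted window from a pair of words, and amd_sad accumulates
+// a per-byte sum of absolute differences. Guarded out; names are made up.
+#if 0
+kernel void sad_scan(global uint* out, global const uint* a,
+                     global const uint* b) {
+  size_t gid = get_global_id(0);
+  uint shifted = amd_bytealign(a[gid], b[gid], 1u); // shift right one byte
+  out[gid] = amd_sad(shifted, b[gid], out[gid]);    // accumulate per-byte SAD
+}
+#endif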
+
+#ifdef cl_amd_media_ops2
+int __ovld amd_bfe(int src0, uint src1, uint src2);
+int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
+int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
+int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
+int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
+int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfe(uint src0, uint src1, uint src2);
+uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfm(uint src0, uint src1);
+uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
+uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
+uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
+uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
+uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
+
+float __ovld amd_max3(float src0, float src1, float src2);
+float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_max3(int src0, int src1, int src2);
+int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_max3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_median3(float src0, float src1, float src2);
+float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_median3(int src0, int src1, int src2);
+int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_median3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_min3(float src0, float src1, float src2);
+float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_min3(int src0, int src1, int src2);
+int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_min3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
+
+ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+uint __ovld amd_msad(uint src0, uint src1, uint src2);
+uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadd(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadw(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
+#endif // cl_amd_media_ops2
+
+#if defined(cl_arm_integer_dot_product_int8)
+uint __ovld arm_dot(uchar4, uchar4);
+int __ovld arm_dot(char4, char4);
+#endif // defined(cl_arm_integer_dot_product_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int8)
+uint __ovld arm_dot_acc(uchar4, uchar4, uint);
+int __ovld arm_dot_acc(char4, char4, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int16)
+uint __ovld arm_dot_acc(ushort2, ushort2, uint);
+int __ovld arm_dot_acc(short2, short2, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
+
+#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+uint __ovld arm_dot_acc_sat(uchar4, uchar4, uint);
+int __ovld arm_dot_acc_sat(char4, char4, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#undef __opencl_c_named_address_space_builtins
+
+#undef __cnfn
+#undef __ovld
+#endif //_OPENCL_H_
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/cmath
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/cmath
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/math.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/new b/darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/new
rename to darwin-x86/lib64/clang/16.0.2/include/openmp_wrappers/new
diff --git a/darwin-x86/lib64/clang/16.0.2/include/orc/c_api.h b/darwin-x86/lib64/clang/16.0.2/include/orc/c_api.h
new file mode 100644
index 0000000..96d01df
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/orc/c_api.h
@@ -0,0 +1,205 @@
+/*===- c_api.h - C API for the ORC runtime ------------------------*- C -*-===*\
+|*                                                                            *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
+|* Exceptions.                                                                *|
+|* See https://llvm.org/LICENSE.txt for license information.                  *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file defines the C API for the ORC runtime                            *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef ORC_RT_C_API_H
+#define ORC_RT_C_API_H
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Helper to suppress strict prototype warnings. */
+#ifdef __clang__
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN                                       \
+  _Pragma("clang diagnostic push")                                             \
+      _Pragma("clang diagnostic error \"-Wstrict-prototypes\"")
+#define ORC_RT_C_STRICT_PROTOTYPES_END _Pragma("clang diagnostic pop")
+#else
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+/* Helper to wrap C code for C++ */
+#ifdef __cplusplus
+#define ORC_RT_C_EXTERN_C_BEGIN                                                \
+  extern "C" {                                                                 \
+  ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END                                                  \
+  ORC_RT_C_STRICT_PROTOTYPES_END                                               \
+  }
+#else
+#define ORC_RT_C_EXTERN_C_BEGIN ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+ORC_RT_C_EXTERN_C_BEGIN
+
+typedef union {
+  char *ValuePtr;
+  char Value[sizeof(char *)];
+} __orc_rt_CWrapperFunctionResultDataUnion;
+
+/**
+ * __orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
+ * out-of-band error state.
+ *
+ * If Size == 0 and Data.ValuePtr is non-zero then the value is in the
+ * 'out-of-band error' state, and Data.ValuePtr points at a malloc-allocated,
+ * null-terminated string error message.
+ *
+ * If Size <= sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in
+ * the 'small' state and the content is held in the first Size bytes of
+ * Data.Value.
+ *
+ * If Size > sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in
+ * 'large' state and the content is held in the first Size bytes of the
+ * memory pointed to by Data.ValuePtr. This memory must have been allocated by
+ * malloc, and will be freed with free when this value is destroyed.
+ */
+typedef struct {
+  __orc_rt_CWrapperFunctionResultDataUnion Data;
+  size_t Size;
+} __orc_rt_CWrapperFunctionResult;
+
+typedef struct __orc_rt_CSharedOpaqueJITProcessControl
+    *__orc_rt_SharedJITProcessControlRef;
+
+/**
+ * Zero-initialize an __orc_rt_CWrapperFunctionResult.
+ */
+static inline void
+__orc_rt_CWrapperFunctionResultInit(__orc_rt_CWrapperFunctionResult *R) {
+  R->Size = 0;
+  R->Data.ValuePtr = 0;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult with an uninitialized buffer of
+ * size Size. The buffer is owned by the returned value and can be
+ * accessed via __orc_rt_CWrapperFunctionResultData.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CWrapperFunctionResultAllocate(size_t Size) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = Size;
+  // If Size is 0 ValuePtr must be 0 or it is considered an out-of-band error.
+  R.Data.ValuePtr = 0;
+  if (Size > sizeof(R.Data.Value))
+    R.Data.ValuePtr = (char *)malloc(Size);
+  return R;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult from the given data range.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = Size;
+  if (R.Size > sizeof(R.Data.Value)) {
+    char *Tmp = (char *)malloc(Size);
+    memcpy(Tmp, Data, Size);
+    R.Data.ValuePtr = Tmp;
+  } else
+    memcpy(R.Data.Value, Data, Size);
+  return R;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult by copying the given string,
+ * including the null-terminator.
+ *
+ * This function copies the input string. The client is responsible for freeing
+ * the Source arg.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
+  return __orc_rt_CreateCWrapperFunctionResultFromRange(Source,
+                                                        strlen(Source) + 1);
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult representing an out-of-band
+ * error.
+ *
+ * This function copies the input string. The client is responsible for freeing
+ * the ErrMsg arg.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = 0;
+  char *Tmp = (char *)malloc(strlen(ErrMsg) + 1);
+  strcpy(Tmp, ErrMsg);
+  R.Data.ValuePtr = Tmp;
+  return R;
+}
+
+/**
+ * This should be called to destroy __orc_rt_CWrapperFunctionResult values
+ * regardless of their state.
+ */
+static inline void
+__orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
+  if (R->Size > sizeof(R->Data.Value) ||
+      (R->Size == 0 && R->Data.ValuePtr))
+    free(R->Data.ValuePtr);
+}
+
+/**
+ * Get a pointer to the data contained in the given
+ * __orc_rt_CWrapperFunctionResult.
+ */
+static inline char *
+__orc_rt_CWrapperFunctionResultData(__orc_rt_CWrapperFunctionResult *R) {
+  assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
+         "Cannot get data for out-of-band error value");
+  return R->Size > sizeof(R->Data.Value) ? R->Data.ValuePtr : R->Data.Value;
+}
+
+/**
+ * Safely get the size of the given __orc_rt_CWrapperFunctionResult.
+ *
+ * Asserts that we're not trying to access the size of an error value.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
+  assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
+         "Cannot get size for out-of-band error value");
+  return R->Size;
+}
+
+/**
+ * Returns 1 if this value is equivalent to a value just initialized by
+ * __orc_rt_CWrapperFunctionResultInit, 0 otherwise.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultEmpty(const __orc_rt_CWrapperFunctionResult *R) {
+  return R->Size == 0 && R->Data.ValuePtr == 0;
+}
+
+/**
+ * Returns a pointer to the out-of-band error string for this
+ * __orc_rt_CWrapperFunctionResult, or null if there is no error.
+ *
+ * The __orc_rt_CWrapperFunctionResult retains ownership of the error
+ * string, so it should be copied if the caller wishes to preserve it.
+ */
+static inline const char *__orc_rt_CWrapperFunctionResultGetOutOfBandError(
+    const __orc_rt_CWrapperFunctionResult *R) {
+  return R->Size == 0 ? R->Data.ValuePtr : 0;
+}
+
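+/* Usage sketch (illustrative only; `R` and `Err` are hypothetical names,
+   not part of the API).  It walks the states documented above: a short
+   string lands in the 'small' state, and the error accessor returns null
+   unless the value is an out-of-band error.
+
+     __orc_rt_CWrapperFunctionResult R =
+         __orc_rt_CreateCWrapperFunctionResultFromString("hello");
+     const char *Err = __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+     if (Err) {
+       // Out-of-band error state: R still owns the message string.
+     } else {
+       // Size is 6 ("hello" plus NUL), which fits in Data.Value, so R is
+       // in the 'small' state and Data points into R itself.
+       printf("%s\n", __orc_rt_CWrapperFunctionResultData(&R));
+     }
+     __orc_rt_DisposeCWrapperFunctionResult(&R);  // Safe in every state.
+*/
+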
+ORC_RT_C_EXTERN_C_END
+
+#endif /* ORC_RT_C_API_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pconfigintrin.h b/darwin-x86/lib64/clang/16.0.2/include/pconfigintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/pconfigintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/pconfigintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pkuintrin.h b/darwin-x86/lib64/clang/16.0.2/include/pkuintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/pkuintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/pmmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/pmmintrin.h
index eda8356..ee660e9 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/pmmintrin.h
@@ -35,7 +35,7 @@
 ///    A pointer to a 128-bit integer vector containing integer values.
 /// \returns A 128-bit vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_lddqu_si128(__m128i const *__p)
+_mm_lddqu_si128(__m128i_u const *__p)
 {
   return (__m128i)__builtin_ia32_lddqu((char const *)__p);
 }
diff --git a/darwin-x86/lib64/clang/14.0.2/include/popcntintrin.h b/darwin-x86/lib64/clang/16.0.2/include/popcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/popcntintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/popcntintrin.h
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h
new file mode 100644
index 0000000..0dc0d14
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h
@@ -0,0 +1,134 @@
+/*===---- bmi2intrin.h - Implementation of BMI2 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmi2intrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMI2INTRIN_H_
+#define BMI2INTRIN_H_
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bzhi_u32(unsigned int __X, unsigned int __Y) {
+  return ((__X << (32 - __Y)) >> (32 - __Y));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) {
+  unsigned long long __res = (unsigned long long)__X * __Y;
+  *__P = (unsigned int)(__res >> 32);
+  return (unsigned int)__res;
+}
+
+#ifdef __PPC64__
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bzhi_u64(unsigned long long __X, unsigned long long __Y) {
+  return ((__X << (64 - __Y)) >> (64 - __Y));
+}
+
+/* __int128 requires base 64-bit.  */
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mulx_u64(unsigned long long __X, unsigned long long __Y,
+              unsigned long long *__P) {
+  unsigned __int128 __res = (unsigned __int128)__X * __Y;
+  *__P = (unsigned long long)(__res >> 64);
+  return (unsigned long long)__res;
+}
+
+#ifdef _ARCH_PWR7
+/* popcount and bpermd require power7 minimum.  */
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pdep_u64(unsigned long long __X, unsigned long long __M) {
+  unsigned long __result = 0x0UL;
+  const unsigned long __mask = 0x8000000000000000UL;
+  unsigned long __m = __M;
+  unsigned long __c, __t;
+  unsigned long __p;
+
+  /* The pop-count of the mask gives the number of bits from the
+   source to process.  This is also needed to shift bits from the
+   source into the correct position for the result.  */
+  __p = 64 - __builtin_popcountl(__M);
+
+  /* The loop runs once for each '1' bit in the mask, clearing
+   each mask bit as it is processed.  */
+  while (__m != 0) {
+    __c = __builtin_clzl(__m);
+    __t = __X << (__p - __c);
+    __m ^= (__mask >> __c);
+    __result |= (__t & (__mask >> __c));
+    __p++;
+  }
+  return __result;
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pext_u64(unsigned long long __X, unsigned long long __M) {
+  unsigned long __p = 0x4040404040404040UL; // initial bit permute control
+  const unsigned long __mask = 0x8000000000000000UL;
+  unsigned long __m = __M;
+  unsigned long __c;
+  unsigned long __result;
+
+  /* If the mask is constant and selects 8 bits or fewer, we can use
+   the Power8 bit-permute (bpermd) instruction.  */
+  if (__builtin_constant_p(__M) && (__builtin_popcountl(__M) <= 8)) {
+    /* Also, if the pext mask is constant then the popcount is
+     constant, so we can evaluate the following loop at compile
+     time and use a constant bit-permute vector.  */
+    long __i;
+    for (__i = 0; __i < __builtin_popcountl(__M); __i++) {
+      __c = __builtin_clzl(__m);
+      __p = (__p << 8) | __c;
+      __m ^= (__mask >> __c);
+    }
+    __result = __builtin_bpermd(__p, __X);
+  } else {
+    __p = 64 - __builtin_popcountl(__M);
+    __result = 0;
+    /* We could use a for loop here, but that combined with
+     -funroll-loops can expand to a lot of code.  The while
+     loop avoids unrolling and the compiler commons the xor
+     from clearing the mask bit with the (m != 0) test.  The
+     result is a more compact loop setup and body.  */
+    while (__m != 0) {
+      unsigned long __t;
+      __c = __builtin_clzl(__m);
+      __t = (__X & (__mask >> __c)) >> (__p - __c);
+      __m ^= (__mask >> __c);
+      __result |= (__t);
+      __p++;
+    }
+  }
+  return __result;
+}
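+
+/* Worked example (illustrative): _pext_u64 gathers the bits of __X
+   selected by __M into the low-order bits of the result, and _pdep_u64
+   is the inverse scatter:
+
+     _pext_u64(0x12345678, 0xFF00) == 0x56    (bits 8..15 of the source)
+     _pdep_u64(0x56, 0xFF00)       == 0x5600  (deposited back at bits 8..15)
+*/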
+
+/* These 32-bit implementations depend on the 64-bit pdep/pext,
+   which depend on _ARCH_PWR7.  */
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pdep_u32(unsigned int __X, unsigned int __Y) {
+  return _pdep_u64(__X, __Y);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pext_u32(unsigned int __X, unsigned int __Y) {
+  return _pext_u64(__X, __Y);
+}
+#endif /* _ARCH_PWR7  */
+#endif /* __PPC64__  */
+
+#endif /* BMI2INTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h
new file mode 100644
index 0000000..7d33159
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h
@@ -0,0 +1,165 @@
+/*===---- bmiintrin.h - Implementation of BMI intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmiintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMIINTRIN_H_
+#define BMIINTRIN_H_
+
+extern __inline unsigned short
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u16(unsigned short __X) {
+  return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __andn_u32(unsigned int __X, unsigned int __Y) {
+  return (~__X & __Y);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bextr_u32(unsigned int __X, unsigned int __P, unsigned int __L) {
+  return ((__X << (32 - (__L + __P))) >> (32 - __L));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __bextr_u32(unsigned int __X, unsigned int __Y) {
+  unsigned int __P, __L;
+  __P = __Y & 0xFF;
+  __L = (__Y >> 8) & 0xFF;
+  return (_bextr_u32(__X, __P, __L));
+}
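+
+/* Worked example (illustrative): __bextr_u32 decodes the start bit from
+   bits 0..7 of __Y and the field length from bits 8..15, so
+
+     __bextr_u32(0xABCD1234, (8 << 8) | 4) == 0x23
+
+   extracts the 8-bit field starting at bit 4 of 0xABCD1234.  */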
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsi_u32(unsigned int __X) {
+  return (__X & -__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsi_u32(unsigned int __X) {
+  return __blsi_u32(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsmsk_u32(unsigned int __X) {
+  return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsmsk_u32(unsigned int __X) {
+  return __blsmsk_u32(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsr_u32(unsigned int __X) {
+  return (__X & (__X - 1));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsr_u32(unsigned int __X) {
+  return __blsr_u32(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u32(unsigned int __X) {
+  return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _tzcnt_u32(unsigned int __X) {
+  return __builtin_ctz(__X);
+}
+
+/* Use the 64-bit shift, rotate, and count-leading-zeros instructions
+   for long long.  */
+#ifdef __PPC64__
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __andn_u64(unsigned long long __X, unsigned long long __Y) {
+  return (~__X & __Y);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bextr_u64(unsigned long long __X, unsigned int __P, unsigned int __L) {
+  return ((__X << (64 - (__L + __P))) >> (64 - __L));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __bextr_u64(unsigned long long __X, unsigned long long __Y) {
+  unsigned int __P, __L;
+  __P = __Y & 0xFF;
+  __L = (__Y & 0xFF00) >> 8;
+  return (_bextr_u64(__X, __P, __L));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsi_u64(unsigned long long __X) {
+  return __X & -__X;
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsi_u64(unsigned long long __X) {
+  return __blsi_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsmsk_u64(unsigned long long __X) {
+  return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsmsk_u64(unsigned long long __X) {
+  return __blsmsk_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsr_u64(unsigned long long __X) {
+  return (__X & (__X - 1));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsr_u64(unsigned long long __X) {
+  return __blsr_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u64(unsigned long long __X) {
+  return __builtin_ctzll(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _tzcnt_u64(unsigned long long __X) {
+  return __builtin_ctzll(__X);
+}
+#endif /* __PPC64__  */
+
+#endif /* BMIINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h
new file mode 100644
index 0000000..a4c458a
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h
@@ -0,0 +1,2268 @@
+/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file helps port code that uses Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the X86 SSE2 intrinsics mainly handle the __m128i and __m128d
+   types, the PowerPC VMX/VSX ISA is a good match for vector float SIMD
+   operations.  However, scalar float operations in vector (XMM)
+   registers require the POWER8 VSX ISA (2.07) level.  There are
+   differences in the data format and placement of float scalars in the
+   vector register, which require extra steps to match SSE2 scalar
+   float semantics on POWER.
+
+   It should be noted that X86_64's MXCSR and PowerISA's FPSCR/VSCR
+   registers differ considerably.  It's recommended to use the portable
+   <fenv.h> interface instead of accessing the MXCSR directly.
+
+   Most SSE2 scalar float intrinsic operations can be performed more
+   efficiently as C language float scalar operations, or optimized to
+   use vector SIMD operations.  We recommend this for new applications.
+*/
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
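+/* An illustrative (not prescriptive) invocation acknowledging the error
+   above; the target, CPU, and file name are placeholders:
+
+     clang -target powerpc64le-unknown-linux-gnu -mcpu=power8 \
+         -DNO_WARN_X86_INTRINSICS -c ported_sse2.c
+*/
+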
+#ifndef EMMINTRIN_H_
+#define EMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files.  */
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef __vector double __v2df;
+typedef __vector long long __v2di;
+typedef __vector unsigned long long __v2du;
+typedef __vector int __v4si;
+typedef __vector unsigned int __v4su;
+typedef __vector short __v8hi;
+typedef __vector unsigned short __v8hu;
+typedef __vector signed char __v16qi;
+typedef __vector unsigned char __v16qu;
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components.  */
+typedef long long __m128i __attribute__((__vector_size__(16), __may_alias__));
+typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
+
+/* Unaligned version of the same types.  */
+typedef long long __m128i_u
+    __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef double __m128d_u
+    __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+
+/* Define two value permute mask.  */
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
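+/* For example (illustrative), _MM_SHUFFLE2(1, 0) == 2: y selects the
+   element for the lower result slot and x the element for the upper.  */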
+
+/* Create a vector with element 0 as F and the rest zero.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_sd(double __F) {
+  return __extension__(__m128d){__F, 0.0};
+}
+
+/* Create a vector with both elements equal to F.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pd(double __F) {
+  return __extension__(__m128d){__F, __F};
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pd1(double __F) {
+  return _mm_set1_pd(__F);
+}
+
+/* Create a vector with the lower value X and upper value W.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pd(double __W, double __X) {
+  return __extension__(__m128d){__X, __W};
+}
+
+/* Create a vector with the lower value W and upper value X.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_pd(double __W, double __X) {
+  return __extension__(__m128d){__W, __X};
+}
+
+/* Create an undefined vector.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_pd(void) {
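+  /* Self-initialization yields an indeterminate value while silencing
+     uninitialized-variable warnings.  */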
+  __m128d __Y = __Y;
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_pd(void) {
+  return (__m128d)vec_splats(0);
+}
+
+/* Sets the low DPFP value of A from the low value of B.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_sd(__m128d __A, __m128d __B) {
+  __v2df __result = (__v2df)__A;
+  __result[0] = ((__v2df)__B)[0];
+  return (__m128d)__result;
+}
+
+/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_pd(double const *__P) {
+  return ((__m128d)vec_ld(0, (__v16qu *)__P));
+}
+
+/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_pd(double const *__P) {
+  return (vec_vsx_ld(0, __P));
+}
+
+/* Create a vector with all two elements equal to *P.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load1_pd(double const *__P) {
+  return (vec_splats(*__P));
+}
+
+/* Create a vector with element 0 as *P and the rest zero.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_sd(double const *__P) {
+  return _mm_set_sd(*__P);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_pd1(double const *__P) {
+  return _mm_load1_pd(__P);
+}
+
+/* Load two DPFP values in reverse order.  The address must be aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadr_pd(double const *__P) {
+  __v2df __tmp = _mm_load_pd(__P);
+  return (__m128d)vec_xxpermdi(__tmp, __tmp, 2);
+}
+
+/* Store two DPFP values.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_pd(double *__P, __m128d __A) {
+  vec_st((__v16qu)__A, 0, (__v16qu *)__P);
+}
+
+/* Store two DPFP values.  The address need not be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_pd(double *__P, __m128d __A) {
+  *(__m128d_u *)__P = __A;
+}
+
+/* Stores the lower DPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_sd(double *__P, __m128d __A) {
+  *__P = ((__v2df)__A)[0];
+}
+
+extern __inline double
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_f64(__m128d __A) {
+  return ((__v2df)__A)[0];
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_pd(double *__P, __m128d __A) {
+  _mm_store_sd(__P, __A);
+}
+
+/* Stores the upper DPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeh_pd(double *__P, __m128d __A) {
+  *__P = ((__v2df)__A)[1];
+}
+/* Store the lower DPFP value across two words.
+   The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store1_pd(double *__P, __m128d __A) {
+  _mm_store_pd(__P, vec_splat(__A, 0));
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_pd1(double *__P, __m128d __A) {
+  _mm_store1_pd(__P, __A);
+}
+
+/* Store two DPFP values in reverse order.  The address must be aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storer_pd(double *__P, __m128d __A) {
+  _mm_store_pd(__P, vec_xxpermdi(__A, __A, 2));
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si64(__m128i __A) {
+  return ((__v2di)__A)[0];
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si64x(__m128i __A) {
+  return ((__v2di)__A)[0];
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A + (__v2df)__B);
+}
+
+/* Add the lower double-precision (64-bit) floating-point element in
+   a and b, store the result in the lower element of dst, and copy
+   the upper element from a to the upper element of dst. */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] + __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A - (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] - __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A * (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] * __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A / (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] / __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_pd(__m128d __A) {
+  return (vec_sqrt(__A));
+}
+
+/* Return pair {sqrt (B[0]), A[1]}.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_sd(__m128d __A, __m128d __B) {
+  __v2df __c;
+  __c = vec_sqrt((__v2df)_mm_set1_pd(__B[0]));
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pd(__m128d __A, __m128d __B) {
+  return (vec_min(__A, __B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = vec_min(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pd(__m128d __A, __m128d __B) {
+  return (vec_max(__A, __B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = vec_max(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpeq((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_pd(__m128d __A, __m128d __B) {
+  __v2df __temp = (__v2df)vec_cmpeq((__v2df)__A, (__v2df)__B);
+  return ((__m128d)vec_nor(__temp, __temp));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_pd(__m128d __A, __m128d __B) {
+  __v2du __c, __d;
+  /* Compare against self will return false (0's) if NAN.  */
+  __c = (__v2du)vec_cmpeq(__A, __A);
+  __d = (__v2du)vec_cmpeq(__B, __B);
+  /* A != NAN and B != NAN.  */
+  return ((__m128d)vec_and(__c, __d));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_pd(__m128d __A, __m128d __B) {
+#if _ARCH_PWR8
+  __v2du __c, __d;
+  /* Compare against self will return false (0's) if NAN.  */
+  __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+  __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
+  /* A == NAN OR B == NAN converts to:
+     NOT(A != NAN) OR NOT(B != NAN).  */
+  __c = vec_nor(__c, __c);
+  return ((__m128d)vec_orc(__c, __d));
+#else
+  __v2du __c, __d;
+  /* Compare against self will return false (0's) if NAN.  */
+  __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+  __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
+  /* Invert so that true ('1's) marks NAN.  */
+  __c = vec_nor(__c, __c);
+  __d = vec_nor(__d, __d);
+  return ((__m128d)vec_or(__c, __d));
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  /* PowerISA VSX does not allow partial (for just lower double)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper double values) we splat the lower double
+     before we do the operation. */
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpeq(__a, __b);
+  /* Then we merge the lower double result with the original upper
+     double from __A.  */
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmplt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmple(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpgt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpge(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpeq(__a, __b);
+  __c = vec_nor(__c, __c);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not less than is just greater than or equal.  */
+  __c = (__v2df)vec_cmpge(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not less than or equal is just greater than.  */
+  __c = (__v2df)vec_cmpgt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not greater than is just less than or equal.  */
+  __c = (__v2df)vec_cmple(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not greater than or equal is just less than.  */
+  __c = (__v2df)vec_cmplt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_sd(__m128d __A, __m128d __B) {
+  __v2df __r;
+  __r = (__v2df)_mm_cmpord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+  return (__m128d)_mm_setr_pd(__r[0], ((__v2df)__A)[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_sd(__m128d __A, __m128d __B) {
+  __v2df __r;
+  __r = _mm_cmpunord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+  return (__m128d)_mm_setr_pd(__r[0], __A[1]);
+}
+
+/* FIXME
+   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
+   exactly the same because GCC for PowerPC only generates unordered
+   compares (scalar and vector).
+   Technically _mm_comieq_sd et al. should be using the ordered
+   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
+   be OK.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comieq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comilt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comile_sd(__m128d __A, __m128d __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comigt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comige_sd(__m128d __A, __m128d __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comineq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] != __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomieq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomilt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomile_sd(__m128d __A, __m128d __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomigt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomige_sd(__m128d __A, __m128d __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomineq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] != __B[0]);
+}
+
+/* Create a vector of Qi, where i is the element number.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi64x(long long __q1, long long __q0) {
+  return __extension__(__m128i)(__v2di){__q0, __q1};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi64(__m64 __q1, __m64 __q0) {
+  return _mm_set_epi64x((long long)__q1, (long long)__q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi32(int __q3, int __q2, int __q1, int __q0) {
+  return __extension__(__m128i)(__v4si){__q0, __q1, __q2, __q3};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi16(short __q7, short __q6, short __q5, short __q4, short __q3,
+                  short __q2, short __q1, short __q0) {
+  return __extension__(__m128i)(__v8hi){__q0, __q1, __q2, __q3,
+                                        __q4, __q5, __q6, __q7};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi8(char __q15, char __q14, char __q13, char __q12, char __q11,
+                 char __q10, char __q09, char __q08, char __q07, char __q06,
+                 char __q05, char __q04, char __q03, char __q02, char __q01,
+                 char __q00) {
+  return __extension__(__m128i)(__v16qi){
+      __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+      __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15};
+}
+
+/* Set all of the elements of the vector to A.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi64x(long long __A) {
+  return _mm_set_epi64x(__A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi64(__m64 __A) {
+  return _mm_set_epi64(__A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi32(int __A) {
+  return _mm_set_epi32(__A, __A, __A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi16(short __A) {
+  return _mm_set_epi16(__A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi8(char __A) {
+  return _mm_set_epi8(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A,
+                      __A, __A, __A, __A, __A);
+}
+
+/* Create a vector of Qi, where i is the element number.
+   The parameter order is reversed from the _mm_set_epi* functions.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi64(__m64 __q0, __m64 __q1) {
+  return _mm_set_epi64(__q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi32(int __q0, int __q1, int __q2, int __q3) {
+  return _mm_set_epi32(__q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi16(short __q0, short __q1, short __q2, short __q3, short __q4,
+                   short __q5, short __q6, short __q7) {
+  return _mm_set_epi16(__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi8(char __q00, char __q01, char __q02, char __q03, char __q04,
+                  char __q05, char __q06, char __q07, char __q08, char __q09,
+                  char __q10, char __q11, char __q12, char __q13, char __q14,
+                  char __q15) {
+  return _mm_set_epi8(__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+                      __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
+
+/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_si128(__m128i const *__P) {
+  return *__P;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_si128(__m128i_u const *__P) {
+  return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_epi64(__m128i_u const *__P) {
+  return _mm_set_epi64((__m64)0LL, *(__m64 *)__P);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_si128(__m128i *__P, __m128i __B) {
+  vec_st((__v16qu)__B, 0, (__v16qu *)__P);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_si128(__m128i_u *__P, __m128i __B) {
+  *__P = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_epi64(__m128i_u *__P, __m128i __B) {
+  *(long long *)__P = ((__v2di)__B)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movepi64_pi64(__m128i_u __B) {
+  return (__m64)((__v2di)__B)[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movpi64_epi64(__m64 __A) {
+  return _mm_set_epi64((__m64)0LL, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_epi64(__m128i __A) {
+  return _mm_set_epi64((__m64)0LL, (__m64)__A[0]);
+}
+
+/* Create an undefined vector.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_si128(void) {
+  __m128i __Y = __Y; /* Self-initialization quiets uninitialized warnings. */
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_si128(void) {
+  return __extension__(__m128i)(__v4si){0, 0, 0, 0};
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_pd(__m128i __A) {
+  __v2di __val;
+  /* On little-endian we need Vector Unpack Low Signed Word, which
+     vec_unpackh generates here.  */
+  __val = (__v2di)vec_unpackh((__v4si)__A);
+
+  return (__m128d)vec_ctf(__val, 0);
+}
+#endif
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_ps(__m128i __A) {
+  return ((__m128)vec_ctf((__v4si)__A, 0));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_epi32(__m128d __A) {
+  __v2df __rounded = vec_rint(__A);
+  __v4si __result, __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  /* xvcvdpsxws: VSX Vector Convert Double-Precision to Signed Integer
+     Word format, with truncation and saturation.  */
+  __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__rounded) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+  return (__m128i)__result;
+}
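+
+/* Editorial usage sketch, not part of the upstream header: _mm_cvtpd_epi32
+   rounds two doubles under the current rounding mode and packs the two
+   int32 results into elements [0] and [1], zeroing the upper half.
+   Guarded out; assumes the _mm_set_pd defined earlier in this header.  */
+#if 0
+static inline int __cvtpd_epi32_demo(void) {
+  __m128d __d = _mm_set_pd(2.5, 1.5); /* element 0 = 1.5, element 1 = 2.5 */
+  __m128i __i = _mm_cvtpd_epi32(__d); /* round-to-nearest-even: {2, 2, 0, 0} */
+  return ((__v4si)__i)[0] == 2 && ((__v4si)__i)[1] == 2 && ((__v4si)__i)[2] == 0;
+}
+#endif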
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_pi32(__m128d __A) {
+  __m128i __result = _mm_cvtpd_epi32(__A);
+
+  return (__m64)__result[0];
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_ps(__m128d __A) {
+  __v4sf __result;
+  __v4si __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  __asm__("xvcvdpsp %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4sf)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4sf)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+  return ((__m128)__result);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttpd_epi32(__m128d __A) {
+  __v4si __result;
+  __v4si __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  /* xvcvdpsxws: VSX Vector Convert Double-Precision to Signed Integer
+     Word format, with truncation and saturation.  */
+  __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+
+  return ((__m128i)__result);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttpd_pi32(__m128d __A) {
+  __m128i __result = _mm_cvttpd_epi32(__A);
+
+  return (__m64)__result[0];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si32(__m128i __A) {
+  return ((__v4si)__A)[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32_pd(__m64 __A) {
+  __v4si __temp;
+  __v2di __tmp2;
+  __v2df __result;
+
+  __temp = (__v4si)vec_splats(__A);
+  __tmp2 = (__v2di)vec_unpackl(__temp);
+  __result = vec_ctf((__vector signed long long)__tmp2, 0);
+  return (__m128d)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_epi32(__m128 __A) {
+  __v4sf __rounded;
+  __v4si __result;
+
+  __rounded = vec_rint((__v4sf)__A);
+  __result = vec_cts(__rounded, 0);
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttps_epi32(__m128 __A) {
+  __v4si __result;
+
+  __result = vec_cts((__v4sf)__A, 0);
+  return (__m128i)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pd(__m128 __A) {
+  /* Check if vec_doubleh is defined by <altivec.h>.  If so, use it.  */
+#ifdef vec_doubleh
+  return (__m128d)vec_doubleh((__v4sf)__A);
+#else
+  /* Otherwise the compiler is not current, so we need to generate the
+     equivalent code.  */
+  __v4sf __a = (__v4sf)__A;
+  __v4sf __temp;
+  __v2df __result;
+#ifdef __LITTLE_ENDIAN__
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[1], [3]}, so we use two Shift
+     Left Double by Word Immediate operations to line the elements up.  */
+  __temp = __builtin_vsx_xxsldwi(__a, __a, 3);
+  __temp = __builtin_vsx_xxsldwi(__a, __temp, 2);
+#else
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[0], [2]}, so we merge the
+     vector words (vec_vmrghw) to line the elements up.  */
+  __temp = vec_vmrghw(__a, __a);
+#endif
+  __asm__(" xvcvspdp %x0,%x1" : "=wa"(__result) : "wa"(__temp) :);
+  return (__m128d)__result;
+#endif
+}
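+
+/* Editorial usage sketch, not part of the upstream header: _mm_cvtps_pd
+   widens the two low single-precision elements to double precision.
+   Guarded out; assumes _mm_setr_ps from the <xmmintrin.h> wrapper.  */
+#if 0
+static inline int __cvtps_pd_demo(void) {
+  __m128 __f = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f); /* only [0] and [1] are used */
+  __m128d __d = _mm_cvtps_pd(__f);                  /* {1.0, 2.0} */
+  return ((__v2df)__d)[0] == 1.0 && ((__v2df)__d)[1] == 2.0;
+}
+#endif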
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si32(__m128d __A) {
+  __v2df __rounded = vec_rint((__v2df)__A);
+  int __result = ((__v2df)__rounded)[0];
+
+  return __result;
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si64(__m128d __A) {
+  __v2df __rounded = vec_rint((__v2df)__A);
+  long long __result = ((__v2df)__rounded)[0];
+
+  return __result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si64x(__m128d __A) {
+  return _mm_cvtsd_si64(__A);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si32(__m128d __A) {
+  int __result = ((__v2df)__A)[0];
+
+  return __result;
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si64(__m128d __A) {
+  long long __result = ((__v2df)__A)[0];
+
+  return __result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si64x(__m128d __A) {
+  return _mm_cvttsd_si64(__A);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_ss(__m128 __A, __m128d __B) {
+  __v4sf __result = (__v4sf)__A;
+
+#ifdef __LITTLE_ENDIAN__
+  __v4sf __temp_s;
+  /* Copy double element[0] to element [1] for conversion.  */
+  __v2df __temp_b = vec_splat((__v2df)__B, 0);
+
+  /* Pre-rotate __A left 3 (logically right 1) elements.  */
+  __result = __builtin_vsx_xxsldwi(__result, __result, 3);
+  /* Convert double to single float scalar in a vector.  */
+  __asm__("xscvdpsp %x0,%x1" : "=wa"(__temp_s) : "wa"(__temp_b) :);
+  /* Shift the resulting scalar into vector element [0].  */
+  __result = __builtin_vsx_xxsldwi(__result, __temp_s, 1);
+#else
+  __result[0] = ((__v2df)__B)[0];
+#endif
+  return (__m128)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_sd(__m128d __A, int __B) {
+  __v2df __result = (__v2df)__A;
+  double __db = __B;
+  __result[0] = __db;
+  return (__m128d)__result;
+}
+
+/* Intel intrinsic.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_sd(__m128d __A, long long __B) {
+  __v2df __result = (__v2df)__A;
+  double __db = __B;
+  __result[0] = __db;
+  return (__m128d)__result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_sd(__m128d __A, long long __B) {
+  return _mm_cvtsi64_sd(__A, __B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_sd(__m128d __A, __m128 __B) {
+#ifdef __LITTLE_ENDIAN__
+  /* Use splat to move element [0] into position for the convert. */
+  __v4sf __temp = vec_splat((__v4sf)__B, 0);
+  __v2df __res;
+  /* Convert single float scalar to double in a vector.  */
+  __asm__("xscvspdp %x0,%x1" : "=wa"(__res) : "wa"(__temp) :);
+  return (__m128d)vec_mergel(__res, (__v2df)__A);
+#else
+  __v2df __res = (__v2df)__A;
+  __res[0] = ((__v4sf)__B)[0];
+  return (__m128d)__res;
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) {
+  __vector double __result;
+  const int __litmsk = __mask & 0x3;
+
+  if (__litmsk == 0)
+    __result = vec_mergeh(__A, __B);
+#if __GNUC__ < 6
+  else if (__litmsk == 1)
+    __result = vec_xxpermdi(__B, __A, 2);
+  else if (__litmsk == 2)
+    __result = vec_xxpermdi(__B, __A, 1);
+#else
+  else if (__litmsk == 1)
+    __result = vec_xxpermdi(__A, __B, 2);
+  else if (__litmsk == 2)
+    __result = vec_xxpermdi(__A, __B, 1);
+#endif
+  else
+    __result = vec_mergel(__A, __B);
+
+  return __result;
+}
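+
+/* Editorial usage sketch, not part of the upstream header: bit 0 of the
+   mask selects the element taken from __A, bit 1 the element taken from
+   __B, so mask 0x1 yields {__A[1], __B[0]}.  Guarded out; assumes the
+   _mm_set_pd defined earlier in this header.  */
+#if 0
+static inline int __shuffle_pd_demo(void) {
+  __m128d __a = _mm_set_pd(11.0, 10.0);        /* __a = {10.0, 11.0} */
+  __m128d __b = _mm_set_pd(21.0, 20.0);        /* __b = {20.0, 21.0} */
+  __m128d __r = _mm_shuffle_pd(__a, __b, 0x1); /* {__a[1], __b[0]} = {11.0, 20.0} */
+  return ((__v2df)__r)[0] == 11.0 && ((__v2df)__r)[1] == 20.0;
+}
+#endif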
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_pd(__m128d __A, __m128d __B) {
+  return (__m128d)vec_mergel((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_pd(__m128d __A, __m128d __B) {
+  return (__m128d)vec_mergeh((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadh_pd(__m128d __A, double const *__B) {
+  __v2df __result = (__v2df)__A;
+  __result[1] = *__B;
+  return (__m128d)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_pd(__m128d __A, double const *__B) {
+  __v2df __result = (__v2df)__A;
+  __result[0] = *__B;
+  return (__m128d)__result;
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Creates a 2-bit mask from the most significant bits of the DPFP values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_pd(__m128d __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__v2du)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned int __perm_mask = {
+#ifdef __LITTLE_ENDIAN__
+      0x80800040, 0x80808080, 0x80808080, 0x80808080
+#else
+      0x80808080, 0x80808080, 0x80808080, 0x80804000
+#endif
+  };
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
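+
+/* Editorial usage sketch, not part of the upstream header: the sign bits of
+   the two doubles land in bits 0 and 1 of the mask, so {-1.0, 2.0} gives
+   0x1.  Guarded out; requires _ARCH_PWR8 and the _mm_set_pd defined
+   earlier in this header.  */
+#if 0
+static inline int __movemask_pd_demo(void) {
+  __m128d __v = _mm_set_pd(2.0, -1.0); /* element 0 = -1.0, element 1 = 2.0 */
+  return _mm_movemask_pd(__v) == 0x1;  /* bit 0 set (negative), bit 1 clear */
+}
+#endif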
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packs((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packs((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packus_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packsu((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v4su)__A, (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__vector long long)__A, (__vector long long)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__vector long long)__A, (__vector long long)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)((__v16qu)__A + (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hu)__A + (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)((__v4su)__A + (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)((__v2du)__A + (__v2du)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)((__v16qu)__A - (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hu)__A - (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)((__v4su)__A - (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)((__v2du)__A - (__v2du)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_madd_epi16(__m128i __A, __m128i __B) {
+  __vector signed int __zero = {0, 0, 0, 0};
+
+  return (__m128i)vec_vmsumshm((__v8hi)__A, (__v8hi)__B, __zero);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_epi16(__m128i __A, __m128i __B) {
+  __vector signed int __w0, __w1;
+
+  __vector unsigned char __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+      0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+  };
+
+  __w0 = vec_vmulesh((__v8hi)__A, (__v8hi)__B);
+  __w1 = vec_vmulosh((__v8hi)__A, (__v8hi)__B);
+  return (__m128i)vec_perm(__w0, __w1, __xform1);
+}
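+
+/* Editorial usage sketch, not part of the upstream header: _mm_mulhi_epi16
+   keeps the high 16 bits of each 32-bit product; 0x4000 * 0x0400 =
+   0x01000000, whose high half is 0x0100.  Guarded out.  */
+#if 0
+static inline int __mulhi_epi16_demo(void) {
+  __m128i __a = _mm_set1_epi16(0x4000);
+  __m128i __b = _mm_set1_epi16(0x0400);
+  __m128i __hi = _mm_mulhi_epi16(__a, __b); /* every lane = 0x0100 */
+  return ((__v8hi)__hi)[0] == 0x0100;
+}
+#endif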
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mullo_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hi)__A * (__v8hi)__B);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_su32(__m64 __A, __m64 __B) {
+  unsigned int __a = __A;
+  unsigned int __b = __B;
+
+  return ((__m64)__a * (__m64)__b);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_epu32(__m128i __A, __m128i __B) {
+#if __GNUC__ < 8
+  __v2du __result;
+
+#ifdef __LITTLE_ENDIAN__
+  /* VMX Vector Multiply Odd Unsigned Word.  */
+  __asm__("vmulouw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
+#else
+  /* VMX Vector Multiply Even Unsigned Word.  */
+  __asm__("vmuleuw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
+#endif
+  return (__m128i)__result;
+#else
+  return (__m128i)vec_mule((__v4su)__A, (__v4su)__B);
+#endif
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi16(__m128i __A, int __B) {
+  __v8hu __lshift;
+  __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__B >= 0 && __B < 16) {
+    if (__builtin_constant_p(__B))
+      __lshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __lshift = vec_splats((unsigned short)__B);
+
+    __result = vec_sl((__v8hi)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi32(__m128i __A, int __B) {
+  __v4su __lshift;
+  __v4si __result = {0, 0, 0, 0};
+
+  if (__B >= 0 && __B < 32) {
+    if (__builtin_constant_p(__B) && __B < 16)
+      __lshift = (__v4su)vec_splat_s32(__B);
+    else
+      __lshift = vec_splats((unsigned int)__B);
+
+    __result = vec_sl((__v4si)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi64(__m128i __A, int __B) {
+  __v2du __lshift;
+  __v2di __result = {0, 0};
+
+  if (__B >= 0 && __B < 64) {
+    if (__builtin_constant_p(__B) && __B < 16)
+      __lshift = (__v2du)vec_splat_s32(__B);
+    else
+      __lshift = (__v2du)vec_splats((unsigned int)__B);
+
+    __result = vec_sl((__v2di)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_epi16(__m128i __A, int __B) {
+  __v8hu __rshift = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hi __result;
+
+  if (__B < 16) {
+    if (__builtin_constant_p(__B))
+      __rshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __rshift = vec_splats((unsigned short)__B);
+  }
+  __result = vec_sra((__v8hi)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_epi32(__m128i __A, int __B) {
+  __v4su __rshift = {31, 31, 31, 31};
+  __v4si __result;
+
+  if (__B < 32) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v4su)vec_splat_s32(__B);
+      else
+        __rshift = (__v4su)vec_splats((unsigned int)__B);
+    } else
+      __rshift = vec_splats((unsigned int)__B);
+  }
+  __result = vec_sra((__v4si)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_bslli_si128(__m128i __A, const int __N) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__N < 16)
+    __result = vec_sld((__v16qu)__A, __zeros, __N);
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_bsrli_si128(__m128i __A, const int __N) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__N < 16)
+#ifdef __LITTLE_ENDIAN__
+    if (__builtin_constant_p(__N))
+      /* We would like to use Vector Shift Left Double by Octet
+         Immediate here, to use the immediate form and avoid
+         loading the __N * 8 value into a separate VR.  */
+      __result = vec_sld(__zeros, (__v16qu)__A, (16 - __N));
+    else
+#endif
+    {
+      __v16qu __shift = vec_splats((unsigned char)(__N * 8));
+#ifdef __LITTLE_ENDIAN__
+      __result = vec_sro((__v16qu)__A, __shift);
+#else
+    __result = vec_slo((__v16qu)__A, __shift);
+#endif
+    }
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
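+
+/* Editorial usage sketch, not part of the upstream header: _mm_bsrli_si128
+   shifts the whole vector right by __N bytes, filling with zeros, so a
+   4-byte shift moves element [1] of a __v4si view into element [0].
+   Guarded out.  */
+#if 0
+static inline int __bsrli_si128_demo(void) {
+  __m128i __v = _mm_setr_epi32(1, 2, 3, 4);
+  __m128i __r = _mm_bsrli_si128(__v, 4); /* {2, 3, 4, 0} */
+  return ((__v4si)__r)[0] == 2 && ((__v4si)__r)[3] == 0;
+}
+#endif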
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_si128(__m128i __A, const int __N) {
+  return _mm_bsrli_si128(__A, __N);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_si128(__m128i __A, const int _imm5) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (_imm5 < 16)
+#ifdef __LITTLE_ENDIAN__
+    __result = vec_sld((__v16qu)__A, __zeros, _imm5);
+#else
+    __result = vec_sld(__zeros, (__v16qu)__A, (16 - _imm5));
+#endif
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi16(__m128i __A, int __B) {
+  __v8hu __rshift;
+  __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__B < 16) {
+    if (__builtin_constant_p(__B))
+      __rshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __rshift = vec_splats((unsigned short)__B);
+
+    __result = vec_sr((__v8hi)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi32(__m128i __A, int __B) {
+  __v4su __rshift;
+  __v4si __result = {0, 0, 0, 0};
+
+  if (__B < 32) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v4su)vec_splat_s32(__B);
+      else
+        __rshift = (__v4su)vec_splats((unsigned int)__B);
+    } else
+      __rshift = vec_splats((unsigned int)__B);
+
+    __result = vec_sr((__v4si)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi64(__m128i __A, int __B) {
+  __v2du __rshift;
+  __v2di __result = {0, 0};
+
+  if (__B < 64) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v2du)vec_splat_s32(__B);
+      else
+        __rshift = (__v2du)vec_splats((unsigned long long)__B);
+    } else
+      __rshift = (__v2du)vec_splats((unsigned int)__B);
+
+    __result = vec_sr((__v2di)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi16(__m128i __A, __m128i __B) {
+  __v8hu __lshift;
+  __vector __bool short __shmask;
+  const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __lshift = vec_splat((__v8hu)__B, 0);
+#else
+  __lshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __shmask = vec_cmple(__lshift, __shmax);
+  __result = vec_sl((__v8hu)__A, __lshift);
+  __result = vec_sel((__v8hu)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi32(__m128i __A, __m128i __B) {
+  __v4su __lshift;
+  __vector __bool int __shmask;
+  const __v4su __shmax = {32, 32, 32, 32};
+  __v4su __result;
+#ifdef __LITTLE_ENDIAN__
+  __lshift = vec_splat((__v4su)__B, 0);
+#else
+  __lshift = vec_splat((__v4su)__B, 1);
+#endif
+  __shmask = vec_cmplt(__lshift, __shmax);
+  __result = vec_sl((__v4su)__A, __lshift);
+  __result = vec_sel((__v4su)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi64(__m128i __A, __m128i __B) {
+  __v2du __lshift;
+  __vector __bool long long __shmask;
+  const __v2du __shmax = {64, 64};
+  __v2du __result;
+
+  __lshift = vec_splat((__v2du)__B, 0);
+  __shmask = vec_cmplt(__lshift, __shmax);
+  __result = vec_sl((__v2du)__A, __lshift);
+  __result = vec_sel((__v2du)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_epi16(__m128i __A, __m128i __B) {
+  const __v8hu __rshmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __rshift;
+  __v8hi __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v8hu)__B, 0);
+#else
+  __rshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __rshift = vec_min(__rshift, __rshmax);
+  __result = vec_sra((__v8hi)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_epi32(__m128i __A, __m128i __B) {
+  const __v4su __rshmax = {31, 31, 31, 31};
+  __v4su __rshift;
+  __v4si __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v4su)__B, 0);
+#else
+  __rshift = vec_splat((__v4su)__B, 1);
+#endif
+  __rshift = vec_min(__rshift, __rshmax);
+  __result = vec_sra((__v4si)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi16(__m128i __A, __m128i __B) {
+  __v8hu __rshift;
+  __vector __bool short __shmask;
+  const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v8hu)__B, 0);
+#else
+  __rshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __shmask = vec_cmple(__rshift, __shmax);
+  __result = vec_sr((__v8hu)__A, __rshift);
+  __result = vec_sel((__v8hu)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi32(__m128i __A, __m128i __B) {
+  __v4su __rshift;
+  __vector __bool int __shmask;
+  const __v4su __shmax = {32, 32, 32, 32};
+  __v4su __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v4su)__B, 0);
+#else
+  __rshift = vec_splat((__v4su)__B, 1);
+#endif
+  __shmask = vec_cmplt(__rshift, __shmax);
+  __result = vec_sr((__v4su)__A, __rshift);
+  __result = vec_sel((__v4su)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi64(__m128i __A, __m128i __B) {
+  __v2du __rshift;
+  __vector __bool long long __shmask;
+  const __v2du __shmax = {64, 64};
+  __v2du __result;
+
+  __rshift = vec_splat((__v2du)__B, 0);
+  __shmask = vec_cmplt(__rshift, __shmax);
+  __result = vec_sr((__v2du)__A, __rshift);
+  __result = vec_sel((__v2du)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_pd(__m128d __A, __m128d __B) {
+  return (vec_and((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_pd(__m128d __A, __m128d __B) {
+  return (vec_andc((__v2df)__B, (__v2df)__A));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_pd(__m128d __A, __m128d __B) {
+  return (vec_or((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_pd(__m128d __A, __m128d __B) {
+  return (vec_xor((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_and((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_andc((__v2di)__B, (__v2di)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_or((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_xor((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi16(__m128i const __A, int const __N) {
+  return (unsigned short)((__v8hi)__A)[__N & 7];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi16(__m128i const __A, int const __D, int const __N) {
+  __v8hi __result = (__v8hi)__A;
+
+  __result[(__N & 7)] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_max((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_max((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_min((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_min((__v16qu)__A, (__v16qu)__B);
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Return a mask created from the most significant bit of each 8-bit
+   element in A.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_epi8(__m128i __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__v16qu)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned char __perm_mask = {
+      0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+      0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00};
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_epu16(__m128i __A, __m128i __B) {
+  __v4su __w0, __w1;
+  __v16qu __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+      0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+  };
+
+  __w0 = vec_vmuleuh((__v8hu)__A, (__v8hu)__B);
+  __w1 = vec_vmulouh((__v8hu)__A, (__v8hu)__B);
+  return (__m128i)vec_perm(__w0, __w1, __xform1);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shufflehi_epi16(__m128i __A, const int __mask) {
+  unsigned long __element_selector_98 = __mask & 0x03;
+  unsigned long __element_selector_BA = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_DC = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_FE = (__mask >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+      0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+#endif
+  };
+  __v2du __pmask =
+#ifdef __LITTLE_ENDIAN__
+      {0x1716151413121110UL, 0UL};
+#else
+      {0x1011121314151617UL, 0UL};
+#endif
+  __m64_union __t;
+  __v2du __a, __r;
+
+  __t.as_short[0] = __permute_selectors[__element_selector_98];
+  __t.as_short[1] = __permute_selectors[__element_selector_BA];
+  __t.as_short[2] = __permute_selectors[__element_selector_DC];
+  __t.as_short[3] = __permute_selectors[__element_selector_FE];
+  __pmask[1] = __t.as_m64;
+  __a = (__v2du)__A;
+  __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+  return (__m128i)__r;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shufflelo_epi16(__m128i __A, const int __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0100, 0x0302, 0x0504, 0x0706
+#else
+      0x0001, 0x0203, 0x0405, 0x0607
+#endif
+  };
+  __v2du __pmask =
+#ifdef __LITTLE_ENDIAN__
+      {0UL, 0x1f1e1d1c1b1a1918UL};
+#else
+      {0UL, 0x18191a1b1c1d1e1fUL};
+#endif
+  __m64_union __t;
+  __v2du __a, __r;
+  __t.as_short[0] = __permute_selectors[__element_selector_10];
+  __t.as_short[1] = __permute_selectors[__element_selector_32];
+  __t.as_short[2] = __permute_selectors[__element_selector_54];
+  __t.as_short[3] = __permute_selectors[__element_selector_76];
+  __pmask[0] = __t.as_m64;
+  __a = (__v2du)__A;
+  __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+  return (__m128i)__r;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_epi32(__m128i __A, const int __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned int __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+  };
+  __v4su __t;
+
+  __t[0] = __permute_selectors[__element_selector_10];
+  __t[1] = __permute_selectors[__element_selector_32];
+  __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+  __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+  return (__m128i)vec_perm((__v4si)__A, (__v4si)__A,
+                           (__vector unsigned char)__t);
+}
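+
+/* Editorial usage sketch, not part of the upstream header: each 2-bit field
+   of the mask picks the source element for one destination word, so mask
+   0x1B (0b00011011) reverses the four elements.  Guarded out.  */
+#if 0
+static inline int __shuffle_epi32_demo(void) {
+  __m128i __v = _mm_setr_epi32(1, 2, 3, 4);
+  __m128i __r = _mm_shuffle_epi32(__v, 0x1B); /* selectors 3,2,1,0 -> {4, 3, 2, 1} */
+  return ((__v4si)__r)[0] == 4 && ((__v4si)__r)[3] == 1;
+}
+#endif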
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maskmoveu_si128(__m128i __A, __m128i __B, char *__C) {
+  __v2du __hibit = {0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+  __v16qu __mask, __tmp;
+  __m128i_u *__p = (__m128i_u *)__C;
+
+  __tmp = (__v16qu)_mm_loadu_si128(__p);
+  __mask = (__v16qu)vec_cmpgt((__v16qu)__B, (__v16qu)__hibit);
+  __tmp = vec_sel(__tmp, (__v16qu)__A, __mask);
+  _mm_storeu_si128(__p, (__m128i)__tmp);
+}
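+
+/* Editorial usage sketch, not part of the upstream header: only bytes whose
+   mask byte has its most significant bit set are written; the remaining
+   destination bytes are preserved.  Guarded out.  */
+#if 0
+static inline void __maskmoveu_demo(char *__dst /* at least 16 bytes */) {
+  __m128i __data = _mm_set1_epi8(0x55);
+  __m128i __mask = _mm_setr_epi8(-1, 0, -1, 0, -1, 0, -1, 0,
+                                 -1, 0, -1, 0, -1, 0, -1, 0);
+  _mm_maskmoveu_si128(__data, __mask, __dst); /* writes every other byte */
+}
+#endif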
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_avg((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_avg((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sad_epu8(__m128i __A, __m128i __B) {
+  __v16qu __a, __b;
+  __v16qu __vabsdiff;
+  __v4si __vsum;
+  const __v4su __zero = {0, 0, 0, 0};
+  __v4si __result;
+
+  __a = (__v16qu)__A;
+  __b = (__v16qu)__B;
+#ifndef _ARCH_PWR9
+  __v16qu __vmin = vec_min(__a, __b);
+  __v16qu __vmax = vec_max(__a, __b);
+  __vabsdiff = vec_sub(__vmax, __vmin);
+#else
+  __vabsdiff = vec_absd(__a, __b);
+#endif
+  /* Sum four groups of bytes into integers.  */
+  __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
+#ifdef __LITTLE_ENDIAN__
+  /* Sum across four integers with two integer results.  */
+  __asm__("vsum2sws %0,%1,%2" : "=v"(__result) : "v"(__vsum), "v"(__zero));
+  /* Note: vec_sum2s could be used here, but on little-endian it adds
+     vector shifts that this use case does not need.  The shift would
+     position the 32-bit results (currently at [0] and [2]) at [1] and
+     [3], and they would then have to be swapped back, since the desired
+     results are two 64-bit integers ([1]|[0] and [3]|[2]).  Thus, no
+     shift is performed.  */
+#else
+  /* Sum across four integers with two integer results.  */
+  __result = vec_sum2s(__vsum, (__vector signed int)__zero);
+  /* Rotate the sums into the correct position.  */
+  __result = vec_sld(__result, __result, 6);
+#endif
+  return (__m128i)__result;
+}
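+
+/* Editorial worked example, not part of the upstream header: each 64-bit
+   result lane holds the sum of absolute differences of one 8-byte group;
+   when every byte pair differs by 2, each lane is 2 * 8 = 16.  Guarded
+   out.  */
+#if 0
+static inline int __sad_epu8_demo(void) {
+  __m128i __a = _mm_set1_epi8(3);
+  __m128i __b = _mm_set1_epi8(1);
+  __m128i __r = _mm_sad_epu8(__a, __b); /* both 64-bit lanes = 16 */
+  return ((__v2di)__r)[0] == 16 && ((__v2di)__r)[1] == 16;
+}
+#endif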
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si32(int *__A, int __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si64(long long int *__A, long long int __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si128(__m128i *__A, __m128i __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_pd(double *__A, __m128d __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *(__m128d *)__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_clflush(void const *__A) {
+  /* Use the data cache block flush.  */
+  __asm__("dcbf 0,%0" : : "b"(__A) : "memory");
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_lfence(void) {
+  /* Use lightweight sync for load-to-load ordering; on Power,
+     __ATOMIC_RELEASE maps to lwsync, which also orders loads.  */
+  __atomic_thread_fence(__ATOMIC_RELEASE);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mfence(void) {
+  /* Use heavyweight sync (hwsync) for any-to-any ordering.  */
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_si128(int __A) {
+  return _mm_set_epi32(0, 0, 0, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_si128(long long __A) {
+  return __extension__(__m128i)(__v2di){__A, 0LL};
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_si128(long long __A) {
+  return __extension__(__m128i)(__v2di){__A, 0LL};
+}
+
+/* Casts between various SP, DP, INT vector types.  Note that these do not
+   convert values; they only change the type.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_ps(__m128d __A) {
+  return (__m128)__A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_si128(__m128d __A) {
+  return (__m128i)__A;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_pd(__m128 __A) {
+  return (__m128d)__A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_si128(__m128 __A) {
+  return (__m128i)__A;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_ps(__m128i __A) {
+  return (__m128)__A;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_pd(__m128i __A) {
+  return (__m128d)__A;
+}
+
+#else
+#include_next <emmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* EMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h
new file mode 100644
index 0000000..c1ada98
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h
@@ -0,0 +1,27 @@
+/*===---- immintrin.h - Implementation of Intel intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef IMMINTRIN_H_
+#define IMMINTRIN_H_
+
+#include <x86gprintrin.h>
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#endif /* IMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h
new file mode 100644
index 0000000..6592091
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h
@@ -0,0 +1,45 @@
+/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <stdlib.h>
+
+/* We can't depend on <stdlib.h> to declare posix_memalign, since its
+   prototype may not be visible without the right feature-test macros.  */
+#ifndef __cplusplus
+extern int posix_memalign(void **, size_t, size_t);
+#else
+extern "C" int posix_memalign(void **, size_t, size_t);
+#endif
+
+static __inline void *_mm_malloc(size_t __size, size_t __alignment) {
+  /* PowerPC64 ELF V2 ABI requires quadword alignment.  */
+  size_t __vec_align = sizeof(__vector float);
+  void *__ptr;
+
+  if (__alignment < __vec_align)
+    __alignment = __vec_align;
+  if (posix_memalign(&__ptr, __alignment, __size) == 0)
+    return __ptr;
+  else
+    return NULL;
+}
+
+static __inline void _mm_free(void *__ptr) { free(__ptr); }
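+
+/* Editorial usage sketch, not part of the upstream header: allocate a
+   64-byte-aligned buffer and release it with the matching free.  Guarded
+   out; illustration only.  */
+#if 0
+static __inline int __mm_malloc_demo(void) {
+  void *__p = _mm_malloc(256, 64); /* 256 bytes, 64-byte aligned */
+  if (!__p)
+    return -1;
+  /* ((uintptr_t)__p % 64) == 0 holds here.  */
+  _mm_free(__p);
+  return 0;
+}
+#endif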
+
+#else
+#include_next <mm_malloc.h>
+#endif
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
similarity index 65%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
index 54e4ee9..70e8b81 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
@@ -35,7 +35,8 @@
 #ifndef _MMINTRIN_H_INCLUDED
 #define _MMINTRIN_H_INCLUDED
 
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
 
 #include <altivec.h>
 /* The Intel API is flexible enough that we must allow aliasing with other
@@ -149,17 +150,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short vm1;
-  __vector signed char vresult;
+  __vector signed short __vm1;
+  __vector signed char __vresult;
 
-  vm1 = (__vector signed short)(__vector unsigned long long)
+  __vm1 = (__vector signed short)(__vector unsigned long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
+  __vresult = vec_packs(__vm1, __vm1);
+  return (__m64)((__vector long long)__vresult)[0];
 }
 
 extern __inline __m64
@@ -174,17 +175,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pi32(__m64 __m1, __m64 __m2) {
-  __vector signed int vm1;
-  __vector signed short vresult;
+  __vector signed int __vm1;
+  __vector signed short __vresult;
 
-  vm1 = (__vector signed int)(__vector unsigned long long)
+  __vm1 = (__vector signed int)(__vector unsigned long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
+  __vresult = vec_packs(__vm1, __vm1);
+  return (__m64)((__vector long long)__vresult)[0];
 }
 
 extern __inline __m64
@@ -199,19 +200,20 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned char r;
-  __vector signed short vm1 = (__vector signed short)(__vector long long)
+  __vector unsigned char __r;
+  __vector signed short __vm1 = (__vector signed short)(__vector long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
   const __vector signed short __zero = {0};
-  __vector __bool short __select = vec_cmplt(vm1, __zero);
-  r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
-  __vector __bool char packsel = vec_pack(__select, __select);
-  r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
-  return (__m64)((__vector long long)r)[0];
+  __vector __bool short __select = vec_cmplt(__vm1, __zero);
+  __r =
+      vec_packs((__vector unsigned short)__vm1, (__vector unsigned short)__vm1);
+  __vector __bool char __packsel = vec_pack(__select, __select);
+  __r = vec_sel(__r, (const __vector unsigned char)__zero, __packsel);
+  return (__m64)((__vector long long)__r)[0];
 }
 
 extern __inline __m64
@@ -227,28 +229,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[1];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_mergel(__a, __b);
+  return (__m64)((__vector long long)__c)[1];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[4];
-  res.as_char[1] = m2.as_char[4];
-  res.as_char[2] = m1.as_char[5];
-  res.as_char[3] = m2.as_char[5];
-  res.as_char[4] = m1.as_char[6];
-  res.as_char[5] = m2.as_char[6];
-  res.as_char[6] = m1.as_char[7];
-  res.as_char[7] = m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[4];
+  __res.as_char[1] = __mu2.as_char[4];
+  __res.as_char[2] = __mu1.as_char[5];
+  __res.as_char[3] = __mu2.as_char[5];
+  __res.as_char[4] = __mu1.as_char[6];
+  __res.as_char[5] = __mu2.as_char[6];
+  __res.as_char[6] = __mu1.as_char[7];
+  __res.as_char[7] = __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -263,17 +265,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[2];
-  res.as_short[1] = m2.as_short[2];
-  res.as_short[2] = m1.as_short[3];
-  res.as_short[3] = m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[2];
+  __res.as_short[1] = __mu2.as_short[2];
+  __res.as_short[2] = __mu1.as_short[3];
+  __res.as_short[3] = __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -286,15 +288,15 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[1];
-  res.as_int[1] = m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[1];
+  __res.as_int[1] = __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -308,28 +310,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_mergel(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0];
-  res.as_char[1] = m2.as_char[0];
-  res.as_char[2] = m1.as_char[1];
-  res.as_char[3] = m2.as_char[1];
-  res.as_char[4] = m1.as_char[2];
-  res.as_char[5] = m2.as_char[2];
-  res.as_char[6] = m1.as_char[3];
-  res.as_char[7] = m2.as_char[3];
+  __res.as_char[0] = __mu1.as_char[0];
+  __res.as_char[1] = __mu2.as_char[0];
+  __res.as_char[2] = __mu1.as_char[1];
+  __res.as_char[3] = __mu2.as_char[1];
+  __res.as_char[4] = __mu1.as_char[2];
+  __res.as_char[5] = __mu2.as_char[2];
+  __res.as_char[6] = __mu1.as_char[3];
+  __res.as_char[7] = __mu2.as_char[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -343,17 +345,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0];
-  res.as_short[1] = m2.as_short[0];
-  res.as_short[2] = m1.as_short[1];
-  res.as_short[3] = m2.as_short[1];
+  __res.as_short[0] = __mu1.as_short[0];
+  __res.as_short[1] = __mu2.as_short[0];
+  __res.as_short[2] = __mu1.as_short[1];
+  __res.as_short[3] = __mu2.as_short[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -367,15 +369,15 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0];
-  res.as_int[1] = m2.as_int[0];
+  __res.as_int[0] = __mu1.as_int[0];
+  __res.as_int[1] = __mu2.as_int[0];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -389,28 +391,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0] + m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] + m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] + m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] + m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] + m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] + m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] + m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[0] + __mu2.as_char[0];
+  __res.as_char[1] = __mu1.as_char[1] + __mu2.as_char[1];
+  __res.as_char[2] = __mu1.as_char[2] + __mu2.as_char[2];
+  __res.as_char[3] = __mu1.as_char[3] + __mu2.as_char[3];
+  __res.as_char[4] = __mu1.as_char[4] + __mu2.as_char[4];
+  __res.as_char[5] = __mu1.as_char[5] + __mu2.as_char[5];
+  __res.as_char[6] = __mu1.as_char[6] + __mu2.as_char[6];
+  __res.as_char[7] = __mu1.as_char[7] + __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -425,24 +427,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0] + m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] + m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] + m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[0] + __mu2.as_short[0];
+  __res.as_short[1] = __mu1.as_short[1] + __mu2.as_short[1];
+  __res.as_short[2] = __mu1.as_short[2] + __mu2.as_short[2];
+  __res.as_short[3] = __mu1.as_short[3] + __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -457,22 +459,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0] + m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[0] + __mu2.as_int[0];
+  __res.as_int[1] = __mu1.as_int[1] + __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -487,28 +489,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0] - m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] - m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] - m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] - m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] - m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] - m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] - m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[0] - __mu2.as_char[0];
+  __res.as_char[1] = __mu1.as_char[1] - __mu2.as_char[1];
+  __res.as_char[2] = __mu1.as_char[2] - __mu2.as_char[2];
+  __res.as_char[3] = __mu1.as_char[3] - __mu2.as_char[3];
+  __res.as_char[4] = __mu1.as_char[4] - __mu2.as_char[4];
+  __res.as_char[5] = __mu1.as_char[5] - __mu2.as_char[5];
+  __res.as_char[6] = __mu1.as_char[6] - __mu2.as_char[6];
+  __res.as_char[7] = __mu1.as_char[7] - __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -523,24 +525,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0] - m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] - m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] - m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[0] - __mu2.as_short[0];
+  __res.as_short[1] = __mu1.as_short[1] - __mu2.as_short[1];
+  __res.as_short[2] = __mu1.as_short[2] - __mu2.as_short[2];
+  __res.as_short[3] = __mu1.as_short[3] - __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -555,22 +557,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0] - m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[0] - __mu2.as_int[0];
+  __res.as_int[1] = __mu1.as_int[1] - __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -708,25 +710,25 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
 #if defined(_ARCH_PWR6) && defined(__powerpc64__)
-  __m64 res;
-  __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
-  return (res);
+  __m64 __res;
+  __asm__("cmpb %0,%1,%2;\n" : "=r"(__res) : "r"(__m1), "r"(__m2) :);
+  return (__res);
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+  __res.as_char[0] = (__mu1.as_char[0] == __mu2.as_char[0]) ? -1 : 0;
+  __res.as_char[1] = (__mu1.as_char[1] == __mu2.as_char[1]) ? -1 : 0;
+  __res.as_char[2] = (__mu1.as_char[2] == __mu2.as_char[2]) ? -1 : 0;
+  __res.as_char[3] = (__mu1.as_char[3] == __mu2.as_char[3]) ? -1 : 0;
+  __res.as_char[4] = (__mu1.as_char[4] == __mu2.as_char[4]) ? -1 : 0;
+  __res.as_char[5] = (__mu1.as_char[5] == __mu2.as_char[5]) ? -1 : 0;
+  __res.as_char[6] = (__mu1.as_char[6] == __mu2.as_char[6]) ? -1 : 0;
+  __res.as_char[7] = (__mu1.as_char[7] == __mu2.as_char[7]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -740,28 +742,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = (__vector signed char)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = (__vector signed char)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+  __res.as_char[0] = (__mu1.as_char[0] > __mu2.as_char[0]) ? -1 : 0;
+  __res.as_char[1] = (__mu1.as_char[1] > __mu2.as_char[1]) ? -1 : 0;
+  __res.as_char[2] = (__mu1.as_char[2] > __mu2.as_char[2]) ? -1 : 0;
+  __res.as_char[3] = (__mu1.as_char[3] > __mu2.as_char[3]) ? -1 : 0;
+  __res.as_char[4] = (__mu1.as_char[4] > __mu2.as_char[4]) ? -1 : 0;
+  __res.as_char[5] = (__mu1.as_char[5] > __mu2.as_char[5]) ? -1 : 0;
+  __res.as_char[6] = (__mu1.as_char[6] > __mu2.as_char[6]) ? -1 : 0;
+  __res.as_char[7] = (__mu1.as_char[7] > __mu2.as_char[7]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -777,24 +779,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = (__vector signed short)vec_cmpeq(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+  __res.as_short[0] = (__mu1.as_short[0] == __mu2.as_short[0]) ? -1 : 0;
+  __res.as_short[1] = (__mu1.as_short[1] == __mu2.as_short[1]) ? -1 : 0;
+  __res.as_short[2] = (__mu1.as_short[2] == __mu2.as_short[2]) ? -1 : 0;
+  __res.as_short[3] = (__mu1.as_short[3] == __mu2.as_short[3]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -808,24 +810,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = (__vector signed short)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+  __res.as_short[0] = (__mu1.as_short[0] > __mu2.as_short[0]) ? -1 : 0;
+  __res.as_short[1] = (__mu1.as_short[1] > __mu2.as_short[1]) ? -1 : 0;
+  __res.as_short[2] = (__mu1.as_short[2] > __mu2.as_short[2]) ? -1 : 0;
+  __res.as_short[3] = (__mu1.as_short[3] > __mu2.as_short[3]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -841,22 +843,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = (__vector signed int)vec_cmpeq(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+  __res.as_int[0] = (__mu1.as_int[0] == __mu2.as_int[0]) ? -1 : 0;
+  __res.as_int[1] = (__mu1.as_int[1] == __mu2.as_int[1]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -870,22 +872,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = (__vector signed int)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+  __res.as_int[0] = (__mu1.as_int[0] > __mu2.as_int[0]) ? -1 : 0;
+  __res.as_int[1] = (__mu1.as_int[1] > __mu2.as_int[1]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -901,12 +903,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -919,12 +921,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -937,12 +939,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -956,12 +958,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
+  __vector unsigned short __a, __b, __c;
 
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned short)vec_splats(__m1);
+  __b = (__vector unsigned short)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -975,12 +977,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -994,12 +996,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1013,12 +1015,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1032,12 +1034,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
+  __vector unsigned short __a, __b, __c;
 
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned short)vec_splats(__m1);
+  __b = (__vector unsigned short)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1052,14 +1054,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_madd_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed int c;
-  __vector signed int zero = {0, 0, 0, 0};
+  __vector signed short __a, __b;
+  __vector signed int __c;
+  __vector signed int __zero = {0, 0, 0, 0};
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_vmsumshm(a, b, zero);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_vmsumshm(__a, __b, __zero);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1072,10 +1074,10 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed short c;
-  __vector signed int w0, w1;
-  __vector unsigned char xform1 = {
+  __vector signed short __a, __b;
+  __vector signed short __c;
+  __vector signed int __w0, __w1;
+  __vector unsigned char __xform1 = {
 #ifdef __LITTLE_ENDIAN__
       0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
       0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
@@ -1085,14 +1087,14 @@
 #endif
   };
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
 
-  w0 = vec_vmulesh(a, b);
-  w1 = vec_vmulosh(a, b);
-  c = (__vector signed short)vec_perm(w0, w1, xform1);
+  __w0 = vec_vmulesh(__a, __b);
+  __w1 = vec_vmulosh(__a, __b);
+  __c = (__vector signed short)vec_perm(__w0, __w1, __xform1);
 
-  return (__m64)((__vector long long)c)[0];
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1106,12 +1108,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = a * b;
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = __a * __b;
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1124,14 +1126,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sll_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
+  __vector signed short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sl(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector signed short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sl(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1159,13 +1161,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sll_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = m.as_int[0] << __count;
-  res.as_int[1] = m.as_int[1] << __count;
-  return (res.as_m64);
+  __res.as_int[0] = __res.as_int[0] << __count;
+  __res.as_int[1] = __res.as_int[1] << __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1191,14 +1193,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sra_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
+  __vector signed short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sra(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector signed short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sra(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1226,13 +1228,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sra_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = m.as_int[0] >> __count;
-  res.as_int[1] = m.as_int[1] >> __count;
-  return (res.as_m64);
+  __res.as_int[0] = __res.as_int[0] >> __count;
+  __res.as_int[1] = __res.as_int[1] >> __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1258,14 +1260,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_srl_pi16(__m64 __m, __m64 __count) {
-  __vector unsigned short m, r;
-  __vector unsigned short c;
+  __vector unsigned short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector unsigned short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sr(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector unsigned short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sr(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1293,13 +1295,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_srl_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
-  res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
-  return (res.as_m64);
+  __res.as_int[0] = (unsigned int)__res.as_int[0] >> __count;
+  __res.as_int[1] = (unsigned int)__res.as_int[1] >> __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1326,24 +1328,24 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi32(int __i1, int __i0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
+  __res.as_int[0] = __i0;
+  __res.as_int[1] = __i1;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of four 16-bit values; W0 is least significant.  */
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_short[0] = __w0;
-  res.as_short[1] = __w1;
-  res.as_short[2] = __w2;
-  res.as_short[3] = __w3;
-  return (res.as_m64);
+  __res.as_short[0] = __w0;
+  __res.as_short[1] = __w1;
+  __res.as_short[2] = __w2;
+  __res.as_short[3] = __w3;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of eight 8-bit values; B0 is least significant.  */
@@ -1351,28 +1353,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
                 char __b2, char __b1, char __b0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_char[0] = __b0;
-  res.as_char[1] = __b1;
-  res.as_char[2] = __b2;
-  res.as_char[3] = __b3;
-  res.as_char[4] = __b4;
-  res.as_char[5] = __b5;
-  res.as_char[6] = __b6;
-  res.as_char[7] = __b7;
-  return (res.as_m64);
+  __res.as_char[0] = __b0;
+  __res.as_char[1] = __b1;
+  __res.as_char[2] = __b2;
+  __res.as_char[3] = __b3;
+  __res.as_char[4] = __b4;
+  __res.as_char[5] = __b5;
+  __res.as_char[6] = __b6;
+  __res.as_char[7] = __b7;
+  return (__res.as_m64);
 }
 
 /* Similar, but with the arguments in reverse order.  */
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_setr_pi32(int __i0, int __i1) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
+  __res.as_int[0] = __i0;
+  __res.as_int[1] = __i1;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1392,11 +1394,11 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set1_pi32(int __i) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i;
-  res.as_int[1] = __i;
-  return (res.as_m64);
+  __res.as_int[0] = __i;
+  __res.as_int[1] = __i;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of four 16-bit values, all elements containing W.  */
@@ -1409,13 +1411,13 @@
   w = (__vector signed short)vec_splats(__w);
   return (__m64)((__vector long long)w)[0];
 #else
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_short[0] = __w;
-  res.as_short[1] = __w;
-  res.as_short[2] = __w;
-  res.as_short[3] = __w;
-  return (res.as_m64);
+  __res.as_short[0] = __w;
+  __res.as_short[1] = __w;
+  __res.as_short[2] = __w;
+  __res.as_short[3] = __w;
+  return (__res.as_m64);
 #endif
 }
 
@@ -1424,28 +1426,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set1_pi8(signed char __b) {
 #if _ARCH_PWR8
-  __vector signed char b;
+  __vector signed char __res;
 
-  b = (__vector signed char)vec_splats(__b);
-  return (__m64)((__vector long long)b)[0];
+  __res = (__vector signed char)vec_splats(__b);
+  return (__m64)((__vector long long)__res)[0];
 #else
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_char[0] = __b;
-  res.as_char[1] = __b;
-  res.as_char[2] = __b;
-  res.as_char[3] = __b;
-  res.as_char[4] = __b;
-  res.as_char[5] = __b;
-  res.as_char[6] = __b;
-  res.as_char[7] = __b;
-  return (res.as_m64);
+  __res.as_char[0] = __b;
+  __res.as_char[1] = __b;
+  __res.as_char[2] = __b;
+  __res.as_char[3] = __b;
+  __res.as_char[4] = __b;
+  __res.as_char[5] = __b;
+  __res.as_char[6] = __b;
+  __res.as_char[7] = __b;
+  return (__res.as_m64);
 #endif
 }
 
 #else
 #include_next <mmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
 
 #endif /* _MMINTRIN_H_INCLUDED */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h
new file mode 100644
index 0000000..fda39ed
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h
@@ -0,0 +1,145 @@
+/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.
+
+   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
+   is a good match for most SIMD operations.  However, the horizontal
+   add/sub operations require the data pairs to be permuted into
+   separate registers with vertical even/odd alignment, and the addsub
+   operation requires that the sign of only the even-numbered elements
+   be flipped (XORed with -0.0).
+   For larger blocks of code using these intrinsic implementations,
+   the compiler should be able to schedule instructions to avoid
+   additional latency.
+
+   In the specific case of the monitor and mwait instructions, there is
+   no direct equivalent in the PowerISA at this time, so those
+   intrinsics are not implemented.  */
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#endif
+
+#ifndef PMMINTRIN_H_
+#define PMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+/* We need definitions from the SSE2 and SSE header files.  */
+#include <emmintrin.h>
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_addsub_ps(__m128 __X, __m128 __Y) {
+  const __v4sf __even_n0 = {-0.0, 0.0, -0.0, 0.0};
+  __v4sf __even_neg_Y = vec_xor(__Y, __even_n0);
+  return (__m128)vec_add(__X, __even_neg_Y);
+}
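+
+/* Usage sketch (illustrative): only the signs of the even-numbered
+   lanes of __Y are flipped, so those lanes are effectively subtracted:
+
+     __m128 __x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);     // lanes {1,2,3,4}
+     __m128 __y = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f); // lanes {10,20,30,40}
+     __m128 __r = _mm_addsub_ps(__x, __y);    // lanes {-9, 22, -27, 44}  */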
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_addsub_pd(__m128d __X, __m128d __Y) {
+  const __v2df __even_n0 = {-0.0, 0.0};
+  __v2df __even_neg_Y = vec_xor(__Y, __even_n0);
+  return (__m128d)vec_add(__X, __even_neg_Y);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_ps(__m128 __X, __m128 __Y) {
+  __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+                                     0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+                                     0x18, 0x19, 0x1A, 0x1B};
+  __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+                                     0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+                                     0x1C, 0x1D, 0x1E, 0x1F};
+  return (__m128)vec_add(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+                         vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_ps(__m128 __X, __m128 __Y) {
+  __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+                                     0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+                                     0x18, 0x19, 0x1A, 0x1B};
+  __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+                                     0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+                                     0x1C, 0x1D, 0x1E, 0x1F};
+  return (__m128)vec_sub(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+                         vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
+}
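+
+/* Illustrative sketch of the permute-then-add/sub scheme above: the two
+   permutes split adjacent pairs into even- and odd-indexed elements, so
+   _mm_hadd_ps with lanes {1,2,3,4} and {10,20,30,40} computes
+   {1+2, 3+4, 10+20, 30+40} == {3, 7, 30, 70}.  */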
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pd(__m128d __X, __m128d __Y) {
+  return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y),
+                          vec_mergel((__v2df)__X, (__v2df)__Y));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pd(__m128d __X, __m128d __Y) {
+  return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y),
+                          vec_mergel((__v2df)__X, (__v2df)__Y));
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movehdup_ps(__m128 __X) {
+  return (__m128)vec_mergeo((__v4su)__X, (__v4su)__X);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_moveldup_ps(__m128 __X) {
+  return (__m128)vec_mergee((__v4su)__X, (__v4su)__X);
+}
+#endif
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loaddup_pd(double const *__P) {
+  return (__m128d)vec_splats(*__P);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movedup_pd(__m128d __X) {
+  return _mm_shuffle_pd(__X, __X, _MM_SHUFFLE2(0, 0));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_lddqu_si128(__m128i const *__P) {
+  return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
+}
+
+/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_mwait.  */
+
+#else
+#include_next <pmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* PMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h
new file mode 100644
index 0000000..6fe6c8a
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h
@@ -0,0 +1,663 @@
+/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.
+
+   NOTE: This is NOT a complete implementation of the SSE4 intrinsics!  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef SMMINTRIN_H_
+#define SMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+#include <tmmintrin.h>
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_ZERO 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_NEG_INF 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
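+
+/* Illustrative values of the composed macros above: _MM_FROUND_FLOOR is
+   0x03 (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) and
+   _MM_FROUND_NEARBYINT is 0x0c (_MM_FROUND_CUR_DIRECTION |
+   _MM_FROUND_NO_EXC); only values with the 0x08 bit set take the
+   exception-masking path in _mm_round_pd below.  */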
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_pd(__m128d __A, int __rounding) {
+  __v2df __r;
+  union {
+    double __fr;
+    long long __fpscr;
+  } __enables_save, __fpscr_save;
+
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Save enabled exceptions, disable all exceptions,
+       and preserve the rounding mode.  */
+#ifdef _ARCH_PWR9
+    __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+    __fpscr_save.__fr = __builtin_mffs();
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+    __fpscr_save.__fpscr &= ~0xf8;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+  }
+
+  switch (__rounding) {
+  case _MM_FROUND_TO_NEAREST_INT:
+    __fpscr_save.__fr = __builtin_mffsl();
+    __attribute__((fallthrough));
+  case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+    __builtin_set_fpscr_rn(0b00);
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+
+    __r = vec_rint((__v2df)__A);
+
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+    break;
+  case _MM_FROUND_TO_NEG_INF:
+  case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+    __r = vec_floor((__v2df)__A);
+    break;
+  case _MM_FROUND_TO_POS_INF:
+  case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+    __r = vec_ceil((__v2df)__A);
+    break;
+  case _MM_FROUND_TO_ZERO:
+  case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+    __r = vec_trunc((__v2df)__A);
+    break;
+  case _MM_FROUND_CUR_DIRECTION:
+    __r = vec_rint((__v2df)__A);
+    break;
+  }
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    /* Restore enabled exceptions.  */
+    __fpscr_save.__fr = __builtin_mffsl();
+    __fpscr_save.__fpscr |= __enables_save.__fpscr;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+  }
+  return (__m128d)__r;
+}
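+
+/* Usage sketch (illustrative): _mm_round_pd(__v, _MM_FROUND_TRUNC)
+   truncates toward zero via vec_trunc, e.g. {1.7, -1.7} -> {1.0, -1.0};
+   OR-ing in _MM_FROUND_NO_EXC additionally masks the floating-point
+   exception enables in the FPSCR for the duration of the operation.  */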
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_sd(__m128d __A, __m128d __B, int __rounding) {
+  __B = _mm_round_pd(__B, __rounding);
+  __v2df __r = {((__v2df)__B)[0], ((__v2df)__A)[1]};
+  return (__m128d)__r;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_ps(__m128 __A, int __rounding) {
+  __v4sf __r;
+  union {
+    double __fr;
+    long long __fpscr;
+  } __enables_save, __fpscr_save;
+
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Save enabled exceptions, disable all exceptions,
+       and preserve the rounding mode.  */
+#ifdef _ARCH_PWR9
+    __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+    __fpscr_save.__fr = __builtin_mffs();
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+    __fpscr_save.__fpscr &= ~0xf8;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+  }
+
+  switch (__rounding) {
+  case _MM_FROUND_TO_NEAREST_INT:
+    __fpscr_save.__fr = __builtin_mffsl();
+    __attribute__((fallthrough));
+  case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+    __builtin_set_fpscr_rn(0b00);
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+
+    __r = vec_rint((__v4sf)__A);
+
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+    break;
+  case _MM_FROUND_TO_NEG_INF:
+  case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+    __r = vec_floor((__v4sf)__A);
+    break;
+  case _MM_FROUND_TO_POS_INF:
+  case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+    __r = vec_ceil((__v4sf)__A);
+    break;
+  case _MM_FROUND_TO_ZERO:
+  case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+    __r = vec_trunc((__v4sf)__A);
+    break;
+  case _MM_FROUND_CUR_DIRECTION:
+    __r = vec_rint((__v4sf)__A);
+    break;
+  }
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    /* Restore enabled exceptions.  */
+    __fpscr_save.__fr = __builtin_mffsl();
+    __fpscr_save.__fpscr |= __enables_save.__fpscr;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+  }
+  return (__m128)__r;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_ss(__m128 __A, __m128 __B, int __rounding) {
+  __B = _mm_round_ps(__B, __rounding);
+  __v4sf __r = (__v4sf)__A;
+  __r[0] = ((__v4sf)__B)[0];
+  return (__m128)__r;
+}
+
+#define _mm_ceil_pd(V) _mm_round_pd((V), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_FLOOR)
+
+#define _mm_ceil_ps(V) _mm_round_ps((V), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(V) _mm_round_ps((V), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_FLOOR)
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+  __v16qi __result = (__v16qi)__A;
+
+  __result[__N & 0xf] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+  __v4si __result = (__v4si)__A;
+
+  __result[__N & 3] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+  __v2di __result = (__v2di)__A;
+
+  __result[__N & 1] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi8(__m128i __X, const int __N) {
+  return (unsigned char)((__v16qi)__X)[__N & 15];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi32(__m128i __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi64(__m128i __X, const int __N) {
+  return ((__v2di)__X)[__N & 1];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_ps(__m128 __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
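+
+/* Illustrative note: _mm_extract_ps returns the raw IEEE-754 bit
+   pattern of the selected lane as an int (extracting 1.0f yields
+   0x3f800000); reinterpret it with memcpy or a union to recover the
+   float value.  */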
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
+  __v16qi __charmask = vec_splats((signed char)__imm8);
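+  /* vec_gb gathers bit i of each byte into byte i of the result; on a
+     splat of __imm8 this expands each mask bit into a 0x00/0xff byte.  */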
+  __charmask = vec_gb(__charmask);
+  __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+#ifdef __BIG_ENDIAN__
+  __shortmask = vec_reve(__shortmask);
+#endif
+  return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128i)vec_blendv((__v16qi)__A, (__v16qi)__B, (__v16qu)__mask);
+#else
+  const __v16qu __seven = vec_splats((unsigned char)0x07);
+  __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
+  return (__m128i)vec_sel((__v16qi)__A, (__v16qi)__B, __lmask);
+#endif
+}
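+
+/* Usage sketch (illustrative): each byte of __mask selects by its sign
+   bit; the vec_sra-by-7 fallback replicates that bit across the byte,
+   so a mask byte of 0x80 yields 0xff and picks the byte from __B, while
+   0x00 keeps the byte from __A.  */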
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_ps(__m128 __A, __m128 __B, const int __imm8) {
+  __v16qu __pcv[] = {
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+      {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+      {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+      {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+      {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+  };
+  __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+  return (__m128)__r;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_ps(__m128 __A, __m128 __B, __m128 __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128)vec_blendv((__v4sf)__A, (__v4sf)__B, (__v4su)__mask);
+#else
+  const __v4si __zero = {0};
+  const __vector __bool int __boolmask = vec_cmplt((__v4si)__mask, __zero);
+  return (__m128)vec_sel((__v4su)__A, (__v4su)__B, (__v4su)__boolmask);
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_pd(__m128d __A, __m128d __B, const int __imm8) {
+  __v16qu __pcv[] = {
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}};
+  __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+  return (__m128d)__r;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_pd(__m128d __A, __m128d __B, __m128d __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128d)vec_blendv((__v2df)__A, (__v2df)__B, (__v2du)__mask);
+#else
+  const __v2di __zero = {0};
+  const __vector __bool long long __boolmask =
+      vec_cmplt((__v2di)__mask, __zero);
+  return (__m128d)vec_sel((__v2du)__A, (__v2du)__B, (__v2du)__boolmask);
+#endif
+}
+#endif
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testz_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  const __v16qu __zero = {0};
+  return vec_all_eq(vec_and((__v16qu)__A, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testc_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  const __v16qu __zero = {0};
+  const __v16qu __notA = vec_nor((__v16qu)__A, (__v16qu)__A);
+  return vec_all_eq(vec_and((__v16qu)__notA, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testnzc_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  return _mm_testz_si128(__A, __B) == 0 && _mm_testc_si128(__A, __B) == 0;
+}
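+
+/* Illustrative semantics of the three tests above: with every byte of
+   __A equal to 0x0f and every byte of __B equal to 0xf0,
+   _mm_testz_si128 returns 1 (__A & __B is all zero), _mm_testc_si128
+   returns 0 (~__A & __B is nonzero), and _mm_testnzc_si128 returns 0
+   because the "zero" case already holds.  */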
+
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi64(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_cmpeq((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi8(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu16(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi8(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu16(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mullo_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_mul((__v4su)__X, (__v4su)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_mule((__v4si)__X, (__v4si)__Y);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi16(__m128i __A) {
+  return (__m128i)vec_unpackh((__v16qi)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi32(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v16qi)__A);
+  return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi64(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v16qi)__A);
+  __A = (__m128i)vec_unpackh((__v8hi)__A);
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi16_epi32(__m128i __A) {
+  return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi16_epi64(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v8hi)__A);
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_epi64(__m128i __A) {
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi16(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi32(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+  __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi64(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+  __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+  __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+  __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu16_epi32(__m128i __A) {
+  const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu16_epi64(__m128i __A) {
+  const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+  __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu32_epi64(__m128i __A) {
+  const __v4su __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v4su)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
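+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  The _mm_cvtepu* family zero-extends the low
+   elements; the code above expresses that as a merge with a zero vector,
+   with the operand order flipped between endiannesses.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128i example_widen_low_bytes(__m128i __packed) {
+  /* Bytes 0..7 of __packed become unsigned 16-bit lanes 0..7.  */
+  return _mm_cvtepu8_epi16(__packed);
+}
+#endif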
+
+/* Return horizontal packed word minimum and its index in bits [15:0]
+   and bits [18:16] respectively.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_minpos_epu16(__m128i __A) {
+  union __u {
+    __m128i __m;
+    __v8hu __uh;
+  };
+  union __u __u = {.__m = __A}, __r = {.__m = {0}};
+  unsigned short __ridx = 0;
+  unsigned short __rmin = __u.__uh[__ridx];
+  unsigned long __i;
+  for (__i = 1; __i < 8; __i++) {
+    if (__u.__uh[__i] < __rmin) {
+      __rmin = __u.__uh[__i];
+      __ridx = __i;
+    }
+  }
+  __r.__uh[0] = __rmin;
+  __r.__uh[1] = __ridx;
+  return __r.__m;
+}
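+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  Decoding the _mm_minpos_epu16 result: the
+   minimum is halfword 0 of the returned vector and its index is
+   halfword 1, matching bits [15:0] and [18:16] of the x86 result.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline unsigned short example_hmin_u16(__m128i __v, unsigned *__idx) {
+  __m128i __r = _mm_minpos_epu16(__v);
+  *__idx = ((__v8hu)__r)[1]; /* index of the minimum element */
+  return ((__v8hu)__r)[0];   /* the minimum value itself */
+}
+#endif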
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packus_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_packsu((__v4si)__X, (__v4si)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi64(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_cmpgt((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
+#else
+#include_next <smmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* SMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h
new file mode 100644
index 0000000..6185ca1
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h
@@ -0,0 +1,453 @@
+/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#endif
+
+#ifndef TMMINTRIN_H_
+#define TMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files.  */
+#include <pmmintrin.h>
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi16(__m128i __A) {
+  return (__m128i)vec_abs((__v8hi)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi32(__m128i __A) {
+  return (__m128i)vec_abs((__v4si)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi8(__m128i __A) {
+  return (__m128i)vec_abs((__v16qi)__A);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi16(__m64 __A) {
+  __v8hi __B = (__v8hi)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi32(__m64 __A) {
+  __v4si __B = (__v4si)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi8(__m64 __A) {
+  __v16qi __B = (__v16qi)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_alignr_epi8(__m128i __A, __m128i __B, const unsigned int __count) {
+  if (__builtin_constant_p(__count) && __count < 16) {
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_reve((__v16qu)__A);
+    __B = (__m128i)vec_reve((__v16qu)__B);
+#endif
+    __A = (__m128i)vec_sld((__v16qu)__B, (__v16qu)__A, __count);
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_reve((__v16qu)__A);
+#endif
+    return __A;
+  }
+
+  if (__count == 0)
+    return __B;
+
+  if (__count >= 16) {
+    if (__count >= 32) {
+      const __v16qu __zero = {0};
+      return (__m128i)__zero;
+    } else {
+      const __v16qu __shift = vec_splats((unsigned char)((__count - 16) * 8));
+#ifdef __LITTLE_ENDIAN__
+      return (__m128i)vec_sro((__v16qu)__A, __shift);
+#else
+      return (__m128i)vec_slo((__v16qu)__A, __shift);
+#endif
+    }
+  } else {
+    const __v16qu __shiftA = vec_splats((unsigned char)((16 - __count) * 8));
+    const __v16qu __shiftB = vec_splats((unsigned char)(__count * 8));
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_slo((__v16qu)__A, __shiftA);
+    __B = (__m128i)vec_sro((__v16qu)__B, __shiftB);
+#else
+    __A = (__m128i)vec_sro((__v16qu)__A, __shiftA);
+    __B = (__m128i)vec_slo((__v16qu)__B, __shiftB);
+#endif
+    return (__m128i)vec_or((__v16qu)__A, (__v16qu)__B);
+  }
+}
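+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  _mm_alignr_epi8(A, B, n) concatenates A:B as a
+   32-byte value (A high) and extracts 16 bytes starting at byte n, so a
+   count of 1 drops the lowest byte of B and pulls in the lowest byte of
+   A.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128i example_shift_window(__m128i __hi, __m128i __lo) {
+  return _mm_alignr_epi8(__hi, __lo, 1);
+}
+#endif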
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_alignr_pi8(__m64 __A, __m64 __B, unsigned int __count) {
+  if (__count < 16) {
+    __v2du __C = {__B, __A};
+#ifdef __LITTLE_ENDIAN__
+    const __v4su __shift = {__count << 3, 0, 0, 0};
+    __C = (__v2du)vec_sro((__v16qu)__C, (__v16qu)__shift);
+#else
+    const __v4su __shift = {0, 0, 0, __count << 3};
+    __C = (__v2du)vec_slo((__v16qu)__C, (__v16qu)__shift);
+#endif
+    return (__m64)__C[0];
+  } else {
+    const __m64 __zero = {0};
+    return __zero;
+  }
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_add(__C, __D);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_epi32(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  2,  3,  8,  9,  10, 11,
+                       16, 17, 18, 19, 24, 25, 26, 27};
+  const __v16qu __Q = {4,  5,  6,  7,  12, 13, 14, 15,
+                       20, 21, 22, 23, 28, 29, 30, 31};
+  __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+  __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+  return (__m128i)vec_add(__C, __D);
+}
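+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  The two permutes above gather the even- and
+   odd-indexed 32-bit lanes of A:B, so _mm_hadd_epi32(A, B) yields
+   { a0+a1, a2+a3, b0+b1, b2+b3 }; applied twice it reduces a vector to
+   its lane sum.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline int example_lane_sum_epi32(__m128i __a) {
+  __m128i __t = _mm_hadd_epi32(__a, __a); /* {a0+a1, a2+a3, ...} */
+  __t = _mm_hadd_epi32(__t, __t);         /* total sum in every lane */
+  return ((__v4si)__t)[0];
+}
+#endif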
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pi16(__m64 __A, __m64 __B) {
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_add(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pi32(__m64 __A, __m64 __B) {
+  __v4si __C = (__v4si)(__v2du){__A, __B};
+  const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+  const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+  __v4si __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_add(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadds_epi16(__m128i __A, __m128i __B) {
+  __v4si __C = {0}, __D = {0};
+  __C = vec_sum4s((__v8hi)__A, __C);
+  __D = vec_sum4s((__v8hi)__B, __D);
+  __C = (__v4si)vec_packs(__C, __D);
+  return (__m128i)__C;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadds_pi16(__m64 __A, __m64 __B) {
+  const __v4si __zero = {0};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v4si __D = vec_sum4s(__C, __zero);
+  __C = vec_packs(__D, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_sub(__C, __D);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_epi32(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  2,  3,  8,  9,  10, 11,
+                       16, 17, 18, 19, 24, 25, 26, 27};
+  const __v16qu __Q = {4,  5,  6,  7,  12, 13, 14, 15,
+                       20, 21, 22, 23, 28, 29, 30, 31};
+  __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+  __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+  return (__m128i)vec_sub(__C, __D);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pi16(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v8hi __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_sub(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pi32(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+  const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+  __v4si __C = (__v4si)(__v2du){__A, __B};
+  __v4si __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_sub(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsubs_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_subs(__C, __D);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsubs_pi16(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v8hi __D = vec_perm(__C, __C, __P);
+  __v8hi __E = vec_perm(__C, __C, __Q);
+  __C = vec_subs(__D, __E);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_epi8(__m128i __A, __m128i __B) {
+  const __v16qi __zero = {0};
+  __vector __bool char __select = vec_cmplt((__v16qi)__B, __zero);
+  __v16qi __C = vec_perm((__v16qi)__A, (__v16qi)__A, (__v16qu)__B);
+  return (__m128i)vec_sel(__C, __zero, __select);
+}
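+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  As in x86 PSHUFB, a control byte with its
+   high bit set zeroes the corresponding result byte; that is what the
+   vec_sel above implements.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128i example_reverse_bytes(__m128i __a) {
+  const __v16qi __rev = {15, 14, 13, 12, 11, 10, 9, 8,
+                         7,  6,  5,  4,  3,  2,  1, 0};
+  return _mm_shuffle_epi8(__a, (__m128i)__rev);
+}
+#endif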
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pi8(__m64 __A, __m64 __B) {
+  const __v16qi __zero = {0};
+  __v16qi __C = (__v16qi)(__v2du){__A, __A};
+  __v16qi __D = (__v16qi)(__v2du){__B, __B};
+  __vector __bool char __select = vec_cmplt((__v16qi)__D, __zero);
+  __C = vec_perm((__v16qi)__C, (__v16qi)__C, (__v16qu)__D);
+  __C = vec_sel(__C, __zero, __select);
+  return (__m64)((__v2du)(__C))[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi8(__m128i __A, __m128i __B) {
+  const __v16qi __zero = {0};
+  __v16qi __selectneg = (__v16qi)vec_cmplt((__v16qi)__B, __zero);
+  __v16qi __selectpos =
+      (__v16qi)vec_neg((__v16qi)vec_cmpgt((__v16qi)__B, __zero));
+  __v16qi __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v16qi)__A, (__v16qi)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi16(__m128i __A, __m128i __B) {
+  const __v8hi __zero = {0};
+  __v8hi __selectneg = (__v8hi)vec_cmplt((__v8hi)__B, __zero);
+  __v8hi __selectpos = (__v8hi)vec_neg((__v8hi)vec_cmpgt((__v8hi)__B, __zero));
+  __v8hi __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v8hi)__A, (__v8hi)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi32(__m128i __A, __m128i __B) {
+  const __v4si __zero = {0};
+  __v4si __selectneg = (__v4si)vec_cmplt((__v4si)__B, __zero);
+  __v4si __selectpos = (__v4si)vec_neg((__v4si)vec_cmpgt((__v4si)__B, __zero));
+  __v4si __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v4si)__A, (__v4si)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi8(__m64 __A, __m64 __B) {
+  const __v16qi __zero = {0};
+  __v16qi __C = (__v16qi)(__v2du){__A, __A};
+  __v16qi __D = (__v16qi)(__v2du){__B, __B};
+  __C = (__v16qi)_mm_sign_epi8((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi16(__m64 __A, __m64 __B) {
+  const __v8hi __zero = {0};
+  __v8hi __C = (__v8hi)(__v2du){__A, __A};
+  __v8hi __D = (__v8hi)(__v2du){__B, __B};
+  __C = (__v8hi)_mm_sign_epi16((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi32(__m64 __A, __m64 __B) {
+  const __v4si __zero = {0};
+  __v4si __C = (__v4si)(__v2du){__A, __A};
+  __v4si __D = (__v4si)(__v2du){__B, __B};
+  __C = (__v4si)_mm_sign_epi32((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
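+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical; requires _ARCH_PWR8 like the intrinsics above).
+   _mm_sign_* negates each lane of A where B is negative, passes it
+   through where B is positive, and zeroes it where B is zero.  */
+#if defined(PPC_WRAPPERS_USAGE_EXAMPLES) && defined(_ARCH_PWR8)
+static inline __m128i example_apply_sign_i8(__m128i __mag, __m128i __sign) {
+  /* Unlike copysign, lanes where __sign is zero come back as zero.  */
+  return _mm_sign_epi8(__mag, __sign);
+}
+#endif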
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maddubs_epi16(__m128i __A, __m128i __B) {
+  __v8hi __unsigned = vec_splats((signed short)0x00ff);
+  __v8hi __C = vec_and(vec_unpackh((__v16qi)__A), __unsigned);
+  __v8hi __D = vec_and(vec_unpackl((__v16qi)__A), __unsigned);
+  __v8hi __E = vec_unpackh((__v16qi)__B);
+  __v8hi __F = vec_unpackl((__v16qi)__B);
+  __C = vec_mul(__C, __E);
+  __D = vec_mul(__D, __F);
+  const __v16qu __odds = {0,  1,  4,  5,  8,  9,  12, 13,
+                          16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __evens = {2,  3,  6,  7,  10, 11, 14, 15,
+                           18, 19, 22, 23, 26, 27, 30, 31};
+  __E = vec_perm(__C, __D, __odds);
+  __F = vec_perm(__C, __D, __evens);
+  return (__m128i)vec_adds(__E, __F);
+}
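+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  _mm_maddubs_epi16 multiplies unsigned bytes
+   of A by signed bytes of B and adds adjacent products with signed
+   saturation: r[i] = sat16(a[2i]*b[2i] + a[2i+1]*b[2i+1]).  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128i example_dot_u8s8(__m128i __pixels, __m128i __weights) {
+  /* Eight 16-bit dot products of adjacent (pixel, weight) byte pairs.  */
+  return _mm_maddubs_epi16(__pixels, __weights);
+}
+#endif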
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maddubs_pi16(__m64 __A, __m64 __B) {
+  __v8hi __C = (__v8hi)(__v2du){__A, __A};
+  __C = vec_unpackl((__v16qi)__C);
+  const __v8hi __unsigned = vec_splats((signed short)0x00ff);
+  __C = vec_and(__C, __unsigned);
+  __v8hi __D = (__v8hi)(__v2du){__B, __B};
+  __D = vec_unpackl((__v16qi)__D);
+  __D = vec_mul(__C, __D);
+  const __v16qu __odds = {0,  1,  4,  5,  8,  9,  12, 13,
+                          16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __evens = {2,  3,  6,  7,  10, 11, 14, 15,
+                           18, 19, 22, 23, 26, 27, 30, 31};
+  __C = vec_perm(__D, __D, __odds);
+  __D = vec_perm(__D, __D, __evens);
+  __C = vec_adds(__C, __D);
+  return (__m64)((__v2du)(__C))[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhrs_epi16(__m128i __A, __m128i __B) {
+  __v4si __C = vec_unpackh((__v8hi)__A);
+  __v4si __D = vec_unpackh((__v8hi)__B);
+  __C = vec_mul(__C, __D);
+  __D = vec_unpackl((__v8hi)__A);
+  __v4si __E = vec_unpackl((__v8hi)__B);
+  __D = vec_mul(__D, __E);
+  const __v4su __shift = vec_splats((unsigned int)14);
+  __C = vec_sr(__C, __shift);
+  __D = vec_sr(__D, __shift);
+  const __v4si __ones = vec_splats((signed int)1);
+  __C = vec_add(__C, __ones);
+  __C = vec_sr(__C, (__v4su)__ones);
+  __D = vec_add(__D, __ones);
+  __D = vec_sr(__D, (__v4su)__ones);
+  return (__m128i)vec_pack(__C, __D);
+}
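+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  Per lane, _mm_mulhrs_epi16 computes a Q15
+   fixed-point multiply with rounding, which is exactly the
+   shift-by-14 / add-one / shift-by-1 sequence above.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline short example_q15_mulhrs_scalar(short __a, short __b) {
+  int __p = ((int)__a * (int)__b) >> 14; /* keep one rounding bit */
+  return (short)((__p + 1) >> 1);        /* round to nearest, ties up */
+}
+#endif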
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhrs_pi16(__m64 __A, __m64 __B) {
+  __v4si __C = (__v4si)(__v2du){__A, __A};
+  __C = vec_unpackh((__v8hi)__C);
+  __v4si __D = (__v4si)(__v2du){__B, __B};
+  __D = vec_unpackh((__v8hi)__D);
+  __C = vec_mul(__C, __D);
+  const __v4su __shift = vec_splats((unsigned int)14);
+  __C = vec_sr(__C, __shift);
+  const __v4si __ones = vec_splats((signed int)1);
+  __C = vec_add(__C, __ones);
+  __C = vec_sr(__C, (__v4su)__ones);
+  __v8hi __E = vec_pack(__C, __D);
+  return (__m64)((__v2du)(__E))[0];
+}
+
+#else
+#include_next <tmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* TMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h
new file mode 100644
index 0000000..cbfac26
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h
@@ -0,0 +1,17 @@
+/*===--- x86gprintrin.h - Implementation of X86 GPR intrinsics on PowerPC --===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef X86GPRINTRIN_H_
+#define X86GPRINTRIN_H_
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#endif /* X86GPRINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h
new file mode 100644
index 0000000..f5c2012
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h
@@ -0,0 +1,28 @@
+/*===---- x86intrin.h - Implementation of X86 intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef X86INTRIN_H_
+#define X86INTRIN_H_
+
+#ifdef __ALTIVEC__
+#include <immintrin.h>
+#endif /* __ALTIVEC__ */
+
+#endif /* X86INTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h
new file mode 100644
index 0000000..ee0032c
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h
@@ -0,0 +1,1827 @@
+/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file helps port code that makes explicit use of Intel
+   intrinsics from x86_64 to powerpc64/powerpc64le.
+
+   Since the x86 SSE intrinsics mainly handle the __m128 type, the PowerPC
+   VMX/VSX ISA is a good match for vector float SIMD operations.
+   However, scalar float operations in vector (XMM) registers require
+   the POWER8 VSX ISA (2.07) level. There are differences in the data
+   format and placement of float scalars in the vector register, which
+   require extra steps to match SSE scalar float semantics on POWER.
+
+   Note that x86_64's MXCSR and PowerISA's FPSCR/VSCR registers differ
+   substantially. It is recommended to use the portable <fenv.h>
+   interface instead of accessing the MXCSR directly.
+
+   Most SSE scalar float intrinsic operations can be performed more
+   efficiently as C language float scalar operations or optimized to
+   use vector SIMD operations. We recommend this for new applications. */
+#error                                                                         \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef XMMINTRIN_H_
+#define XMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+/* Define four value permute mask */
+#define _MM_SHUFFLE(w, x, y, z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
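+
+/* Editorial note (guarded example; the macro below is hypothetical).
+   Each two-bit field of the mask picks the source element for one result
+   lane, so _MM_SHUFFLE(3, 2, 1, 0) == 0xE4 is the identity selector.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+_Static_assert(_MM_SHUFFLE(3, 2, 1, 0) == 0xE4, "identity shuffle mask");
+#endif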
+
+#include <altivec.h>
+
+/* Avoid collisions between altivec.h and strict adherence to C++ and
+   C11 standards.  This should eventually be done inside altivec.h itself,
+   but only after testing a full distro build.  */
+#if defined(__STRICT_ANSI__) &&                                                \
+    (defined(__cplusplus) ||                                                   \
+     (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
+#undef vector
+#undef pixel
+#undef bool
+#endif
+
+/* We need type definitions from the MMX header file.  */
+#include <mmintrin.h>
+
+/* Get _mm_malloc () and _mm_free ().  */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components.  */
+typedef vector float __m128 __attribute__((__may_alias__));
+
+/* Unaligned version of the same type.  */
+typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
+
+/* Internal data types for implementing the intrinsics.  */
+typedef vector float __v4sf;
+
+/* Create an undefined vector.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_ps(void) {
+  __m128 __Y = __Y; /* Deliberate self-initialization: the contents are
+                       unspecified, matching _mm_undefined_ps semantics.  */
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_ps(void) {
+  return __extension__(__m128){0.0f, 0.0f, 0.0f, 0.0f};
+}
+
+/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ps(float const *__P) {
+  return ((__m128)vec_ld(0, (__v4sf *)__P));
+}
+
+/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_ps(float const *__P) {
+  return (vec_vsx_ld(0, __P));
+}
+
+/* Load four SPFP values in reverse order.  The address must be aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadr_ps(float const *__P) {
+  __v4sf __tmp;
+  __m128 __result;
+  static const __vector unsigned char __permute_vector = {
+      0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+      0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
+
+  __tmp = vec_ld(0, (__v4sf *)__P);
+  __result = (__m128)vec_perm(__tmp, __tmp, __permute_vector);
+  return __result;
+}
+
+/* Create a vector with all four elements equal to F.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_ps(float __F) {
+  return __extension__(__m128)(__v4sf){__F, __F, __F, __F};
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_ps1(float __F) {
+  return _mm_set1_ps(__F);
+}
+
+/* Create the vector [Z Y X W].  */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__,
+                                      __artificial__))
+_mm_set_ps(const float __Z, const float __Y, const float __X, const float __W) {
+  return __extension__(__m128)(__v4sf){__W, __X, __Y, __Z};
+}
+
+/* Create the vector [W X Y Z].  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_ps(float __Z, float __Y, float __X, float __W) {
+  return __extension__(__m128)(__v4sf){__Z, __Y, __X, __W};
+}
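+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  _mm_set_ps lists elements high-to-low while
+   _mm_setr_ps lists them low-to-high, so the two spellings below build
+   the same vector.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128 example_iota_ps(void) {
+  /* Element 0 is 0.0f either way.  */
+  return _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f); /* == _mm_set_ps(3,2,1,0) */
+}
+#endif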
+
+/* Store four SPFP values.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ps(float *__P, __m128 __A) {
+  vec_st((__v4sf)__A, 0, (__v4sf *)__P);
+}
+
+/* Store four SPFP values.  The address need not be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_ps(float *__P, __m128 __A) {
+  *(__m128_u *)__P = __A;
+}
+
+/* Store four SPFP values in reverse order.  The address must be aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storer_ps(float *__P, __m128 __A) {
+  __v4sf __tmp;
+  static const __vector unsigned char __permute_vector = {
+      0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+      0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
+
+  __tmp = (__m128)vec_perm(__A, __A, __permute_vector);
+
+  _mm_store_ps(__P, __tmp);
+}
+
+/* Store the lower SPFP value across four words.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store1_ps(float *__P, __m128 __A) {
+  __v4sf __va = vec_splat((__v4sf)__A, 0);
+  _mm_store_ps(__P, __va);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ps1(float *__P, __m128 __A) {
+  _mm_store1_ps(__P, __A);
+}
+
+/* Create a vector with element 0 as F and the rest zero.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_ss(float __F) {
+  return __extension__(__m128)(__v4sf){__F, 0.0f, 0.0f, 0.0f};
+}
+
+/* Sets the low SPFP value of A from the low value of B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  return (vec_sel((__v4sf)__A, (__v4sf)__B, __mask));
+}
+
+/* Create a vector with element 0 as *P and the rest zero.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ss(float const *__P) {
+  return _mm_set_ss(*__P);
+}
+
+/* Stores the lower SPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ss(float *__P, __m128 __A) {
+  *__P = ((__v4sf)__A)[0];
+}
+
+/* Perform the respective operation on the lower SPFP (single-precision
+   floating-point) values of A and B; the upper three SPFP values are
+   passed through from A.  */
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a + __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] + __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a - __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] - __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a * __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] * __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a / __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] / __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = vec_sqrt(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+/* Perform the respective operation on the four SPFP values in A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A + (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A - (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A * (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A / (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_ps(__m128 __A) {
+  return (vec_sqrt((__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rcp_ps(__m128 __A) {
+  return (vec_re((__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rsqrt_ps(__m128 __A) {
+  return (vec_rsqrte(__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rcp_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = _mm_rcp_ps(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rsqrt_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = vec_rsqrte(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_ss(__m128 __A, __m128 __B) {
+  __v4sf __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = vec_min(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_ss(__m128 __A, __m128 __B) {
+  __v4sf __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = vec_max(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_ps(__m128 __A, __m128 __B) {
+  __vector __bool int __m = vec_cmpgt((__v4sf)__B, (__v4sf)__A);
+  return vec_sel(__B, __A, __m);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_ps(__m128 __A, __m128 __B) {
+  __vector __bool int __m = vec_cmpgt((__v4sf)__A, (__v4sf)__B);
+  return vec_sel(__B, __A, __m);
+}
+
+/* Perform logical bit-wise operations on 128-bit values.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_and((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_andc((__v4sf)__B, (__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_or((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_xor((__v4sf)__A, (__v4sf)__B));
+}
+
+/* Perform a comparison on the four SPFP values of A and B.  For each
+   element, if the comparison is true, place a mask of all ones in the
+   result, otherwise a mask of zeros.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpeq((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_ps(__m128 __A, __m128 __B) {
+  __v4sf __temp = (__v4sf)vec_cmpeq((__v4sf)__A, (__v4sf)__B);
+  return ((__m128)vec_nor(__temp, __temp));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_ps(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+  __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+  return ((__m128)vec_and(__c, __d));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_ps(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+  __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+  return ((__m128)vec_or(__c, __d));
+}
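+
+/* Illustrative sketch (editorial addition; the guard macro and function
+   name are hypothetical).  A float is a NaN exactly when its absolute
+   bit pattern exceeds the exponent mask 0x7f800000, so cmpord/cmpunord
+   reduce to the unsigned compares above; cmpord(x, x) is therefore an
+   all-ones mask wherever x is not NaN.  */
+#ifdef PPC_WRAPPERS_USAGE_EXAMPLES
+static inline __m128 example_replace_nans(__m128 __x, __m128 __fill) {
+  __m128 __ord = _mm_cmpord_ps(__x, __x);
+  return _mm_or_ps(_mm_and_ps(__ord, __x), _mm_andnot_ps(__ord, __fill));
+}
+#endif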
+
+/* Perform a comparison on the lower SPFP values of A and B.  If the
+   comparison is true, place a mask of all ones in the result, otherwise a
+   mask of zeros.  The upper three SPFP values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpeq(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmplt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmple(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpgt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpge(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpeq(__a, __b);
+  __c = vec_nor(__c, __c);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpge(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpgt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmple(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmplt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_ss(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+  __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+  __c = vec_and(__c, __d);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_ss(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+  __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+  __c = vec_or(__c, __d);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+/* Compare the lower SPFP values of A and B and return 1 if true
+   and 0 if false.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comieq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comilt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comile_ss(__m128 __A, __m128 __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comigt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comige_ss(__m128 __A, __m128 __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comineq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] != __B[0]);
+}
+
+/* FIXME
+ * The _mm_ucomi??_ss implementations below are exactly the same as
+ * the _mm_comi??_ss ones because GCC for PowerPC only generates
+ * unordered compares (scalar and vector).
+ * Technically _mm_comieq_ss et al. should be using the ordered
+ * compare and signal for QNaNs.
+ * The _mm_ucomieq_ss et al. should be OK as is.
+ */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomieq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomilt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomile_ss(__m128 __A, __m128 __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomigt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomige_ss(__m128 __A, __m128 __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomineq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] != __B[0]);
+}
+
+extern __inline float
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_f32(__m128 __A) {
+  return ((__v4sf)__A)[0];
+}
+
+/* Convert the lower SPFP value to a 32-bit integer according to the current
+   rounding mode.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si32(__m128 __A) {
+  int __res;
+#ifdef _ARCH_PWR8
+  double __dtmp;
+  __asm__(
+#ifdef __LITTLE_ENDIAN__
+      "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+      "xscvspdp %x2,%x0;\n"
+      "fctiw  %2,%2;\n"
+      "mfvsrd  %1,%x2;\n"
+      : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+      :);
+#else
+  __res = __builtin_rint(__A[0]);
+#endif
+  return __res;
+}
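+
+/* Usage sketch (illustrative): unlike the truncating _mm_cvttss_si32
+   further below, this conversion honors the current rounding mode, so
+   under the default round-to-nearest-even mode 2.5f converts to 2:
+
+   int __i = _mm_cvtss_si32(_mm_set_ss(2.5f));  // 2, not 3
+*/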
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_ss2si(__m128 __A) {
+  return _mm_cvtss_si32(__A);
+}
+
+/* Convert the lower SPFP value to a 64-bit integer according to the
+   current rounding mode.  */
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si64(__m128 __A) {
+  long long __res;
+#if defined(_ARCH_PWR8) && defined(__powerpc64__)
+  double __dtmp;
+  __asm__(
+#ifdef __LITTLE_ENDIAN__
+      "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+      "xscvspdp %x2,%x0;\n"
+      "fctid  %2,%2;\n"
+      "mfvsrd  %1,%x2;\n"
+      : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+      :);
+#else
+  __res = __builtin_llrint(__A[0]);
+#endif
+  return __res;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si64x(__m128 __A) {
+  return _mm_cvtss_si64((__v4sf)__A);
+}
+
+/* Constants for use with _mm_prefetch.  */
+enum _mm_hint {
+  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
+  _MM_HINT_ET0 = 7,
+  _MM_HINT_ET1 = 6,
+  _MM_HINT_T0 = 3,
+  _MM_HINT_T1 = 2,
+  _MM_HINT_T2 = 1,
+  _MM_HINT_NTA = 0
+};
+
+/* Loads one cache line from address P to a location "closer" to the
+   processor.  The selector I specifies the type of prefetch operation.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_prefetch(const void *__P, enum _mm_hint __I) {
+  /* Current PowerPC implementations ignore the hint parameter.  */
+  __builtin_prefetch(__P);
+}
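+
+/* Usage sketch (illustrative, assuming some readable buffer __buf):
+   on this port every hint maps to the same __builtin_prefetch call.
+
+   _mm_prefetch(__buf, _MM_HINT_T0);
+*/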
+
+/* Convert the two lower SPFP values to 32-bit integers according to the
+   current rounding mode.  Return the integers in packed form.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi32(__m128 __A) {
+  __v4sf __temp, __rounded;
+  __vector unsigned long long __result;
+
+  /* Splat two lower SPFP values to both halves.  */
+  __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+  __rounded = vec_rint(__temp);
+  __result = (__vector unsigned long long)vec_cts(__rounded, 0);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_ps2pi(__m128 __A) {
+  return _mm_cvtps_pi32(__A);
+}
+
+/* Truncate the lower SPFP value to a 32-bit integer.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si32(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 32-bit integer and return.  */
+  return __temp;
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtt_ss2si(__m128 __A) {
+  return _mm_cvttss_si32(__A);
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si64(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 64-bit integer and return.  */
+  return __temp;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si64x(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 64-bit integer and return.  */
+  return __temp;
+}
+
+/* Truncate the two lower SPFP values to 32-bit integers.  Return the
+   integers in packed form.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttps_pi32(__m128 __A) {
+  __v4sf __temp;
+  __vector unsigned long long __result;
+
+  /* Splat two lower SPFP values to both halves.  */
+  __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+  __result = (__vector unsigned long long)vec_cts(__temp, 0);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtt_ps2pi(__m128 __A) {
+  return _mm_cvttps_pi32(__A);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_ss(__m128 __A, int __B) {
+  float __temp = __B;
+  __A[0] = __temp;
+
+  return __A;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_si2ss(__m128 __A, int __B) {
+  return _mm_cvtsi32_ss(__A, __B);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A.  */
+/* Intel intrinsic.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_ss(__m128 __A, long long __B) {
+  float __temp = __B;
+  __A[0] = __temp;
+
+  return __A;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_ss(__m128 __A, long long __B) {
+  return _mm_cvtsi64_ss(__A, __B);
+}
+
+/* Convert the two 32-bit values in B to SPFP form and insert them
+   as the two lower elements in A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32_ps(__m128 __A, __m64 __B) {
+  __vector signed int __vm1;
+  __vector float __vf1;
+
+  __vm1 = (__vector signed int)(__vector unsigned long long){__B, __B};
+  __vf1 = (__vector float)vec_ctf(__vm1, 0);
+
+  return ((__m128)(__vector unsigned long long){
+      ((__vector unsigned long long)__vf1)[0],
+      ((__vector unsigned long long)__A)[1]});
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_pi2ps(__m128 __A, __m64 __B) {
+  return _mm_cvtpi32_ps(__A, __B);
+}
+
+/* Convert the four signed 16-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi16_ps(__m64 __A) {
+  __vector signed short __vs8;
+  __vector signed int __vi4;
+  __vector float __vf1;
+
+  __vs8 = (__vector signed short)(__vector unsigned long long){__A, __A};
+  __vi4 = vec_vupklsh(__vs8);
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the four unsigned 16-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpu16_ps(__m64 __A) {
+  const __vector unsigned short __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+  __vector unsigned short __vs8;
+  __vector unsigned int __vi4;
+  __vector float __vf1;
+
+  __vs8 = (__vector unsigned short)(__vector unsigned long long){__A, __A};
+  __vi4 = (__vector unsigned int)vec_mergel
+#ifdef __LITTLE_ENDIAN__
+      (__vs8, __zero);
+#else
+      (__zero, __vs8);
+#endif
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the low four signed 8-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi8_ps(__m64 __A) {
+  __vector signed char __vc16;
+  __vector signed short __vs8;
+  __vector signed int __vi4;
+  __vector float __vf1;
+
+  __vc16 = (__vector signed char)(__vector unsigned long long){__A, __A};
+  __vs8 = vec_vupkhsb(__vc16);
+  __vi4 = vec_vupkhsh(__vs8);
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpu8_ps(__m64 __A) {
+  const __vector unsigned char __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+  __vector unsigned char __vc16;
+  __vector unsigned short __vs8;
+  __vector unsigned int __vi4;
+  __vector float __vf1;
+
+  __vc16 = (__vector unsigned char)(__vector unsigned long long){__A, __A};
+#ifdef __LITTLE_ENDIAN__
+  __vs8 = (__vector unsigned short)vec_mergel(__vc16, __zero);
+  __vi4 =
+      (__vector unsigned int)vec_mergeh(__vs8, (__vector unsigned short)__zero);
+#else
+  __vs8 = (__vector unsigned short)vec_mergel(__zero, __vc16);
+  __vi4 =
+      (__vector unsigned int)vec_mergeh((__vector unsigned short)__zero, __vs8);
+#endif
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the four signed 32-bit values in A and B to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) {
+  __vector signed int __vi4;
+  __vector float __vf4;
+
+  __vi4 = (__vector signed int)(__vector unsigned long long){__A, __B};
+  __vf4 = (__vector float)vec_ctf(__vi4, 0);
+  return (__m128)__vf4;
+}
+
+/* Convert the four SPFP values in A to four signed 16-bit integers.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi16(__m128 __A) {
+  __v4sf __rounded;
+  __vector signed int __temp;
+  __vector unsigned long long __result;
+
+  __rounded = vec_rint(__A);
+  __temp = vec_cts(__rounded, 0);
+  __result = (__vector unsigned long long)vec_pack(__temp, __temp);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+/* Convert the four SPFP values in A to four signed 8-bit integers.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi8(__m128 __A) {
+  __v4sf __rounded;
+  __vector signed int __tmp_i;
+  static const __vector signed int __zero = {0, 0, 0, 0};
+  __vector signed short __tmp_s;
+  __vector signed char __res_v;
+
+  __rounded = vec_rint(__A);
+  __tmp_i = vec_cts(__rounded, 0);
+  __tmp_s = vec_pack(__tmp_i, __zero);
+  __res_v = vec_pack(__tmp_s, __tmp_s);
+  return (__m64)((__vector long long)__res_v)[0];
+}
+
+/* Selects four specific SPFP values from A and B based on MASK.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_ps(__m128 __A, __m128 __B, int const __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned int __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+  };
+  __vector unsigned int __t;
+
+  __t[0] = __permute_selectors[__element_selector_10];
+  __t[1] = __permute_selectors[__element_selector_32];
+  __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+  __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+  return vec_perm((__v4sf)__A, (__v4sf)__B, (__vector unsigned char)__t);
+}
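+
+/* Selector sketch (illustrative): the 8-bit mask packs four 2-bit
+   element indices, lowest pair first; the low two indices pick from
+   A, the high two from B.  For example 0x1B == 0b00011011 decodes as
+   3,2,1,0, so with the same operand passed twice it reverses the
+   four elements:
+
+   __m128 __rev = _mm_shuffle_ps(__v, __v, 0x1B);
+*/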
+
+/* Selects and interleaves the upper two SPFP values from A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_vmrglw((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Selects and interleaves the lower two SPFP values from A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_vmrghw((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Sets the upper two SPFP values with 64-bits of data loaded from P;
+   the lower two values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadh_pi(__m128 __A, __m64 const *__P) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+  __vector unsigned long long __p = vec_splats(*__P);
+  __a[1] = __p[1];
+
+  return (__m128)__a;
+}
+
+/* Stores the upper two SPFP values of A into P.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeh_pi(__m64 *__P, __m128 __A) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+
+  *__P = __a[1];
+}
+
+/* Moves the upper two values of B into the lower two values of A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movehl_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_mergel((__vector unsigned long long)__B,
+                            (__vector unsigned long long)__A);
+}
+
+/* Moves the lower two values of B into the upper two values of A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movelh_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_mergeh((__vector unsigned long long)__A,
+                            (__vector unsigned long long)__B);
+}
+
+/* Sets the lower two SPFP values with 64-bits of data loaded from P;
+   the upper two values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_pi(__m128 __A, __m64 const *__P) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+  __vector unsigned long long __p = vec_splats(*__P);
+  __a[0] = __p[0];
+
+  return (__m128)__a;
+}
+
+/* Stores the lower two SPFP values of A into P.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_pi(__m64 *__P, __m128 __A) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+
+  *__P = __a[0];
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_ps(__m128 __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__vector unsigned int)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned int __perm_mask = {
+#ifdef __LITTLE_ENDIAN__
+      0x00204060, 0x80808080, 0x80808080, 0x80808080
+#else
+      0x80808080, 0x80808080, 0x80808080, 0x00204060
+#endif
+  };
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
+
+/* Create a vector with all four elements equal to *P.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load1_ps(float const *__P) {
+  return _mm_set1_ps(*__P);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ps1(float const *__P) {
+  return _mm_load1_ps(__P);
+}
+
+/* Extracts one of the four words of A.  The selector N must be immediate.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_pi16(__m64 const __A, int const __N) {
+  unsigned int __shiftr = __N & 3;
+#ifdef __BIG_ENDIAN__
+  __shiftr = 3 - __shiftr;
+#endif
+
+  return ((__A >> (__shiftr * 16)) & 0xffff);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pextrw(__m64 const __A, int const __N) {
+  return _mm_extract_pi16(__A, __N);
+}
+
+/* Inserts word D into one of four words of A.  The selector N must be
+   immediate.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_pi16(__m64 const __A, int const __D, int const __N) {
+  const int __shiftl = (__N & 3) * 16;
+  const __m64 __shiftD = (const __m64)__D << __shiftl;
+  const __m64 __mask = 0xffffUL << __shiftl;
+  __m64 __result = (__A & (~__mask)) | (__shiftD & __mask);
+
+  return __result;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pinsrw(__m64 const __A, int const __D, int const __N) {
+  return _mm_insert_pi16(__A, __D, __N);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pi16(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector signed short __a, __b, __r;
+  __vector __bool short __c;
+
+  __a = (__vector signed short)vec_splats(__A);
+  __b = (__vector signed short)vec_splats(__B);
+  __c = (__vector __bool short)vec_cmpgt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  __res.as_short[0] = (__m1.as_short[0] > __m2.as_short[0]) ? __m1.as_short[0]
+                                                            : __m2.as_short[0];
+  __res.as_short[1] = (__m1.as_short[1] > __m2.as_short[1]) ? __m1.as_short[1]
+                                                            : __m2.as_short[1];
+  __res.as_short[2] = (__m1.as_short[2] > __m2.as_short[2]) ? __m1.as_short[2]
+                                                            : __m2.as_short[2];
+  __res.as_short[3] = (__m1.as_short[3] > __m2.as_short[3]) ? __m1.as_short[3]
+                                                            : __m2.as_short[3];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmaxsw(__m64 __A, __m64 __B) {
+  return _mm_max_pi16(__A, __B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pu8(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector unsigned char __a, __b, __r;
+  __vector __bool char __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = (__vector __bool char)vec_cmpgt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+  long __i;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  for (__i = 0; __i < 8; __i++)
+    __res.as_char[__i] =
+        ((unsigned char)__m1.as_char[__i] > (unsigned char)__m2.as_char[__i])
+            ? __m1.as_char[__i]
+            : __m2.as_char[__i];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmaxub(__m64 __A, __m64 __B) {
+  return _mm_max_pu8(__A, __B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pi16(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector signed short __a, __b, __r;
+  __vector __bool short __c;
+
+  __a = (__vector signed short)vec_splats(__A);
+  __b = (__vector signed short)vec_splats(__B);
+  __c = (__vector __bool short)vec_cmplt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  __res.as_short[0] = (__m1.as_short[0] < __m2.as_short[0]) ? __m1.as_short[0]
+                                                            : __m2.as_short[0];
+  __res.as_short[1] = (__m1.as_short[1] < __m2.as_short[1]) ? __m1.as_short[1]
+                                                            : __m2.as_short[1];
+  __res.as_short[2] = (__m1.as_short[2] < __m2.as_short[2]) ? __m1.as_short[2]
+                                                            : __m2.as_short[2];
+  __res.as_short[3] = (__m1.as_short[3] < __m2.as_short[3]) ? __m1.as_short[3]
+                                                            : __m2.as_short[3];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pminsw(__m64 __A, __m64 __B) {
+  return _mm_min_pi16(__A, __B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pu8(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector unsigned char __a, __b, __r;
+  __vector __bool char __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = (__vector __bool char)vec_cmplt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+  long __i;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  for (__i = 0; __i < 8; __i++)
+    __res.as_char[__i] =
+        ((unsigned char)__m1.as_char[__i] < (unsigned char)__m2.as_char[__i])
+            ? __m1.as_char[__i]
+            : __m2.as_char[__i];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pminub(__m64 __A, __m64 __B) {
+  return _mm_min_pu8(__A, __B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_pi8(__m64 __A) {
+#ifdef __powerpc64__
+  unsigned long long __p =
+#ifdef __LITTLE_ENDIAN__
+      0x0008101820283038UL; // permute control for sign bits
+#else
+      0x3830282018100800UL; // permute control for sign bits
+#endif
+  return __builtin_bpermd(__p, __A);
+#else
+#ifdef __LITTLE_ENDIAN__
+  unsigned int __mask = 0x20283038UL;
+  unsigned int __r1 = __builtin_bpermd(__mask, __A) & 0xf;
+  unsigned int __r2 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+#else
+  unsigned int __mask = 0x38302820UL;
+  unsigned int __r1 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+  unsigned int __r2 = __builtin_bpermd(__mask, __A) & 0xf;
+#endif
+  return (__r2 << 4) | __r1;
+#endif
+}
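+
+/* Usage sketch (illustrative): each permute-control byte above selects
+   one byte's sign bit for bpermd, packing the eight sign bits into a
+   single result byte:
+
+   int __m = _mm_movemask_pi8((__m64)0xff00ff00ff00ff00UL);  // 0xaa
+*/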
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmovmskb(__m64 __A) {
+  return _mm_movemask_pi8(__A);
+}
+
+/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
+   in B and produce the high 16 bits of the 32-bit results.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_pu16(__m64 __A, __m64 __B) {
+  __vector unsigned short __a, __b;
+  __vector unsigned short __c;
+  __vector unsigned int __w0, __w1;
+  __vector unsigned char __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+      0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+  };
+
+  __a = (__vector unsigned short)vec_splats(__A);
+  __b = (__vector unsigned short)vec_splats(__B);
+
+  __w0 = vec_vmuleuh(__a, __b);
+  __w1 = vec_vmulouh(__a, __b);
+  __c = (__vector unsigned short)vec_perm(__w0, __w1, __xform1);
+
+  return (__m64)((__vector long long)__c)[0];
+}
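+
+/* Scalar reference (illustrative): per 16-bit lane, the even/odd
+   multiplies and the permute above amount to
+
+   __c[i] = (unsigned short)(((unsigned int)__a[i] * __b[i]) >> 16);
+*/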
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmulhuw(__m64 __A, __m64 __B) {
+  return _mm_mulhi_pu16(__A, __B);
+}
+
+/* Return a combination of the four 16-bit values in A.  The selector
+   must be an immediate.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pi16(__m64 __A, int const __N) {
+  unsigned long __element_selector_10 = __N & 0x03;
+  unsigned long __element_selector_32 = (__N >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__N >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__N >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+      0x0607, 0x0405, 0x0203, 0x0001
+#endif
+  };
+  __m64_union __t;
+  __vector unsigned long long __a, __p, __r;
+
+#ifdef __LITTLE_ENDIAN__
+  __t.as_short[0] = __permute_selectors[__element_selector_10];
+  __t.as_short[1] = __permute_selectors[__element_selector_32];
+  __t.as_short[2] = __permute_selectors[__element_selector_54];
+  __t.as_short[3] = __permute_selectors[__element_selector_76];
+#else
+  __t.as_short[3] = __permute_selectors[__element_selector_10];
+  __t.as_short[2] = __permute_selectors[__element_selector_32];
+  __t.as_short[1] = __permute_selectors[__element_selector_54];
+  __t.as_short[0] = __permute_selectors[__element_selector_76];
+#endif
+  __p = vec_splats(__t.as_m64);
+  __a = vec_splats(__A);
+  __r = vec_perm(__a, __a, (__vector unsigned char)__p);
+  return (__m64)((__vector long long)__r)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pshufw(__m64 __A, int const __N) {
+  return _mm_shuffle_pi16(__A, __N);
+}
+
+/* Conditionally store byte elements of A into P.  The high bit of each
+   byte in the selector N determines whether the corresponding byte from
+   A is stored.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maskmove_si64(__m64 __A, __m64 __N, char *__P) {
+  __m64 __hibit = 0x8080808080808080UL;
+  __m64 __mask, __tmp;
+  __m64 *__p = (__m64 *)__P;
+
+  __tmp = *__p;
+  __mask = _mm_cmpeq_pi8((__N & __hibit), __hibit);
+  __tmp = (__tmp & (~__mask)) | (__A & __mask);
+  *__p = __tmp;
+}
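+
+/* Usage sketch (illustrative): only bytes whose selector byte has the
+   high bit set are written; the rest of *__P is preserved.  Here the
+   selector sets 0x80 in a single byte lane, so just that lane is
+   stored:
+
+   char __buf[8] = {0};
+   _mm_maskmove_si64((__m64)0x1111111111111111UL,
+                     (__m64)0x0000000000000080UL, __buf);
+   // exactly one byte of __buf becomes 0x11
+*/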
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_maskmovq(__m64 __A, __m64 __N, char *__P) {
+  _mm_maskmove_si64(__A, __N, __P);
+}
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_pu8(__m64 __A, __m64 __B) {
+  __vector unsigned char __a, __b, __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = vec_avg(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pavgb(__m64 __A, __m64 __B) {
+  return _mm_avg_pu8(__A, __B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_pu16(__m64 __A, __m64 __B) {
+  __vector unsigned short __a, __b, __c;
+
+  __a = (__vector unsigned short)vec_splats(__A);
+  __b = (__vector unsigned short)vec_splats(__B);
+  __c = vec_avg(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pavgw(__m64 __A, __m64 __B) {
+  return _mm_avg_pu16(__A, __B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+   values in A and B.  Return the value in the lower 16-bit word; the
+   upper words are cleared.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sad_pu8(__m64 __A, __m64 __B) {
+  __vector unsigned char __a, __b;
+  __vector unsigned char __vmin, __vmax, __vabsdiff;
+  __vector signed int __vsum;
+  const __vector unsigned int __zero = {0, 0, 0, 0};
+  __m64_union __result = {0};
+
+  __a = (__vector unsigned char)(__vector unsigned long long){0UL, __A};
+  __b = (__vector unsigned char)(__vector unsigned long long){0UL, __B};
+  __vmin = vec_min(__a, __b);
+  __vmax = vec_max(__a, __b);
+  __vabsdiff = vec_sub(__vmax, __vmin);
+  /* Sum four groups of bytes into integers.  */
+  __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
+  /* Sum across four integers with integer result.  */
+  __vsum = vec_sums(__vsum, (__vector signed int)__zero);
+  /* The sum is in the rightmost 32 bits of the vector result.
+     Transfer to a GPR and truncate to 16 bits.  */
+  __result.as_short[0] = __vsum[3];
+  return __result.as_m64;
+}
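+
+/* Scalar reference (illustrative): the vector sequence above computes
+
+   unsigned int __sad = 0;
+   for (int __i = 0; __i < 8; __i++)
+     __sad += (__a[__i] > __b[__i]) ? __a[__i] - __b[__i]
+                                    : __b[__i] - __a[__i];
+   // __sad lands in the low 16-bit word; the upper words are zero
+*/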
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psadbw(__m64 __A, __m64 __B) {
+  return _mm_sad_pu8(__A, __B);
+}
+
+/* Stores the data in A to the address P without polluting the caches.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_pi(__m64 *__P, __m64 __A) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__P) : "memory");
+  *__P = __A;
+}
+
+/* Likewise.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_ps(float *__P, __m128 __A) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__P) : "memory");
+  _mm_store_ps(__P, __A);
+}
+
+/* Guarantees that every preceding store is globally visible before
+   any subsequent store.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sfence(void) {
+  /* Generate a light weight sync.  */
+  __atomic_thread_fence(__ATOMIC_RELEASE);
+}
+
+/* The execution of the next instruction is delayed by an implementation
+   specific amount of time.  The instruction does not modify the
+   architectural state.  This is after the pop_options pragma because
+   it does not require SSE support in the processor--the encoding is a
+   nop on processors that do not support it.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_pause(void) {
+  /* There is no exact match with this construct, but the following is
+     close to the desired effect.  */
+#if _ARCH_PWR8
+  /* On power8 and later processors we can depend on Program Priority
+     (PRI) and its associated "very low" setting.  Since we don't know
+     what PRI this thread is running at we: 1) save the current PRI
+     from the PPR SPR into a local GPR, 2) set the PRI to "very low"
+     via the special "or 31,31,31" encoding, 3) issue an "isync" to
+     ensure the PRI change takes effect before we execute any more
+     instructions.
+     Now we can execute a lwsync (release barrier) while we execute
+     this thread at "very low" PRI.  Finally we restore the original
+     PRI and continue execution.  */
+  unsigned long __PPR;
+
+  __asm__ volatile("	mfppr	%0;"
+                   "   or 31,31,31;"
+                   "   isync;"
+                   "   lwsync;"
+                   "   isync;"
+                   "   mtppr	%0;"
+                   : "=r"(__PPR)
+                   :
+                   : "memory");
+#else
+  /* For older processor where we may not even have Program Priority
+     controls we can only depend on Heavy Weight Sync.  */
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#endif
+}
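+
+/* Usage sketch (illustrative, assuming a shared flag variable):
+   _mm_pause is typically placed in a spin-wait loop to lower the
+   priority of the polling thread while it waits:
+
+   while (__atomic_load_n(&__flag, __ATOMIC_ACQUIRE) == 0)
+     _mm_pause();
+*/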
+
+/* Transpose the 4x4 matrix composed of row[0-3].  */
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                              \
+  do {                                                                         \
+    __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);         \
+    __v4sf __t0 = vec_vmrghw(__r0, __r1);                                      \
+    __v4sf __t1 = vec_vmrghw(__r2, __r3);                                      \
+    __v4sf __t2 = vec_vmrglw(__r0, __r1);                                      \
+    __v4sf __t3 = vec_vmrglw(__r2, __r3);                                      \
+    (row0) = (__v4sf)vec_mergeh((__vector long long)__t0,                      \
+                                (__vector long long)__t1);                     \
+    (row1) = (__v4sf)vec_mergel((__vector long long)__t0,                      \
+                                (__vector long long)__t1);                     \
+    (row2) = (__v4sf)vec_mergeh((__vector long long)__t2,                      \
+                                (__vector long long)__t3);                     \
+    (row3) = (__v4sf)vec_mergel((__vector long long)__t2,                      \
+                                (__vector long long)__t3);                     \
+  } while (0)
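+
+/* Usage sketch (illustrative, assuming _mm_set_ps from this header):
+   after the transpose, row j holds what was column j.
+
+   __m128 __r0 = _mm_set_ps(3.f, 2.f, 1.f, 0.f);
+   // ... likewise __r1, __r2, __r3 ...
+   _MM_TRANSPOSE4_PS(__r0, __r1, __r2, __r3);
+*/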
+
+/* For backward source compatibility.  */
+//# include <emmintrin.h>
+
+#else
+#include_next <xmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* XMMINTRIN_H_ */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/prfchwintrin.h b/darwin-x86/lib64/clang/16.0.2/include/prfchwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/prfchwintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/darwin-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
rename to darwin-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
index 008b8dd..282620d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ b/darwin-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
@@ -128,8 +128,10 @@
 INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
 INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
 INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
 INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
 INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
 INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
@@ -644,24 +646,33 @@
        (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
         (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
 
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 8
 /* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 7
+#define INSTR_PROF_INDEX_VERSION 8
 /* Coverage mapping format version (start from 0). */
 #define INSTR_PROF_COVMAP_VERSION 5
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
  * generated profile, and 0 if this is a Clang FE generated profile.
  * 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
+ * The 60th bit indicates single byte coverage instrumentation.
+ * The 61st bit indicates function entry instrumentation only.
+ * The 62nd bit indicates whether memory profile information is present.
  */
 #define VARIANT_MASKS_ALL 0xff00000000000000ULL
 #define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
 #define VARIANT_MASK_IR_PROF (0x1ULL << 56)
 #define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
 #define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
+#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
+#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
+#define VARIANT_MASK_MEMPROF (0x1ULL << 62)
 #define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
 #define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
 #define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ptwriteintrin.h b/darwin-x86/lib64/clang/16.0.2/include/ptwriteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ptwriteintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/ptwriteintrin.h
diff --git a/darwin-x86/lib64/clang/16.0.2/include/rdpruintrin.h b/darwin-x86/lib64/clang/16.0.2/include/rdpruintrin.h
new file mode 100644
index 0000000..89732bb
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/rdpruintrin.h
@@ -0,0 +1,57 @@
+/*===---- rdpruintrin.h - RDPRU intrinsics ---------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H
+#error "Never use <rdpruintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDPRUINTRIN_H
+#define __RDPRUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__,  __target__("rdpru")))
+
+
+/// Reads the content of a processor register.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> RDPRU </c> instruction.
+///
+/// \param reg_id
+///    A processor register identifier.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__rdpru (int reg_id)
+{
+  return __builtin_ia32_rdpru(reg_id);
+}
+
+#define __RDPRU_MPERF 0
+#define __RDPRU_APERF 1
+
+/// Reads the content of processor register MPERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates instruction <c> RDPRU </c> to read the value of
+/// register MPERF.
+#define __mperf() __builtin_ia32_rdpru(__RDPRU_MPERF)
+
+/// Reads the content of processor register APERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates instruction <c> RDPRU </c> to read the value of
+/// register APERF.
+#define __aperf() __builtin_ia32_rdpru(__RDPRU_APERF)
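+
+/* Usage sketch (illustrative, assuming a CPU that supports RDPRU):
+ * the APERF/MPERF delta ratio over an interval approximates the
+ * average effective clock frequency relative to the nominal one.
+ *
+ *   unsigned long long __m0 = __mperf(), __a0 = __aperf();
+ *   // ... workload ...
+ *   unsigned long long __dm = __mperf() - __m0;
+ *   unsigned long long __da = __aperf() - __a0;
+ */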
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDPRUINTRIN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h b/darwin-x86/lib64/clang/16.0.2/include/rdseedintrin.h
similarity index 87%
rename from darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/rdseedintrin.h
index ccb3d2d..405bc24 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/rdseedintrin.h
@@ -20,20 +20,20 @@
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed16_step(unsigned short *__p)
 {
-  return __builtin_ia32_rdseed16_step(__p);
+  return (int) __builtin_ia32_rdseed16_step(__p);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed32_step(unsigned int *__p)
 {
-  return __builtin_ia32_rdseed32_step(__p);
+  return (int) __builtin_ia32_rdseed32_step(__p);
 }
 
 #ifdef __x86_64__
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed64_step(unsigned long long *__p)
 {
-  return __builtin_ia32_rdseed64_step(__p);
+  return (int) __builtin_ia32_rdseed64_step(__p);
 }
 #endif
 
diff --git a/darwin-x86/lib64/clang/16.0.2/include/riscv_vector.h b/darwin-x86/lib64/clang/16.0.2/include/riscv_vector.h
new file mode 100644
index 0000000..cac2b2d
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/riscv_vector.h
@@ -0,0 +1,202 @@
+/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics -------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_VECTOR_H
+#define __RISCV_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_vector
+#error "Vector intrinsics require the vector extension."
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma clang riscv intrinsic vector
+
+
+enum RVV_CSR {
+  RVV_VSTART = 0,
+  RVV_VXSAT,
+  RVV_VXRM,
+  RVV_VCSR,
+};
+
+static __inline__ __attribute__((__always_inline__, __nodebug__))
+unsigned long vread_csr(enum RVV_CSR __csr) {
+  unsigned long __rv = 0;
+  switch (__csr) {
+    case RVV_VSTART:
+      __asm__ __volatile__ ("csrr\t%0, vstart" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VXSAT:
+      __asm__ __volatile__ ("csrr\t%0, vxsat" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VXRM:
+      __asm__ __volatile__ ("csrr\t%0, vxrm" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VCSR:
+      __asm__ __volatile__ ("csrr\t%0, vcsr" : "=r"(__rv) : : "memory");
+      break;
+  }
+  return __rv;
+}
+
+static __inline__ __attribute__((__always_inline__, __nodebug__))
+void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
+  switch (__csr) {
+    case RVV_VSTART:
+      __asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VXSAT:
+      __asm__ __volatile__ ("csrw\tvxsat, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VXRM:
+      __asm__ __volatile__ ("csrw\tvxrm, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VCSR:
+      __asm__ __volatile__ ("csrw\tvcsr, %z0" : : "rJ"(__value) : "memory");
+      break;
+  }
+}
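+
+/* Usage sketch (illustrative): save, change, and restore the
+   fixed-point rounding mode CSR; encoding 0 is round-to-nearest-up.
+
+   unsigned long __old = vread_csr(RVV_VXRM);
+   vwrite_csr(RVV_VXRM, 0);
+   // ... fixed-point work ...
+   vwrite_csr(RVV_VXRM, __old);
+*/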
+
+#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+
+#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+
+#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+
+#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+
+#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+
+#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
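+
+/* Usage sketch (illustrative): a strip-mined loop requests a vector
+   length for 32-bit elements at LMUL=1 on every iteration and then
+   advances by however many elements the hardware granted:
+
+   for (size_t __i = 0, __vl; __i < __n; __i += __vl) {
+     __vl = vsetvl_e32m1(__n - __i);
+     // ... process __vl elements starting at __i ...
+   }
+*/
+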
+typedef __rvv_bool64_t vbool64_t;
+typedef __rvv_bool32_t vbool32_t;
+typedef __rvv_bool16_t vbool16_t;
+typedef __rvv_bool8_t vbool8_t;
+typedef __rvv_bool4_t vbool4_t;
+typedef __rvv_bool2_t vbool2_t;
+typedef __rvv_bool1_t vbool1_t;
+typedef __rvv_int8mf8_t vint8mf8_t;
+typedef __rvv_uint8mf8_t vuint8mf8_t;
+typedef __rvv_int8mf4_t vint8mf4_t;
+typedef __rvv_uint8mf4_t vuint8mf4_t;
+typedef __rvv_int8mf2_t vint8mf2_t;
+typedef __rvv_uint8mf2_t vuint8mf2_t;
+typedef __rvv_int8m1_t vint8m1_t;
+typedef __rvv_uint8m1_t vuint8m1_t;
+typedef __rvv_int8m2_t vint8m2_t;
+typedef __rvv_uint8m2_t vuint8m2_t;
+typedef __rvv_int8m4_t vint8m4_t;
+typedef __rvv_uint8m4_t vuint8m4_t;
+typedef __rvv_int8m8_t vint8m8_t;
+typedef __rvv_uint8m8_t vuint8m8_t;
+typedef __rvv_int16mf4_t vint16mf4_t;
+typedef __rvv_uint16mf4_t vuint16mf4_t;
+typedef __rvv_int16mf2_t vint16mf2_t;
+typedef __rvv_uint16mf2_t vuint16mf2_t;
+typedef __rvv_int16m1_t vint16m1_t;
+typedef __rvv_uint16m1_t vuint16m1_t;
+typedef __rvv_int16m2_t vint16m2_t;
+typedef __rvv_uint16m2_t vuint16m2_t;
+typedef __rvv_int16m4_t vint16m4_t;
+typedef __rvv_uint16m4_t vuint16m4_t;
+typedef __rvv_int16m8_t vint16m8_t;
+typedef __rvv_uint16m8_t vuint16m8_t;
+typedef __rvv_int32mf2_t vint32mf2_t;
+typedef __rvv_uint32mf2_t vuint32mf2_t;
+typedef __rvv_int32m1_t vint32m1_t;
+typedef __rvv_uint32m1_t vuint32m1_t;
+typedef __rvv_int32m2_t vint32m2_t;
+typedef __rvv_uint32m2_t vuint32m2_t;
+typedef __rvv_int32m4_t vint32m4_t;
+typedef __rvv_uint32m4_t vuint32m4_t;
+typedef __rvv_int32m8_t vint32m8_t;
+typedef __rvv_uint32m8_t vuint32m8_t;
+typedef __rvv_int64m1_t vint64m1_t;
+typedef __rvv_uint64m1_t vuint64m1_t;
+typedef __rvv_int64m2_t vint64m2_t;
+typedef __rvv_uint64m2_t vuint64m2_t;
+typedef __rvv_int64m4_t vint64m4_t;
+typedef __rvv_uint64m4_t vuint64m4_t;
+typedef __rvv_int64m8_t vint64m8_t;
+typedef __rvv_uint64m8_t vuint64m8_t;
+#if defined(__riscv_zvfh)
+typedef __rvv_float16mf4_t vfloat16mf4_t;
+typedef __rvv_float16mf2_t vfloat16mf2_t;
+typedef __rvv_float16m1_t vfloat16m1_t;
+typedef __rvv_float16m2_t vfloat16m2_t;
+typedef __rvv_float16m4_t vfloat16m4_t;
+typedef __rvv_float16m8_t vfloat16m8_t;
+#endif
+#if (__riscv_v_elen_fp >= 32)
+typedef __rvv_float32mf2_t vfloat32mf2_t;
+typedef __rvv_float32m1_t vfloat32m1_t;
+typedef __rvv_float32m2_t vfloat32m2_t;
+typedef __rvv_float32m4_t vfloat32m4_t;
+typedef __rvv_float32m8_t vfloat32m8_t;
+#endif
+#if (__riscv_v_elen_fp >= 64)
+typedef __rvv_float64m1_t vfloat64m1_t;
+typedef __rvv_float64m2_t vfloat64m2_t;
+typedef __rvv_float64m4_t vfloat64m4_t;
+typedef __rvv_float64m8_t vfloat64m8_t;
+#endif
+
+#define __riscv_v_intrinsic_overloading 1
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __RISCV_VECTOR_H
diff --git a/darwin-x86/lib64/clang/14.0.2/include/rtmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/rtmintrin.h
similarity index 96%
rename from darwin-x86/lib64/clang/14.0.2/include/rtmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/rtmintrin.h
index 36ff583..a3ec81e 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/rtmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/rtmintrin.h
@@ -29,7 +29,7 @@
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _xbegin(void)
 {
-  return __builtin_ia32_xbegin();
+  return (unsigned int)__builtin_ia32_xbegin();
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
diff --git a/darwin-x86/lib64/clang/14.0.2/include/s390intrin.h b/darwin-x86/lib64/clang/16.0.2/include/s390intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/s390intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/s390intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/allocator_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/asan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/asan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
index 692b8f7..ba58ad4 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
@@ -211,6 +211,15 @@
 // Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
 void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
                                   char *out_buf, size_t out_buf_size);
+// Determine the return address.
+#if !defined(_MSC_VER) || defined(__clang__)
+#define __sanitizer_return_address()                                           \
+  __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+extern "C" void *_ReturnAddress(void);
+#pragma intrinsic(_ReturnAddress)
+#define __sanitizer_return_address() _ReturnAddress()
+#endif
 
 /// Sets the callback to be called immediately before death on error.
 ///
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/coverage_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
similarity index 79%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
index d6209a3..8e581a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
@@ -27,6 +27,10 @@
 /// Signature of the callback argument to dfsan_set_write_callback().
 typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
 
+/// Signature of the callback argument to dfsan_set_conditional_callback().
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+                                             dfsan_origin origin);
+
 /// Computes the union of \c l1 and \c l2, resulting in a union label.
 dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
 
@@ -54,6 +58,10 @@
 /// Retrieves the label associated with the data at the given address.
 dfsan_label dfsan_read_label(const void *addr, size_t size);
 
+/// Return the origin associated with the first taint byte in the size bytes
+/// from the address addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+
 /// Returns whether the given label label contains the label elem.
 int dfsan_has_label(dfsan_label label, dfsan_label elem);
 
@@ -70,6 +78,19 @@
 /// callback executes.  Pass in NULL to remove any callback.
 void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
 
+/// Sets a callback to be invoked on any conditional expressions which have a
+/// taint label set. This can be used to find where tainted data influences
+/// the behavior of the program.
+/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+
+/// Conditional expressions can occur inside signal handlers.
+/// Making callbacks that handle signals well is tricky, so when
+/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
+/// handlers will add the labels they see into a global (bitwise-or together).
+/// This function returns all label bits seen in signal handler conditions.
+dfsan_label dfsan_get_labels_in_signal_conditional();
+
 /// Interceptor hooks.
 /// Whenever a dfsan's custom function is called the corresponding
 /// hook is called if it is non-zero. The hooks should be defined by the user.
@@ -87,6 +108,9 @@
 /// prints description at the beginning of the trace. If origin tracking is not
 /// on, or the address is not labeled, it prints nothing.
 void dfsan_print_origin_trace(const void *addr, const char *description);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+void dfsan_print_origin_id_trace(dfsan_origin origin);
 
 /// Prints the origin trace of the label at the address \p addr to a
 /// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -124,6 +148,10 @@
 /// return value is not less than \p out_buf_size.
 size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
                                  char *out_buf, size_t out_buf_size);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
+                                    size_t out_buf_size);
 
 /// Prints the stack trace leading to this call to a pre-allocated output
 /// buffer.
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/hwasan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/linux_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/lsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
similarity index 96%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
index eeb39fb..854b12c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
@@ -92,6 +92,8 @@
 
   /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
   void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+  void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
+  void __sanitizer_dtor_callback_vptr(const volatile void *data);
 
   /* This function may be optionally provided by user and should return
      a string containing Msan runtime options. See msan_flags.h for details. */
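
A hedged sketch of what the two new dtor callbacks enable; the calls are emitted by the compiler rather than written by hand (assumes clang++ with -fsanitize=memory -fsanitize-memory-use-after-dtor):

struct S {
  int x;
  ~S() {}  // instrumentation calls __sanitizer_dtor_callback_fields after
           // this body runs, poisoning the members
};

int main() {
  S *s = new S;
  s->x = 7;
  s->~S();      // members become uninitialized again here
  return s->x;  // deliberately buggy read: MSan reports a use-after-dtor
}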
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/scudo_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/scudo_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface_atomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h b/darwin-x86/lib64/clang/16.0.2/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h
rename to darwin-x86/lib64/clang/16.0.2/include/sanitizer/ubsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/serializeintrin.h b/darwin-x86/lib64/clang/16.0.2/include/serializeintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/serializeintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/serializeintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sgxintrin.h b/darwin-x86/lib64/clang/16.0.2/include/sgxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sgxintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/sgxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/shaintrin.h b/darwin-x86/lib64/clang/16.0.2/include/shaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/shaintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/smmintrin.h
similarity index 85%
rename from darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/smmintrin.h
index 710e55a..46fb7bc 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/smmintrin.h
@@ -17,23 +17,25 @@
 #include <tmmintrin.h>
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"),         \
+                 __min_vector_width__(128)))
 
 /* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT    0x00
-#define _MM_FROUND_TO_NEG_INF        0x01
-#define _MM_FROUND_TO_POS_INF        0x02
-#define _MM_FROUND_TO_ZERO           0x03
-#define _MM_FROUND_CUR_DIRECTION     0x04
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
 
-#define _MM_FROUND_RAISE_EXC         0x00
-#define _MM_FROUND_NO_EXC            0x08
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
 
-#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
 #define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
 
 /// Rounds up each element of the 128-bit vector of [4 x float] to an
@@ -51,7 +53,7 @@
 /// \param X
 ///    A 128-bit vector of [4 x float] values to be rounded up.
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X)       _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
 
 /// Rounds up each element of the 128-bit vector of [2 x double] to an
 ///    integer and returns the rounded values in a 128-bit vector of
@@ -68,7 +70,7 @@
 /// \param X
 ///    A 128-bit vector of [2 x double] values to be rounded up.
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X)       _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
 
 /// Copies three upper elements of the first 128-bit vector operand to
 ///    the corresponding three upper elements of the 128-bit result vector of
@@ -93,7 +95,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_ceil_ss(X, Y)    _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
 
 /// Copies the upper element of the first 128-bit vector operand to the
 ///    corresponding upper element of the 128-bit result vector of [2 x double].
@@ -118,7 +120,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_ceil_sd(X, Y)    _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
 
 /// Rounds down each element of the 128-bit vector of [4 x float] to an
 ///    integer and returns the rounded values in a 128-bit vector of
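
A usage sketch of the rounding macros reformatted above (assumes an SSE4.1-capable x86-64 target, e.g. clang -msse4.1):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128 v = _mm_set_ps(3.7f, -1.2f, 0.5f, 2.5f);  // lanes low to high: 2.5, 0.5, -1.2, 3.7
  float up[4], dn[4];
  _mm_storeu_ps(up, _mm_ceil_ps(v));   // 3.0, 1.0, -1.0, 4.0
  _mm_storeu_ps(dn, _mm_floor_ps(v));  // 2.0, 0.0, -2.0, 3.0
  printf("%g %g\n", up[0], dn[0]);     // prints: 3 2
  return 0;
}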
@@ -135,7 +137,7 @@
 /// \param X
 ///    A 128-bit vector of [4 x float] values to be rounded down.
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X)      _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
 
 /// Rounds down each element of the 128-bit vector of [2 x double] to an
 ///    integer and returns the rounded values in a 128-bit vector of
@@ -152,7 +154,7 @@
 /// \param X
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X)      _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
 
 /// Copies three upper elements of the first 128-bit vector operand to
 ///    the corresponding three upper elements of the 128-bit result vector of
@@ -177,7 +179,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
 
 /// Copies the upper element of the first 128-bit vector operand to the
 ///    corresponding upper element of the 128-bit result vector of [2 x double].
@@ -202,7 +204,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
 
 /// Rounds each element of the 128-bit vector of [4 x float] to an
 ///    integer value according to the rounding control specified by the second
@@ -234,7 +236,7 @@
 ///      10: Upward (toward positive infinity) \n
 ///      11: Truncated
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
+#define _mm_round_ps(X, M)                                                     \
   ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
 
 /// Copies three upper elements of the first 128-bit vector operand to
@@ -275,9 +277,9 @@
 ///      11: Truncated
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_round_ss(X, Y, M) \
-  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
-                                  (__v4sf)(__m128)(Y), (M)))
+#define _mm_round_ss(X, Y, M)                                                  \
+  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y),    \
+                                  (M)))
 
 /// Rounds each element of the 128-bit vector of [2 x double] to an
 ///    integer value according to the rounding control specified by the second
@@ -309,7 +311,7 @@
 ///      10: Upward (toward positive infinity) \n
 ///      11: Truncated
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
+#define _mm_round_pd(X, M)                                                     \
   ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
 
 /// Copies the upper element of the first 128-bit vector operand to the
@@ -350,9 +352,9 @@
 ///      11: Truncated
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_round_sd(X, Y, M) \
-  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
-                                   (__v2df)(__m128d)(Y), (M)))
+#define _mm_round_sd(X, Y, M)                                                  \
+  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+                                   (M)))
 
 /* SSE4 Packed Blending Intrinsics.  */
 /// Returns a 128-bit vector of [2 x double] where the values are
@@ -379,9 +381,9 @@
 ///    When a mask bit is 1, the corresponding 64-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
-  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
-                                     (__v2df)(__m128d)(V2), (int)(M)))
+#define _mm_blend_pd(V1, V2, M)                                                \
+  ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1),                      \
+                                   (__v2df)(__m128d)(V2), (int)(M)))
 
 /// Returns a 128-bit vector of [4 x float] where the values are selected
 ///    from either the first or second operand as specified by the third
@@ -407,9 +409,9 @@
 ///    When a mask bit is 1, the corresponding 32-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
-  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
-                                    (__v4sf)(__m128)(V2), (int)(M)))
+#define _mm_blend_ps(V1, V2, M)                                                \
+  ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2),  \
+                                  (int)(M)))
 
 /// Returns a 128-bit vector of [2 x double] where the values are
 ///    selected from either the first or second operand as specified by the
@@ -431,11 +433,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 64-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
-  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
-                                            (__v2df)__M);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
+                                                           __m128d __V2,
+                                                           __m128d __M) {
+  return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
+                                          (__v2df)__M);
 }
 
 /// Returns a 128-bit vector of [4 x float] where the values are
@@ -458,11 +460,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 32-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
-  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
-                                           (__v4sf)__M);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
+                                                          __m128 __V2,
+                                                          __m128 __M) {
+  return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
+                                         (__v4sf)__M);
 }
 
 /// Returns a 128-bit vector of [16 x i8] where the values are selected
@@ -485,11 +487,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 8-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
-  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
-                                               (__v16qi)__M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
+                                                             __m128i __V2,
+                                                             __m128i __M) {
+  return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
+                                             (__v16qi)__M);
 }
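
A small sketch of the variable-blend pattern these wrappers enable: the sign bit of each mask lane selects the second operand (assumes SSE4.1; the helper name is illustrative):

#include <smmintrin.h>

// Replace negative lanes with zero: the compare yields all-ones lanes
// where x < 0, so _mm_blendv_ps takes the zero operand in exactly those.
static __m128 zero_out_negatives(__m128 x) {
  __m128 zero = _mm_setzero_ps();
  __m128 is_neg = _mm_cmplt_ps(x, zero);
  return _mm_blendv_ps(x, zero, is_neg);
}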
 
 /// Returns a 128-bit vector of [8 x i16] where the values are selected
@@ -516,9 +518,9 @@
 ///    When a mask bit is 1, the corresponding 16-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
-  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
-                                        (__v8hi)(__m128i)(V2), (int)(M)))
+#define _mm_blend_epi16(V1, V2, M)                                             \
+  ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1),                   \
+                                      (__v8hi)(__m128i)(V2), (int)(M)))
 
 /* SSE4 Dword Multiply Instructions.  */
 /// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
@@ -534,10 +536,9 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
+                                                             __m128i __V2) {
+  return (__m128i)((__v4su)__V1 * (__v4su)__V2);
 }
 
 /// Multiplies corresponding even-indexed elements of two 128-bit
@@ -554,10 +555,9 @@
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [2 x i64] containing the products of both
 ///    operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
 }
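
To make the difference between the two multiplies concrete, a sketch (assumes SSE4.1; _mm_extract_epi64 is the accessor documented further below):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128i a = _mm_set1_epi32(0x10000);            // 65536 in every lane
  __m128i lo = _mm_mullo_epi32(a, a);             // low 32 bits of 2^32 -> 0
  __m128i wide = _mm_mul_epi32(a, a);             // even lanes widened -> 2^32
  printf("%d %lld\n", _mm_extract_epi32(lo, 0),
         (long long)_mm_extract_epi64(wide, 0));  // prints: 0 4294967296
  return 0;
}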
 
 /* SSE4 Floating Point Dot Product Instructions.  */
@@ -593,9 +593,8 @@
 ///    each [4 x float] subvector. If a bit is set, the dot product is returned
 ///    in the corresponding element; otherwise that element is set to zero.
 /// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
-  ((__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
-                                (__v4sf)(__m128)(Y), (M)))
+#define _mm_dp_ps(X, Y, M)                                                     \
+  ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
 
 /// Computes the dot product of the two 128-bit vectors of [2 x double]
 ///    and returns it in the elements of the 128-bit result vector of
@@ -628,9 +627,9 @@
 ///    to the lowest element and bit [1] corresponding to the highest element of
 ///    each [2 x double] vector. If a bit is set, the dot product is returned in
 ///    the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) \
-  ((__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
-                                 (__v2df)(__m128d)(Y), (M)))
+#define _mm_dp_pd(X, Y, M)                                                     \
+  ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y),    \
+                                (M)))
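
A typical use of the dot-product intrinsic, sketched: mask 0xF1 multiplies all four lanes and writes the sum to lane 0 only (assumes SSE4.1; the function name is illustrative):

#include <smmintrin.h>

static float dot4(__m128 a, __m128 b) {
  return _mm_cvtss_f32(_mm_dp_ps(a, b, 0xF1));
}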
 
 /* SSE4 Streaming Load Hint Instruction.  */
 /// Loads integer values from a 128-bit aligned memory location to a
@@ -645,10 +644,9 @@
 ///    values.
 /// \returns A 128-bit integer vector containing the data stored at the
 ///    specified memory location.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
-  return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128(__m128i const *__V) {
+  return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
 }
 
 /* SSE4 Packed Integer Min/Max Instructions.  */
@@ -665,10 +663,9 @@
 /// \param __V2
 ///    A 128-bit vector of [16 x i8]
 /// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
+                                                          __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -684,10 +681,9 @@
 /// \param __V2
 ///    A 128-bit vector of [16 x i8].
 /// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
+                                                          __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -703,10 +699,9 @@
 /// \param __V2
 ///    A 128-bit vector of [8 x u16].
 /// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -722,10 +717,9 @@
 /// \param __V2
 ///    A 128-bit vector of [8 x u16].
 /// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -741,10 +735,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -760,10 +753,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -779,10 +771,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x u32].
 /// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -798,10 +789,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x u32].
 /// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
 }
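
The min/max pairs compose into the usual clamp idiom; a sketch (assumes SSE4.1; the helper name is illustrative):

#include <smmintrin.h>

// Clamp each signed 32-bit lane of v into [lo, hi].
static __m128i clamp_epi32(__m128i v, __m128i lo, __m128i hi) {
  return _mm_min_epi32(_mm_max_epi32(v, lo), hi);
}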
 
 /* SSE4 Insertion and Extraction from XMM Register Instructions.  */
@@ -869,21 +859,24 @@
 ///    10: Bits [95:64] of parameter \a X are returned. \n
 ///    11: Bits [127:96] of parameter \a X are returned.
 /// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) \
-  __builtin_bit_cast(int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
+#define _mm_extract_ps(X, N)                                                   \
+  __builtin_bit_cast(                                                          \
+      int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
 
 /* Miscellaneous insert and extract macros.  */
 /* Extract a single-precision float from X at index N into D.  */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
-  do { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } while (0)
+#define _MM_EXTRACT_FLOAT(D, X, N)                                             \
+  do {                                                                         \
+    (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N));          \
+  } while (0)
 
 /* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
    an index suitable for _mm_insert_ps.  */
 #define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
 
 /* Extract a float from X at index N into the first index of the return.  */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
-                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+#define _MM_PICK_OUT_PS(X, N)                                                  \
+  _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
 
 /* Insert int into packed integer array at index.  */
 /// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@@ -926,9 +919,9 @@
 ///    1110: Bits [119:112] of the result are used for insertion. \n
 ///    1111: Bits [127:120] of the result are used for insertion.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
-                                         (int)(I), (int)(N)))
+#define _mm_insert_epi8(X, I, N)                                               \
+  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I),      \
+                                         (int)(N)))
 
 /// Constructs a 128-bit vector of [4 x i32] by first making a copy of
 ///    the 128-bit integer vector parameter, and then inserting the 32-bit
@@ -958,9 +951,9 @@
 ///    10: Bits [95:64] of the result are used for insertion. \n
 ///    11: Bits [127:96] of the result are used for insertion.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
-                                        (int)(I), (int)(N)))
+#define _mm_insert_epi32(X, I, N)                                              \
+  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I),        \
+                                        (int)(N)))
 
 #ifdef __x86_64__
 /// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@@ -989,9 +982,9 @@
 ///    0: Bits [63:0] of the result are used for insertion. \n
 ///    1: Bits [127:64] of the result are used for insertion. \n
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
-                                        (long long)(I), (int)(N)))
+#define _mm_insert_epi64(X, I, N)                                              \
+  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I),  \
+                                        (int)(N)))
 #endif /* __x86_64__ */
 
 /* Extract int from packed integer array at index.  This returns the element
@@ -1032,8 +1025,8 @@
 /// \returns  An unsigned integer, whose lower 8 bits are selected from the
 ///    128-bit integer vector parameter and the remaining bits are assigned
 ///    zeros.
-#define _mm_extract_epi8(X, N) \
-  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+#define _mm_extract_epi8(X, N)                                                 \
+  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X),     \
                                                     (int)(N)))
 
 /// Extracts a 32-bit element from the 128-bit integer vector of
@@ -1058,10 +1051,9 @@
 ///    11: Bits [127:96] of the parameter \a X are extracted.
 /// \returns  An integer, whose lower 32 bits are selected from the 128-bit
 ///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
+#define _mm_extract_epi32(X, N)                                                \
   ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
 
-#ifdef __x86_64__
 /// Extracts a 64-bit element from the 128-bit integer vector of
 ///    [2 x i64], using the immediate value parameter \a N as a selector.
 ///
@@ -1071,7 +1063,8 @@
 /// long long _mm_extract_epi64(__m128i X, const int N);
 /// \endcode
 ///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction
+/// in 64-bit mode.
 ///
 /// \param X
 ///    A 128-bit integer vector.
@@ -1081,9 +1074,8 @@
 ///    0: Bits [63:0] are returned. \n
 ///    1: Bits [127:64] are returned. \n
 /// \returns  A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
+#define _mm_extract_epi64(X, N)                                                \
   ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
-#endif /* __x86_64 */
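
A sketch of the insert/extract pair; the lane index must be a compile-time constant (assumes SSE4.1 on x86-64):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128i v = _mm_setr_epi32(10, 20, 30, 40);  // lanes low to high
  v = _mm_insert_epi32(v, 99, 2);              // lane 2: 30 -> 99
  printf("%d %d\n", _mm_extract_epi32(v, 2),   // 99
         _mm_extract_epi8(v, 0));              // low byte of lane 0 -> 10
  return 0;
}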
 
 /* SSE4 128-bit Packed Integer Comparisons.  */
 /// Tests whether the specified bits in a 128-bit integer vector are all
@@ -1098,9 +1090,8 @@
 /// \param __V
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
+                                                         __m128i __V) {
   return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
 }
 
@@ -1116,9 +1107,8 @@
 /// \param __V
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
+                                                         __m128i __V) {
   return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
 }
 
@@ -1135,9 +1125,8 @@
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are neither all zeros nor all ones;
 ///    FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
+                                                           __m128i __V) {
   return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
 }
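
The three PTEST wrappers reduce to "is (V AND M) all zeros, all of the mask, or neither"; a sketch (assumes SSE4.1; the helper name is illustrative):

#include <smmintrin.h>

// Returns 1 if any byte of v has its high bit set.
static int any_high_bit(__m128i v) {
  __m128i mask = _mm_set1_epi8((char)0x80);
  return !_mm_testz_si128(v, mask);  // testz is 1 iff (v & mask) == 0
}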
 
@@ -1193,7 +1182,7 @@
 /// \param V
 ///    A 128-bit integer vector selecting which bits to test in operand \a M.
 /// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
 
 /* SSE4 64-bit Packed Integer Comparisons.  */
 /// Compares each of the corresponding 64-bit values of the 128-bit
@@ -1208,9 +1197,8 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
+                                                             __m128i __V2) {
   return (__m128i)((__v2di)__V1 == (__v2di)__V2);
 }
 
@@ -1225,15 +1213,16 @@
 /// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
 ///
 /// \param __V
-///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-///    extended to 16-bit values.
+///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+///    sign-extended to 16-bit values.
 /// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
+                              7),
+      __v8hi);
 }
 
 /// Sign-extends each of the lower four 8-bit integer elements of a
@@ -1249,12 +1238,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
 ///    sign-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Sign-extends each of the lower two 8-bit integer elements of a
@@ -1270,12 +1258,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
 ///    sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
 }
 
 /// Sign-extends each of the lower four 16-bit integer elements of a
@@ -1291,10 +1278,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
 ///    sign-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Sign-extends each of the lower two 16-bit integer elements of a
@@ -1310,10 +1296,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
 ///     sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
 }
 
 /// Sign-extends each of the lower two 32-bit integer elements of a
@@ -1329,10 +1314,9 @@
 ///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
 ///    sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
 }
 
 /* SSE4 Packed Integer Zero-Extension.  */
@@ -1349,10 +1333,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
 ///    zero-extended to 16-bit values.
 /// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
+                              7),
+      __v8hi);
 }
 
 /// Zero-extends each of the lower four 8-bit integer elements of a
@@ -1368,10 +1353,9 @@
 ///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
 ///    zero-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Zero-extends each of the lower two 8-bit integer elements of a
@@ -1387,10 +1371,9 @@
 ///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
 }
 
 /// Zero-extends each of the lower four 16-bit integer elements of a
@@ -1406,10 +1389,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
 ///    zero-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Zero-extends each of the lower two 16-bit integer elements of a
@@ -1425,10 +1407,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
 }
 
 /// Zero-extends each of the lower two 32-bit integer elements of a
@@ -1444,10 +1425,9 @@
 ///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
 }
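
Sign- versus zero-extension in one sketch (assumes SSE4.1):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128i b = _mm_set1_epi8(-1);     // 0xFF in every byte
  __m128i s = _mm_cvtepi8_epi16(b);  // sign-extend: each i16 == -1
  __m128i u = _mm_cvtepu8_epi16(b);  // zero-extend: each u16 == 255
  printf("%d %d\n", (short)_mm_extract_epi16(s, 0),
         _mm_extract_epi16(u, 0));   // prints: -1 255
  return 0;
}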
 
 /* SSE4 Pack with Unsigned Saturation.  */
@@ -1473,10 +1453,9 @@
 ///    less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
 ///    are written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+                                                              __m128i __V2) {
+  return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
 }
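
A sketch of unsigned-saturating packing (assumes SSE4.1):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128i a = _mm_setr_epi32(-5, 70000, 123, 456);
  __m128i p = _mm_packus_epi32(a, a);         // each i32 clamped to [0, 65535]
  printf("%d %d\n", _mm_extract_epi16(p, 0),  // -5    -> 0
         _mm_extract_epi16(p, 1));            // 70000 -> 65535
  return 0;
}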
 
 /* SSE4 Multiple Packed Sums of Absolute Difference.  */
@@ -1515,9 +1494,9 @@
 ///    \endcode
 /// \returns A 128-bit integer vector containing the sums of the sets of
 ///    absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
-  ((__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
-                                       (__v16qi)(__m128i)(Y), (M)))
+#define _mm_mpsadbw_epu8(X, Y, M)                                              \
+  ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X),                   \
+                                      (__v16qi)(__m128i)(Y), (M)))
 
 /// Finds the minimum unsigned 16-bit element in the input 128-bit
 ///    vector of [8 x u16] and returns it along with its index.
@@ -1532,10 +1511,8 @@
 /// \returns A 128-bit value where bits [15:0] contain the minimum value found
 ///    in parameter \a __V, bits [18:16] contain the index of the minimum value
 ///    and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
-  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+  return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
 }
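
A sketch of the horizontal minimum (assumes SSE4.1):

#include <smmintrin.h>
#include <cstdio>

int main() {
  __m128i v = _mm_setr_epi16(9, 4, 7, 1, 8, 6, 5, 3);
  __m128i r = _mm_minpos_epu16(v);
  int min = _mm_extract_epi16(r, 0);        // bits [15:0]: the minimum
  int idx = _mm_extract_epi16(r, 1) & 0x7;  // bits [18:16]: its index
  printf("min=%d idx=%d\n", min, idx);      // prints: min=1 idx=3
  return 0;
}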
 
 /* Handle the sse4.2 definitions here. */
@@ -1544,33 +1521,34 @@
    so we'll do the same.  */
 
 #undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
 
 /* These specify the type of data that we're comparing.  */
-#define _SIDD_UBYTE_OPS                 0x00
-#define _SIDD_UWORD_OPS                 0x01
-#define _SIDD_SBYTE_OPS                 0x02
-#define _SIDD_SWORD_OPS                 0x03
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
 
 /* These specify the type of comparison operation.  */
-#define _SIDD_CMP_EQUAL_ANY             0x00
-#define _SIDD_CMP_RANGES                0x04
-#define _SIDD_CMP_EQUAL_EACH            0x08
-#define _SIDD_CMP_EQUAL_ORDERED         0x0c
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
 
 /* These macros specify the polarity of the operation.  */
-#define _SIDD_POSITIVE_POLARITY         0x00
-#define _SIDD_NEGATIVE_POLARITY         0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
 
 /* These macros are used in _mm_cmpXstri() to specify the return.  */
-#define _SIDD_LEAST_SIGNIFICANT         0x00
-#define _SIDD_MOST_SIGNIFICANT          0x40
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
 
 /* These macros are used in _mm_cmpXstri() to specify the return.  */
-#define _SIDD_BIT_MASK                  0x00
-#define _SIDD_UNIT_MASK                 0x40
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
 
 /* SSE4.2 Packed Comparison Intrinsics.  */
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1625,8 +1603,8 @@
 ///         repeating each bit 8 or 16 times).
 /// \returns Returns a 128-bit integer vector representing the result mask of
 ///    the comparison.
-#define _mm_cmpistrm(A, B, M) \
-  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrm(A, B, M)                                                  \
+  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A),                 \
                                         (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1679,9 +1657,9 @@
 ///      0: The index of the least significant set bit. \n
 ///      1: The index of the most significant set bit. \n
 /// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
-  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
-                                   (__v16qi)(__m128i)(B), (int)(M)))
+#define _mm_cmpistri(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A),                     \
+                                    (__v16qi)(__m128i)(B), (int)(M)))
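
A hedged sketch of the implicit-length form: with _SIDD_CMP_EQUAL_ANY, _mm_cmpistri returns the index of the first byte of B that matches any byte of A, or 16 if there is none (assumes SSE4.2, e.g. -msse4.2; both literals are padded so a full 16 bytes can be loaded safely):

#include <nmmintrin.h>
#include <cstdio>

int main() {
  __m128i set = _mm_loadu_si128((const __m128i *)"aeiou\0\0\0\0\0\0\0\0\0\0");
  __m128i txt = _mm_loadu_si128((const __m128i *)"xyzop...........");
  int idx = _mm_cmpistri(set, txt,
                         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                             _SIDD_LEAST_SIGNIFICANT);
  printf("first match at %d\n", idx);  // prints: first match at 3 ('o')
  return 0;
}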
 
 /// Uses the immediate operand \a M to perform a comparison of string
 ///    data with explicitly defined lengths that is contained in source operands
@@ -1739,9 +1717,9 @@
 ///         repeating each bit 8 or 16 times). \n
 /// \returns Returns a 128-bit integer vector representing the result mask of
 ///    the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
-  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
-                                        (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrm(A, LA, B, LB, M)                                          \
+  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA),      \
+                                        (__v16qi)(__m128i)(B), (int)(LB),      \
                                         (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1798,9 +1776,9 @@
 ///      0: The index of the least significant set bit. \n
 ///      1: The index of the most significant set bit. \n
 /// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
-                                    (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestri(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA),          \
+                                    (__v16qi)(__m128i)(B), (int)(LB),          \
                                     (int)(M)))
 
 /* SSE4.2 Packed Comparison Intrinsics and EFlag Reading.  */
@@ -1850,8 +1828,8 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the bit mask is zero and the length of the string in
 ///    \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
-  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistra(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1899,8 +1877,8 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
-  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrc(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1947,8 +1925,8 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B. \n
 /// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
-  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistro(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1997,8 +1975,8 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the length of the string in \a A is less than the
 ///    maximum, otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
-  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrs(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2047,8 +2025,8 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the length of the string in \a B is less than the
 ///    maximum, otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
-  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrz(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2101,9 +2079,9 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the bit mask is zero and the length of the string in
 ///    \a B is the maximum, otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestra(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2155,9 +2133,9 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrc(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2208,9 +2186,9 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B.
 /// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestro(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2263,9 +2241,9 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the length of the string in \a A is less than the
 ///    maximum, otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrs(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2317,9 +2295,9 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the length of the string in \a B is less than the
 ///    maximum, otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrz(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /* SSE4.2 Compare Packed Data -- Greater Than.  */
@@ -2336,9 +2314,8 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
+                                                             __m128i __V2) {
   return (__m128i)((__v2di)__V1 > (__v2di)__V2);
 }
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdalign.h b/darwin-x86/lib64/clang/16.0.2/include/stdalign.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdalign.h
rename to darwin-x86/lib64/clang/16.0.2/include/stdalign.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdarg.h b/darwin-x86/lib64/clang/16.0.2/include/stdarg.h
similarity index 84%
rename from darwin-x86/lib64/clang/14.0.2/include/stdarg.h
rename to darwin-x86/lib64/clang/16.0.2/include/stdarg.h
index 0bc3940..dc7becf 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdarg.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/stdarg.h
@@ -23,7 +23,9 @@
  */
 #define __va_copy(d,s) __builtin_va_copy(d,s)
 
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    !defined(__STRICT_ANSI__)
 #define va_copy(dest, src)  __builtin_va_copy(dest, src)
 #endif
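 
 [Editor's note] The reworked guard checks defined(__STDC_VERSION__) and defined(__cplusplus) before comparing them, so the header no longer evaluates undefined macros (a -Wundef warning in strict builds). A small self-contained check of va_copy, which this guard makes available from C99 on:
 
     #include <stdarg.h>
     #include <stdio.h>
 
     static int sum_twice(int n, ...) {
         va_list ap, aq;
         int total = 0;
         va_start(ap, n);
         va_copy(aq, ap);  /* provided by the guarded definition above (C99+) */
         for (int i = 0; i < n; i++) total += va_arg(ap, int);
         for (int i = 0; i < n; i++) total += va_arg(aq, int);
         va_end(aq);
         va_end(ap);
         return total;
     }
 
     int main(void) {
         printf("%d\n", sum_twice(3, 1, 2, 3));  /* prints 12 */
         return 0;
     }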
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdatomic.h b/darwin-x86/lib64/clang/16.0.2/include/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdatomic.h
rename to darwin-x86/lib64/clang/16.0.2/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdbool.h b/darwin-x86/lib64/clang/16.0.2/include/stdbool.h
similarity index 69%
rename from darwin-x86/lib64/clang/14.0.2/include/stdbool.h
rename to darwin-x86/lib64/clang/16.0.2/include/stdbool.h
index 2525363..9406aab 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdbool.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/stdbool.h
@@ -10,22 +10,25 @@
 #ifndef __STDBOOL_H
 #define __STDBOOL_H
 
-/* Don't define bool, true, and false in C++, except as a GNU extension. */
-#ifndef __cplusplus
+#define __bool_true_false_are_defined 1
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#elif !defined(__cplusplus)
 #define bool _Bool
 #define true 1
 #define false 0
 #elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
 /* Define _Bool as a GNU extension. */
 #define _Bool bool
-#if __cplusplus < 201103L
+#if defined(__cplusplus) && __cplusplus < 201103L
 /* For C++98, define bool, false, true as a GNU extension. */
-#define bool  bool
+#define bool bool
 #define false false
-#define true  true
+#define true true
 #endif
 #endif
 
-#define __bool_true_false_are_defined 1
-
 #endif /* __STDBOOL_H */
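 
 [Editor's note] With the C2x branch placed first, stdbool.h stops defining bool/true/false once they become keywords, while still setting __bool_true_false_are_defined unconditionally. A quick check for C17 and earlier, where the macros are still emitted:
 
     #include <stdbool.h>
     #include <stdio.h>
 
     int main(void) {
         bool ok = true;  /* a macro for _Bool before C2x; a keyword after */
         printf("%d %d\n", ok, __bool_true_false_are_defined);  /* prints: 1 1 */
         return 0;
     }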
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stddef.h b/darwin-x86/lib64/clang/16.0.2/include/stddef.h
similarity index 94%
rename from darwin-x86/lib64/clang/14.0.2/include/stddef.h
rename to darwin-x86/lib64/clang/16.0.2/include/stddef.h
index 15acd44..8e86a21 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stddef.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/stddef.h
@@ -62,7 +62,7 @@
 #endif /* defined(__need_STDDEF_H_misc) */
 
 #if defined(__need_wchar_t)
-#ifndef __cplusplus
+#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)
 /* Always define wchar_t when modules are available. */
 #if !defined(_WCHAR_T) || __has_feature(modules)
 #if !__has_feature(modules)
@@ -98,7 +98,8 @@
 #endif /* defined(__need_NULL) */
 
 #if defined(__need_STDDEF_H_misc)
-#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L)
 #include "__stddef_max_align_t.h"
 #endif
 #define offsetof(t, d) __builtin_offsetof(t, d)
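 
 [Editor's note] As in stdarg.h, the version test now checks defined(__STDC_VERSION__) before comparing it, and max_align_t is pulled in via __stddef_max_align_t.h under C11/C++11. A short C11 sketch touching both offsetof and max_align_t:
 
     #include <stddef.h>
     #include <stdio.h>
 
     struct packet { char tag; int payload; };
 
     int main(void) {
         printf("payload offset: %zu, max alignment: %zu\n",
                offsetof(struct packet, payload),  /* via __builtin_offsetof */
                _Alignof(max_align_t));            /* via __stddef_max_align_t.h */
         return 0;
     }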
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdint.h b/darwin-x86/lib64/clang/16.0.2/include/stdint.h
similarity index 66%
rename from darwin-x86/lib64/clang/14.0.2/include/stdint.h
rename to darwin-x86/lib64/clang/16.0.2/include/stdint.h
index 192f653..a47e91b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdint.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/stdint.h
@@ -96,13 +96,21 @@
 typedef __INT64_TYPE__ int64_t;
 # endif /* __int8_t_defined */
 typedef __UINT64_TYPE__ uint64_t;
+# undef __int_least64_t
 # define __int_least64_t int64_t
+# undef __uint_least64_t
 # define __uint_least64_t uint64_t
+# undef __int_least32_t
 # define __int_least32_t int64_t
+# undef __uint_least32_t
 # define __uint_least32_t uint64_t
+# undef __int_least16_t
 # define __int_least16_t int64_t
+# undef __uint_least16_t
 # define __uint_least16_t uint64_t
+# undef __int_least8_t
 # define __int_least8_t int64_t
+# undef __uint_least8_t
 # define __uint_least8_t uint64_t
 #endif /* __INT64_TYPE__ */
 
@@ -120,11 +128,17 @@
 typedef uint56_t uint_least56_t;
 typedef int56_t int_fast56_t;
 typedef uint56_t uint_fast56_t;
+# undef __int_least32_t
 # define __int_least32_t int56_t
+# undef __uint_least32_t
 # define __uint_least32_t uint56_t
+# undef __int_least16_t
 # define __int_least16_t int56_t
+# undef __uint_least16_t
 # define __uint_least16_t uint56_t
+# undef __int_least8_t
 # define __int_least8_t int56_t
+# undef __uint_least8_t
 # define __uint_least8_t uint56_t
 #endif /* __INT56_TYPE__ */
 
@@ -136,11 +150,17 @@
 typedef uint48_t uint_least48_t;
 typedef int48_t int_fast48_t;
 typedef uint48_t uint_fast48_t;
+# undef __int_least32_t
 # define __int_least32_t int48_t
+# undef __uint_least32_t
 # define __uint_least32_t uint48_t
+# undef __int_least16_t
 # define __int_least16_t int48_t
+# undef __uint_least16_t
 # define __uint_least16_t uint48_t
+# undef __int_least8_t
 # define __int_least8_t int48_t
+# undef __uint_least8_t
 # define __uint_least8_t uint48_t
 #endif /* __INT48_TYPE__ */
 
@@ -152,11 +172,17 @@
 typedef uint40_t uint_least40_t;
 typedef int40_t int_fast40_t;
 typedef uint40_t uint_fast40_t;
+# undef __int_least32_t
 # define __int_least32_t int40_t
+# undef __uint_least32_t
 # define __uint_least32_t uint40_t
+# undef __int_least16_t
 # define __int_least16_t int40_t
+# undef __uint_least16_t
 # define __uint_least16_t uint40_t
+# undef __int_least8_t
 # define __int_least8_t int40_t
+# undef __uint_least8_t
 # define __uint_least8_t uint40_t
 #endif /* __INT40_TYPE__ */
 
@@ -172,11 +198,17 @@
 typedef __UINT32_TYPE__ uint32_t;
 # endif /* __uint32_t_defined */
 
+# undef __int_least32_t
 # define __int_least32_t int32_t
+# undef __uint_least32_t
 # define __uint_least32_t uint32_t
+# undef __int_least16_t
 # define __int_least16_t int32_t
+# undef __uint_least16_t
 # define __uint_least16_t uint32_t
+# undef __int_least8_t
 # define __int_least8_t int32_t
+# undef __uint_least8_t
 # define __uint_least8_t uint32_t
 #endif /* __INT32_TYPE__ */
 
@@ -194,9 +226,13 @@
 typedef uint24_t uint_least24_t;
 typedef int24_t int_fast24_t;
 typedef uint24_t uint_fast24_t;
+# undef __int_least16_t
 # define __int_least16_t int24_t
+# undef __uint_least16_t
 # define __uint_least16_t uint24_t
+# undef __int_least8_t
 # define __int_least8_t int24_t
+# undef __uint_least8_t
 # define __uint_least8_t uint24_t
 #endif /* __INT24_TYPE__ */
 
@@ -205,9 +241,13 @@
 typedef __INT16_TYPE__ int16_t;
 #endif /* __int8_t_defined */
 typedef __UINT16_TYPE__ uint16_t;
+# undef __int_least16_t
 # define __int_least16_t int16_t
+# undef __uint_least16_t
 # define __uint_least16_t uint16_t
+# undef __int_least8_t
 # define __int_least8_t int16_t
+# undef __uint_least8_t
 # define __uint_least8_t uint16_t
 #endif /* __INT16_TYPE__ */
 
@@ -224,7 +264,9 @@
 typedef __INT8_TYPE__ int8_t;
 #endif /* __int8_t_defined */
 typedef __UINT8_TYPE__ uint8_t;
+# undef __int_least8_t
 # define __int_least8_t int8_t
+# undef __uint_least8_t
 # define __uint_least8_t uint8_t
 #endif /* __INT8_TYPE__ */
 
@@ -285,16 +327,15 @@
 
 
 #ifdef __INT64_TYPE__
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT64_C_SUFFIX__
 #  define __int64_c_suffix __INT64_C_SUFFIX__
 #  define __int32_c_suffix __INT64_C_SUFFIX__
 #  define __int16_c_suffix __INT64_C_SUFFIX__
 #  define  __int8_c_suffix __INT64_C_SUFFIX__
-# else
-#  undef __int64_c_suffix
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT64_C_SUFFIX__ */
 #endif /* __INT64_TYPE__ */
 
@@ -310,6 +351,9 @@
 
 
 #ifdef __INT56_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT56_C_SUFFIX__
 #  define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
 #  define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
@@ -319,14 +363,14 @@
 # else
 #  define INT56_C(v) v
 #  define UINT56_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT56_C_SUFFIX__ */
 #endif /* __INT56_TYPE__ */
 
 
 #ifdef __INT48_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT48_C_SUFFIX__
 #  define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
 #  define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
@@ -336,14 +380,14 @@
 # else
 #  define INT48_C(v) v
 #  define UINT48_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT48_C_SUFFIX__ */
 #endif /* __INT48_TYPE__ */
 
 
 #ifdef __INT40_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT40_C_SUFFIX__
 #  define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
 #  define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
@@ -353,22 +397,18 @@
 # else
 #  define INT40_C(v) v
 #  define UINT40_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT40_C_SUFFIX__ */
 #endif /* __INT40_TYPE__ */
 
 
 #ifdef __INT32_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT32_C_SUFFIX__
 #  define __int32_c_suffix __INT32_C_SUFFIX__
 #  define __int16_c_suffix __INT32_C_SUFFIX__
 #  define __int8_c_suffix  __INT32_C_SUFFIX__
-#else
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT32_C_SUFFIX__ */
 #endif /* __INT32_TYPE__ */
 
@@ -384,6 +424,8 @@
 
 
 #ifdef __INT24_TYPE__
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT24_C_SUFFIX__
 #  define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
 #  define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
@@ -392,19 +434,16 @@
 # else
 #  define INT24_C(v) v
 #  define UINT24_C(v) v ## U
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT24_C_SUFFIX__ */
 #endif /* __INT24_TYPE__ */
 
 
 #ifdef __INT16_TYPE__
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT16_C_SUFFIX__
 #  define __int16_c_suffix __INT16_C_SUFFIX__
 #  define __int8_c_suffix  __INT16_C_SUFFIX__
-#else
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT16_C_SUFFIX__ */
 #endif /* __INT16_TYPE__ */
 
@@ -420,10 +459,9 @@
 
 
 #ifdef __INT8_TYPE__
+# undef  __int8_c_suffix
 # ifdef __INT8_C_SUFFIX__
 #  define __int8_c_suffix __INT8_C_SUFFIX__
-#else
-#  undef  __int8_c_suffix
 # endif /* __INT8_C_SUFFIX__ */
 #endif /* __INT8_TYPE__ */
 
@@ -461,17 +499,41 @@
 # define INT64_MAX           INT64_C( 9223372036854775807)
 # define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
 # define UINT64_MAX         UINT64_C(18446744073709551615)
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT64_WIDTH         64
+# define INT64_WIDTH          UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST8_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __STDC_VERSION__ */
+
 # define __INT_LEAST64_MIN   INT64_MIN
 # define __INT_LEAST64_MAX   INT64_MAX
 # define __UINT_LEAST64_MAX UINT64_MAX
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT64_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT64_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT64_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT64_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT64_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT64_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT64_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT64_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT64_MAX
 #endif /* __INT64_TYPE__ */
 
@@ -482,6 +544,15 @@
 # define INT_FAST64_MIN    __INT_LEAST64_MIN
 # define INT_FAST64_MAX    __INT_LEAST64_MAX
 # define UINT_FAST64_MAX  __UINT_LEAST64_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) &&  __STDC_VERSION__ >= 202000L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH  UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH  __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH   UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST64_MIN */
 
 
@@ -495,15 +566,42 @@
 # define INT_FAST56_MIN      INT56_MIN
 # define INT_FAST56_MAX      INT56_MAX
 # define UINT_FAST56_MAX    UINT56_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT56_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT56_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT56_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT56_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT56_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT56_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT56_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT56_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT56_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT56_WIDTH         56
+# define INT56_WIDTH          UINT56_WIDTH
+# define UINT_LEAST56_WIDTH   UINT56_WIDTH
+# define INT_LEAST56_WIDTH    UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH    UINT56_WIDTH
+# define INT_FAST56_WIDTH     UINT_FAST56_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT56_TYPE__ */
 
 
@@ -517,15 +615,42 @@
 # define INT_FAST48_MIN      INT48_MIN
 # define INT_FAST48_MAX      INT48_MAX
 # define UINT_FAST48_MAX    UINT48_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT48_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT48_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT48_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT48_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT48_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT48_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT48_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT48_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT48_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+#define UINT48_WIDTH         48
+#define INT48_WIDTH          UINT48_WIDTH
+#define UINT_LEAST48_WIDTH   UINT48_WIDTH
+#define INT_LEAST48_WIDTH    UINT_LEAST48_WIDTH
+#define UINT_FAST48_WIDTH    UINT48_WIDTH
+#define INT_FAST48_WIDTH     UINT_FAST48_WIDTH
+#undef __UINT_LEAST32_WIDTH
+#define __UINT_LEAST32_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST16_WIDTH
+#define __UINT_LEAST16_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST8_WIDTH
+#define __UINT_LEAST8_WIDTH  UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT48_TYPE__ */
 
 
@@ -539,15 +664,42 @@
 # define INT_FAST40_MIN      INT40_MIN
 # define INT_FAST40_MAX      INT40_MAX
 # define UINT_FAST40_MAX    UINT40_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT40_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT40_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT40_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT40_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT40_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT40_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT40_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT40_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT40_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT40_WIDTH         40
+# define INT40_WIDTH          UINT40_WIDTH
+# define UINT_LEAST40_WIDTH   UINT40_WIDTH
+# define INT_LEAST40_WIDTH    UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH    UINT40_WIDTH
+# define INT_FAST40_WIDTH     UINT_FAST40_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT40_TYPE__ */
 
 
@@ -555,15 +707,38 @@
 # define INT32_MAX           INT32_C(2147483647)
 # define INT32_MIN         (-INT32_C(2147483647)-1)
 # define UINT32_MAX         UINT32_C(4294967295)
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT32_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT32_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT32_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT32_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT32_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT32_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT32_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT32_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT32_WIDTH         32
+# define INT32_WIDTH          UINT32_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT32_TYPE__ */
 
 #ifdef __INT_LEAST32_MIN
@@ -573,6 +748,15 @@
 # define INT_FAST32_MIN    __INT_LEAST32_MIN
 # define INT_FAST32_MAX    __INT_LEAST32_MAX
 # define UINT_FAST32_MAX  __UINT_LEAST32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH  UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH  __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH   UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST32_MIN */
 
 
@@ -586,12 +770,34 @@
 # define INT_FAST24_MIN      INT24_MIN
 # define INT_FAST24_MAX      INT24_MAX
 # define UINT_FAST24_MAX    UINT24_MAX
+
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT24_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT24_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT24_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT24_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT24_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT24_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT24_WIDTH         24
+# define INT24_WIDTH          UINT24_WIDTH
+# define UINT_LEAST24_WIDTH   UINT24_WIDTH
+# define INT_LEAST24_WIDTH    UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH    UINT24_WIDTH
+# define INT_FAST24_WIDTH     UINT_FAST24_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT24_TYPE__ */
 
 
@@ -599,12 +805,30 @@
 #define INT16_MAX            INT16_C(32767)
 #define INT16_MIN          (-INT16_C(32767)-1)
 #define UINT16_MAX          UINT16_C(65535)
+
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT16_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT16_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT16_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT16_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT16_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT16_WIDTH         16
+# define INT16_WIDTH          UINT16_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT16_TYPE__ */
 
 #ifdef __INT_LEAST16_MIN
@@ -614,6 +838,15 @@
 # define INT_FAST16_MIN    __INT_LEAST16_MIN
 # define INT_FAST16_MAX    __INT_LEAST16_MAX
 # define UINT_FAST16_MAX  __UINT_LEAST16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH  UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH  __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH   UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST16_MIN */
 
 
@@ -621,9 +854,22 @@
 # define INT8_MAX            INT8_C(127)
 # define INT8_MIN          (-INT8_C(127)-1)
 # define UINT8_MAX          UINT8_C(255)
+
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT8_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT8_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT8_WIDTH         8
+# define INT8_WIDTH          UINT8_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT8_TYPE__ */
 
 #ifdef __INT_LEAST8_MIN
@@ -633,6 +879,15 @@
 # define INT_FAST8_MIN    __INT_LEAST8_MIN
 # define INT_FAST8_MAX    __INT_LEAST8_MAX
 # define UINT_FAST8_MAX  __UINT_LEAST8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH  UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH  __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH   UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST8_MIN */
 
 /* Some utility macros */
@@ -652,6 +907,16 @@
 #define PTRDIFF_MAX   __PTRDIFF_MAX__
 #define    SIZE_MAX      __SIZE_MAX__
 
+/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTPTR_WIDTH  __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
 /* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
  * is enabled. */
 #if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +928,16 @@
 #define  INTMAX_MAX   __INTMAX_MAX__
 #define UINTMAX_MAX  __UINTMAX_MAX__
 
+/* C2x 7.20.2.5 Width of greatest-width integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
 /* C99 7.18.3 Limits of other integer types. */
 #define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
 #define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +964,16 @@
 #define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
 #define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
 
+/* C2x 7.20.3.x Width of other integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+#define PTRDIFF_WIDTH    __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH       __SIZE_WIDTH__
+#define WCHAR_WIDTH      __WCHAR_WIDTH__
+#define WINT_WIDTH       __WINT_WIDTH__
+#endif
+
 #endif /* __STDC_HOSTED__ */
 #endif /* __CLANG_STDINT_H */
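 
 [Editor's note] The recurring undef-before-define churn above makes the __int_least*/__INT_LEAST* fallbacks safe to re-derive as successive __INTn_TYPE__ blocks narrow them, and the C2x additions expose the *_WIDTH macros. A hedged probe; with this Clang the macros appear only under -std=c2x:
 
     #include <stdint.h>
     #include <stdio.h>
 
     int main(void) {
     #if defined(INT32_WIDTH) && defined(INTPTR_WIDTH)
         /* Only defined in C2x mode, per the guards above. */
         printf("int32_t: %d bits, intptr_t: %d bits\n", INT32_WIDTH, INTPTR_WIDTH);
     #else
         printf("*_WIDTH macros not available before C2x\n");
     #endif
         return 0;
     }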
diff --git a/darwin-x86/lib64/clang/16.0.2/include/stdnoreturn.h b/darwin-x86/lib64/clang/16.0.2/include/stdnoreturn.h
new file mode 100644
index 0000000..967be94
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/stdnoreturn.h
@@ -0,0 +1,29 @@
+/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) &&               \
+    !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* The noreturn macro is deprecated in C2x. We do not mark it as such because
+   including the header file in C2x is also deprecated and we do not want to
+   issue a confusing diagnostic for code which includes <stdnoreturn.h>
+   followed by code that writes [[noreturn]]. The issue with such code is not
+   with the attribute, or the use of 'noreturn', but the inclusion of the
+   header. */
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#endif
+
+#endif /* __STDNORETURN_H */
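 
 [Editor's note] A minimal use of the new header; noreturn expands to _Noreturn exactly as defined above:
 
     #include <stdnoreturn.h>
     #include <stdlib.h>
 
     noreturn void fail(void) {  /* expands to: _Noreturn void fail(void) */
         abort();
     }
 
     int main(void) {
         fail();  /* never returns */
     }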
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tbmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/tbmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tbmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/tbmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tgmath.h b/darwin-x86/lib64/clang/16.0.2/include/tgmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tgmath.h
rename to darwin-x86/lib64/clang/16.0.2/include/tgmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/tmmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/tmmintrin.h
index bcffa81..cb9be23 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/tmmintrin.h
@@ -53,7 +53,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi8(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
 }
 
 /// Computes the absolute value of each of the packed 16-bit signed
@@ -89,7 +89,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi16(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
 }
 
 /// Computes the absolute value of each of the packed 32-bit signed
@@ -125,7 +125,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi32(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+    return (__m128i)__builtin_elementwise_abs((__v4si)__a);
 }
 
 /// Concatenates the two 128-bit integer vector operands, and
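 
 [Editor's note] _mm_abs_epi8/16/32 now lower through the generic __builtin_elementwise_abs rather than the ia32 builtins; observable results are identical. A small check, built with -mssse3:
 
     #include <tmmintrin.h>
     #include <stdio.h>
 
     int main(void) {
         __m128i v = _mm_setr_epi8(-1, 2, -3, 4, -5, 6, -7, 8,
                                   -9, 10, -11, 12, -13, 14, -15, 16);
         signed char out[16];
         _mm_storeu_si128((__m128i *)out, _mm_abs_epi8(v));  /* per-lane |x| */
         for (int i = 0; i < 16; i++)
             printf("%d ", out[i]);  /* 1 2 3 ... 16 */
         printf("\n");
         return 0;
     }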
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h b/darwin-x86/lib64/clang/16.0.2/include/tsxldtrkintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/tsxldtrkintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/uintrintrin.h b/darwin-x86/lib64/clang/16.0.2/include/uintrintrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/uintrintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/uintrintrin.h
index e3839dc..135dc81 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/uintrintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/uintrintrin.h
@@ -39,9 +39,9 @@
 ///
 /// This intrinsic corresponds to the <c> CLUI </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///   UIF := 0
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _clui (void)
 {
@@ -60,9 +60,9 @@
 ///
 /// This intrinsic corresponds to the <c> STUI </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///   UIF := 1
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _stui (void)
 {
@@ -81,7 +81,7 @@
 ///
 /// \returns The current value of the user interrupt flag (UIF).
 ///
-/// \operation
+/// \code{.operation}
 ///   CF := UIF
 ///   ZF := 0
 ///   AF := 0
@@ -89,7 +89,7 @@
 ///   PF := 0
 ///   SF := 0
 ///   dst := CF
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _testui (void)
 {
@@ -110,7 +110,7 @@
 ///    Index of user-interrupt target table entry in user-interrupt target
 ///    table.
 ///
-/// \operation
+/// \code{.operation}
 ///   IF __a > UITTSZ
 ///     GP (0)
 ///   FI
@@ -143,7 +143,7 @@
 ///       SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
 ///     FI
 ///   FI
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _senduipi (unsigned long long __a)
 {
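 
 [Editor's note] Only the documentation markup changes here (\operation becomes \code{.operation}); the intrinsics themselves are untouched. A hedged sketch of the UIF helpers, assuming a -muintr build, UINTR-capable hardware, and inclusion via the <x86gprintrin.h> umbrella header rather than <uintrintrin.h> directly:
 
     #include <x86gprintrin.h>
 
     /* Sketch only: requires -muintr and user-interrupt hardware support. */
     unsigned char toggle_uif(void) {
         _clui();           /* UIF := 0 */
         _stui();           /* UIF := 1 */
         return _testui();  /* returns the current UIF, i.e. 1 here */
     }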
diff --git a/darwin-x86/lib64/clang/14.0.2/include/unwind.h b/darwin-x86/lib64/clang/16.0.2/include/unwind.h
similarity index 95%
rename from darwin-x86/lib64/clang/14.0.2/include/unwind.h
rename to darwin-x86/lib64/clang/16.0.2/include/unwind.h
index 029524b..971a62d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/unwind.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/unwind.h
@@ -62,7 +62,8 @@
 typedef uintptr_t _uleb128_t;
 
 struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 struct _Unwind_Control_Block;
 typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
 #else
@@ -72,7 +73,7 @@
 typedef enum {
   _URC_NO_REASON = 0,
 #if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
+    !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
   _URC_OK = 0, /* used by ARM EHABI */
 #endif
   _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
@@ -86,7 +87,7 @@
   _URC_INSTALL_CONTEXT = 7,
   _URC_CONTINUE_UNWIND = 8,
 #if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
+    !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
   _URC_FAILURE = 9 /* used by ARM EHABI */
 #endif
 } _Unwind_Reason_Code;
@@ -103,7 +104,8 @@
 typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
                                              _Unwind_Exception *);
 
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 typedef struct _Unwind_Control_Block _Unwind_Control_Block;
 typedef uint32_t _Unwind_EHT_Header;
 
@@ -167,12 +169,14 @@
 typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
                                                 void *);
 
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) ||                \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 typedef enum {
   _UVRSC_CORE = 0,        /* integer register */
   _UVRSC_VFP = 1,         /* vfp */
   _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
+  _UVRSC_WMMXC = 4,       /* Intel WMMX control register */
+  _UVRSC_PSEUDO = 5       /* Special purpose pseudo register */
 } _Unwind_VRS_RegClass;
 
 typedef enum {
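 
 [Editor's note] Beyond extending the ARM EHABI guards with __SEH__ and adding the _UVRSC_PSEUDO register class, the generic entry points are unchanged, so the usual backtrace pattern over _Unwind_Trace_Fn still applies. A minimal sketch; it links against the platform unwinder, which Clang supplies by default:
 
     #include <unwind.h>
     #include <stdio.h>
 
     static _Unwind_Reason_Code trace(struct _Unwind_Context *ctx, void *arg) {
         int *depth = arg;
         printf("#%d ip=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
         return _URC_NO_REASON;  /* keep walking the stack */
     }
 
     int main(void) {
         int depth = 0;
         _Unwind_Backtrace(trace, &depth);
         return 0;
     }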
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vadefs.h b/darwin-x86/lib64/clang/16.0.2/include/vadefs.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vadefs.h
rename to darwin-x86/lib64/clang/16.0.2/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h b/darwin-x86/lib64/clang/16.0.2/include/vaesintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/vaesintrin.h
index f3c0807..294dcff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/vaesintrin.h
@@ -82,4 +82,4 @@
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_F
 
-#endif
+#endif // __VAESINTRIN_H
diff --git a/darwin-x86/lib64/clang/14.0.2/include/varargs.h b/darwin-x86/lib64/clang/16.0.2/include/varargs.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/varargs.h
rename to darwin-x86/lib64/clang/16.0.2/include/varargs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vecintrin.h b/darwin-x86/lib64/clang/16.0.2/include/vecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vecintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/vecintrin.h
diff --git a/darwin-x86/lib64/clang/16.0.2/include/velintrin.h b/darwin-x86/lib64/clang/16.0.2/include/velintrin.h
new file mode 100644
index 0000000..3f2bc00
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/velintrin.h
@@ -0,0 +1,71 @@
+/*===---- velintrin.h - VEL intrinsics for VE ------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_H__
+#define __VEL_INTRIN_H__
+
+// Vector registers
+typedef double __vr __attribute__((__vector_size__(2048)));
+
+// Vector mask registers
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+// For C99
+typedef _Bool __vm    __attribute__((ext_vector_type(256)));
+typedef _Bool __vm256 __attribute__((ext_vector_type(256)));
+typedef _Bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#ifdef __cplusplus
+// For C++
+typedef bool __vm    __attribute__((ext_vector_type(256)));
+typedef bool __vm256 __attribute__((ext_vector_type(256)));
+typedef bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#error need C++ or C99 to use vector intrinsics for VE
+#endif
+#endif
+
+enum VShuffleCodes {
+  VE_VSHUFFLE_YUYU = 0,
+  VE_VSHUFFLE_YUYL = 1,
+  VE_VSHUFFLE_YUZU = 2,
+  VE_VSHUFFLE_YUZL = 3,
+  VE_VSHUFFLE_YLYU = 4,
+  VE_VSHUFFLE_YLYL = 5,
+  VE_VSHUFFLE_YLZU = 6,
+  VE_VSHUFFLE_YLZL = 7,
+  VE_VSHUFFLE_ZUYU = 8,
+  VE_VSHUFFLE_ZUYL = 9,
+  VE_VSHUFFLE_ZUZU = 10,
+  VE_VSHUFFLE_ZUZL = 11,
+  VE_VSHUFFLE_ZLYU = 12,
+  VE_VSHUFFLE_ZLYL = 13,
+  VE_VSHUFFLE_ZLZU = 14,
+  VE_VSHUFFLE_ZLZL = 15,
+};
+
+// Use generated intrinsic name definitions
+#include <velintrin_gen.h>
+
+// Use helper functions
+#include <velintrin_approx.h>
+
+// pack
+
+#define _vel_pack_f32p __builtin_ve_vl_pack_f32p
+#define _vel_pack_f32a __builtin_ve_vl_pack_f32a
+
+static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {
+  return (((unsigned long int)a) << 32) | b;
+}
+
+#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)
+#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)
+#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)
+#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)
+
+#endif
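 
 [Editor's note] This header targets the NEC VE vector engine, so anything using it assumes a VE cross-compile. A tiny sketch of the one plain-C helper, _vel_pack_i32:
 
     #include <velintrin.h>
 
     /* hi lands in bits 63..32 and lo in bits 31..0, per the shift-or above. */
     unsigned long int pack_pair(unsigned int hi, unsigned int lo) {
         return _vel_pack_i32(hi, lo);
     }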
diff --git a/darwin-x86/lib64/clang/16.0.2/include/velintrin_approx.h b/darwin-x86/lib64/clang/16.0.2/include/velintrin_approx.h
new file mode 100644
index 0000000..89d270f
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/velintrin_approx.h
@@ -0,0 +1,120 @@
+/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_APPROX_H__
+#define __VEL_INTRIN_APPROX_H__
+
+static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_vrcps_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);
+  v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);
+  v2 = _vel_vfmuls_vvvl(v0, v3, l);
+  v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);
+  v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_pvrcp_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);
+  v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);
+  v2 = _vel_pvfmul_vvvl(v0, v3, l);
+  v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);
+  v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {
+  float s1;
+  __vr v1, v2, v3, v4;
+  v4 = _vel_vrcps_vvl(v0, l);
+  s1 = 1.0;
+  v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);
+  v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);
+  v1 = _vel_vfmuls_vsvl(s0, v2, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {
+  float s1;
+  __vr v1, v2;
+  s1 = 1.0f / s0;
+  v1 = _vel_vfmuls_vsvl(s1, v0, l);
+  v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);
+  v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {
+  __vr v1, v2, v3;
+  v2 = _vel_vrcpd_vvl(v0, l);
+  double s1 = 1.0;
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);
+  v1 = _vel_vaddul_vsvl(1, v1, l);
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);
+  v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v1 = _vel_vfmuld_vsvl(s0, v3, l);
+  v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {
+  double s0, s1;
+  __vr v1, v2, v3;
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {
+  float s0, s1;
+  __vr v1, v2, v3;
+  v0 = _vel_vcvtds_vvl(v0, l);
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v0 = _vel_vcvtsd_vvl(v0, l);
+  return v0;
+}
+
+#endif
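 
 [Editor's note] Each helper above starts from a hardware reciprocal (or reciprocal-square-root) estimate and refines it with Newton-Raphson steps before a final residual correction. A scalar analogue of _vel_approx_vfdivs_vvvl, for intuition only; the initial estimate stands in for vrcps:
 
     #include <stdio.h>
 
     static float approx_div(float a, float b) {
         float r = 1.0f / b;      /* stand-in for the hardware reciprocal estimate */
         r = r * (2.0f - b * r);  /* Newton-Raphson: squares the relative error */
         float q = a * r;         /* first quotient guess */
         q += r * (a - q * b);    /* one residual correction, as in the vector code */
         return q;
     }
 
     int main(void) {
         printf("%f\n", approx_div(10.0f, 4.0f));  /* ~2.500000 */
         return 0;
     }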
diff --git a/darwin-x86/lib64/clang/16.0.2/include/velintrin_gen.h b/darwin-x86/lib64/clang/16.0.2/include/velintrin_gen.h
new file mode 100644
index 0000000..845c0da
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/velintrin_gen.h
@@ -0,0 +1,1257 @@
+#define _vel_vld_vssl __builtin_ve_vl_vld_vssl
+#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl
+#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl
+#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl
+#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl
+#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl
+#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl
+#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl
+#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl
+#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl
+#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl
+#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl
+#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl
+#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl
+#define _vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl
+#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl
+#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl
+#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl
+#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl
+#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl
+#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl
+#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl
+#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl
+#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl
+#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl
+#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl
+#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl
+#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl
+#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl
+#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl
+#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl
+#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl
+#define _vel_vst_vssl __builtin_ve_vl_vst_vssl
+#define _vel_vst_vssml __builtin_ve_vl_vst_vssml
+#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl
+#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml
+#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl
+#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml
+#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl
+#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml
+#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl
+#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml
+#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl
+#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml
+#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl
+#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml
+#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl
+#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml
+#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl
+#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml
+#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl
+#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml
+#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl
+#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml
+#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl
+#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml
+#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl
+#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml
+#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl
+#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml
+#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl
+#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml
+#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl
+#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml
+#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl
+#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml
+#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl
+#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml
+#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl
+#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml
+#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl
+#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml
+#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl
+#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml
+#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl
+#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml
+#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl
+#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml
+#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl
+#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml
+#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl
+#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl
+#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss
+#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs
+#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs
+#define _vel_lvss_svs __builtin_ve_vl_lvss_svs
+#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss
+#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss
+#define _vel_svm_sms __builtin_ve_vl_svm_sms
+#define _vel_svm_sMs __builtin_ve_vl_svm_sMs
+#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl
+#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl
+#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl
+#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl
+#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl
+#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl
+#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl
+#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl
+#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl
+#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl
+#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl
+#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl
+#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl
+#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl
+#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl
+#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl
+#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl
+#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl
+#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl
+#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl
+#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl
+#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl
+#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl
+#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl
+#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl
+#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl
+#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl
+#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl
+#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl
+#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl
+#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl
+#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl
+#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl
+#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl
+#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl
+#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl
+#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl
+#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl
+#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl
+#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl
+#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl
+#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl
+#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl
+#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl
+#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl
+#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl
+#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl
+#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl
+#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl
+#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl
+#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl
+#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl
+#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl
+#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl
+#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl
+#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl
+#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl
+#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl
+#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl
+#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl
+#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl
+#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl
+#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl
+#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl
+#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl
+#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl
+#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl
+#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl
+#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl
+#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl
+#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl
+#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl
+#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl
+#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl
+#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl
+#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl
+#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl
+#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl
+#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl
+#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl
+#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl
+#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl
+#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl
+#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl
+#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl
+#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl
+#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl
+#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl
+#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl
+#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl
+#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl
+#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl
+#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl
+#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl
+#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl
+#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl
+#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl
+#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl
+#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl
+#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl
+#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl
+#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl
+#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl
+#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl
+#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl
+#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl
+#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl
+#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl
+#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl
+#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl
+#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl
+#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl
+#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl
+#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl
+#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl
+#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl
+#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl
+#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl
+#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl
+#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl
+#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl
+#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl
+#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl
+#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl
+#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl
+#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl
+#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl
+#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl
+#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl
+#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl
+#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl
+#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl
+#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl
+#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl
+#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl
+#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl
+#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl
+#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl
+#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl
+#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl
+#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl
+#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl
+#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl
+#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl
+#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl
+#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl
+#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl
+#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl
+#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl
+#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl
+#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl
+#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl
+#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl
+#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl
+#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl
+#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl
+#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl
+#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl
+#define _vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl
+#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl
+#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl
+#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl
+#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl
+#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl
+#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl
+#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl
+#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl
+#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl
+#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl
+#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl
+#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl
+#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl
+#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl
+#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl
+#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl
+#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl
+#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl
+#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl
+#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl
+#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl
+#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl
+#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl
+#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl
+#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl
+#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl
+#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl
+#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl
+#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl
+#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl
+#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl
+#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl
+#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl
+#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl
+#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl
+#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl
+#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl
+#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl
+#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl
+#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl
+#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl
+#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl
+#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl
+#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl
+#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl
+#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl
+#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl
+#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl
+#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl
+#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl
+#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl
+#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl
+#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl
+#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl
+#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl
+#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl
+#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl
+#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl
+#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl
+#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl
+#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl
+#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl
+#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl
+#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl
+#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl
+#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl
+#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl
+#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl
+#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl
+#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl
+#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl
+#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl
+#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl
+#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl
+#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl
+#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl
+#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl
+#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl
+#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl
+#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl
+#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl
+#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl
+#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl
+#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl
+#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl
+#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl
+#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl
+#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl
+#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl
+#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl
+#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl
+#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl
+#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl
+#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl
+#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl
+#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl
+#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl
+#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl
+#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl
+#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl
+#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl
+#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl
+#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl
+#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl
+#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl
+#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl
+#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl
+#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl
+#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl
+#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl
+#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl
+#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl
+#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl
+#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl
+#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl
+#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl
+#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl
+#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl
+#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl
+#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl
+#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl
+#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl
+#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl
+#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl
+#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl
+#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl
+#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl
+#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl
+#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl
+#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl
+#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl
+#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl
+#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl
+#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl
+#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl
+#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl
+#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl
+#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl
+#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl
+#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl
+#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl
+#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl
+#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl
+#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl
+#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl
+#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl
+#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl
+#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl
+#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl
+#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl
+#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl
+#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl
+#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl
+#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl
+#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl
+#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl
+#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl
+#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl
+#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl
+#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl
+#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl
+#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl
+#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl
+#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl
+#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl
+#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl
+#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl
+#define _vel_pvldzup_vvvl __builtin_ve_vl_pvldzup_vvvl
+#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl
+#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl
+#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl
+#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl
+#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl
+#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl
+#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl
+#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl
+#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl
+#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl
+#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl
+#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl
+#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl
+#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl
+#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl
+#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl
+#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl
+#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl
+#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl
+#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl
+#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl
+#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl
+#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl
+#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl
+#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl
+#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl
+#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl
+#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl
+#define _vel_vseq_vl __builtin_ve_vl_vseq_vl
+#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl
+#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl
+#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl
+#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl
+#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl
+#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl
+#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl
+#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl
+#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl
+#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl
+#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl
+#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl
+#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl
+#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl
+#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl
+#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl
+#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl
+#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl
+#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl
+#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl
+#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl
+#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl
+#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl
+#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl
+#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl
+#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl
+#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl
+#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl
+#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl
+#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl
+#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl
+#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl
+#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl
+#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl
+#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl
+#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl
+#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl
+#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl
+#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl
+#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl
+#define _vel_vslawzx_vvsvl __builtin_ve_vl_vslawzx_vvsvl
+#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl
+#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl
+#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl
+#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl
+#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl
+#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl
+#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl
+#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl
+#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl
+#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl
+#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl
+#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl
+#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl
+#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl
+#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl
+#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl
+#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl
+#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl
+#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl
+#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl
+#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl
+#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl
+#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl
+#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl
+#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl
+#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl
+#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl
+#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl
+#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl
+#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl
+#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl
+#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl
+#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl
+#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl
+#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl
+#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl
+#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl
+#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl
+#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl
+#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl
+#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl
+#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl
+#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl
+#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl
+#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl
+#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl
+#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl
+#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl
+#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl
+#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl
+#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl
+#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl
+#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl
+#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl
+#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl
+#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl
+#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl
+#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl
+#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl
+#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl
+#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl
+#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl
+#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl
+#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl
+#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl
+#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl
+#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl
+#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl
+#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl
+#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl
+#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl
+#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl
+#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl
+#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl
+#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl
+#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl
+#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl
+#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl
+#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl
+#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl
+#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl
+#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl
+#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl
+#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl
+#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl
+#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl
+#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl
+#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl
+#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl
+#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl
+#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl
+#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl
+#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl
+#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl
+#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl
+#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl
+#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl
+#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl
+#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl
+#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl
+#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl
+#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl
+#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl
+#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl
+#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl
+#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl
+#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl
+#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl
+#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl
+#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl
+#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl
+#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl
+#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl
+#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl
+#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl
+#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl
+#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl
+#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl
+#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl
+#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl
+#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl
+#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl
+#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl
+#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl
+#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl
+#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl
+#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl
+#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl
+#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl
+#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl
+#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl
+#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl
+#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl
+#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl
+#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl
+#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl
+#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl
+#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl
+#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl
+#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl
+#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl
+#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl
+#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl
+#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl
+#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl
+#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl
+#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl
+#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl
+#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl
+#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl
+#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl
+#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl
+#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl
+#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl
+#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl
+#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl
+#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl
+#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl
+#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl
+#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl
+#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl
+#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl
+#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl
+#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl
+#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl
+#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl
+#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl
+#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl
+#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl
+#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl
+#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl
+#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl
+#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl
+#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl
+#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl
+#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl
+#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl
+#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl
+#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl
+#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl
+#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl
+#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl
+#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl
+#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl
+#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl
+#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl
+#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl
+#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl
+#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl
+#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl
+#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl
+#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl
+#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl
+#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl
+#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl
+#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl
+#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl
+#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl
+#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl
+#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl
+#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl
+#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl
+#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl
+#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl
+#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl
+#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl
+#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl
+#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl
+#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl
+#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl
+#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl
+#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl
+#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl
+#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl
+#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl
+#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl
+#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl
+#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl
+#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl
+#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl
+#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl
+#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl
+#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl
+#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl
+#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl
+#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl
+#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl
+#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl
+#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl
+#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl
+#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl
+#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl
+#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl
+#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl
+#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl
+#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl
+#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl
+#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl
+#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl
+#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl
+#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl
+#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl
+#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl
+#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl
+#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl
+#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl
+#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl
+#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl
+#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl
+#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl
+#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl
+#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl
+#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl
+#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl
+#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl
+#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl
+#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl
+#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl
+#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl
+#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl
+#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl
+#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl
+#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl
+#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl
+#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl
+#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl
+#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl
+#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl
+#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl
+#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl
+#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl
+#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl
+#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl
+#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl
+#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl
+#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl
+#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl
+#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl
+#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl
+#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl
+#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl
+#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl
+#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl
+#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl
+#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl
+#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl
+#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl
+#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl
+#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl
+#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl
+#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl
+#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl
+#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl
+#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl
+#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl
+#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl
+#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl
+#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl
+#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl
+#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl
+#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl
+#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl
+#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl
+#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl
+#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl
+#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl
+#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl
+#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl
+#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl
+#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl
+#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl
+#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl
+#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl
+#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl
+#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl
+#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl
+#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl
+#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl
+#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl
+#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl
+#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl
+#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl
+#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl
+#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl
+#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl
+#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl
+#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl
+#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl
+#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl
+#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl
+#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl
+#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl
+#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl
+#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl
+#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl
+#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl
+#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml
+#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl
+#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml
+#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl
+#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl
+#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl
+#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl
+#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl
+#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl
+#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl
+#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl
+#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl
+#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml
+#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml
+#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml
+#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml
+#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl
+#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml
+#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl
+#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml
+#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl
+#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml
+#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl
+#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml
+#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl
+#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml
+#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl
+#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml
+#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl
+#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml
+#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl
+#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml
+#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl
+#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml
+#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl
+#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml
+#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl
+#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml
+#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl
+#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml
+#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl
+#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml
+#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl
+#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml
+#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl
+#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml
+#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl
+#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml
+#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl
+#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml
+#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl
+#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml
+#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl
+#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml
+#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl
+#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml
+#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl
+#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml
+#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl
+#define _vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml
+#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl
+#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml
+#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl
+#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml
+#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl
+#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml
+#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl
+#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml
+#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl
+#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml
+#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl
+#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml
+#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl
+#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl
+#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml
+#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml
+#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl
+#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl
+#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml
+#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml
+#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl
+#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl
+#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml
+#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml
+#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl
+#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl
+#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml
+#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml
+#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl
+#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl
+#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml
+#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml
+#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl
+#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl
+#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml
+#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml
+#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl
+#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl
+#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml
+#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml
+#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl
+#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl
+#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml
+#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml
+#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl
+#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl
+#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml
+#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml
+#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl
+#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl
+#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml
+#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml
+#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl
+#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl
+#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml
+#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl
+#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl
+#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml
+#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml
+#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl
+#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl
+#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml
+#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml
+#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl
+#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl
+#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml
+#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml
+#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl
+#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl
+#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl
+#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl
+#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl
+#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl
+#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl
+#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl
+#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl
+#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl
+#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl
+#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl
+#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl
+#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl
+#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl
+#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl
+#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl
+#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl
+#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl
+#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl
+#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl
+#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl
+#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl
+#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl
+#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl
+#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl
+#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl
+#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl
+#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl
+#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml
+#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl
+#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml
+#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl
+#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml
+#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl
+#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml
+#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl
+#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml
+#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl
+#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml
+#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl
+#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml
+#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl
+#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml
+#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl
+#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml
+#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml
+#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl
+#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml
+#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl
+#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml
+#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl
+#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml
+#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl
+#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml
+#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl
+#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml
+#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl
+#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml
+#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl
+#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml
+#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl
+#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml
+#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl
+#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml
+#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl
+#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml
+#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl
+#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml
+#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl
+#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml
+#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl
+#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml
+#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl
+#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml
+#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl
+#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml
+#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl
+#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml
+#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl
+#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml
+#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl
+#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml
+#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl
+#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl
+#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml
+#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml
+#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl
+#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl
+#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml
+#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml
+#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl
+#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl
+#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml
+#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml
+#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl
+#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl
+#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml
+#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml
+#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl
+#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl
+#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml
+#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml
+#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl
+#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl
+#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml
+#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml
+#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl
+#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl
+#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml
+#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml
+#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl
+#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl
+#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml
+#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml
+#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl
+#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl
+#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml
+#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml
+#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl
+#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl
+#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml
+#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml
+#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl
+#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl
+#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml
+#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml
+#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl
+#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl
+#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml
+#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml
+#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl
+#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl
+#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml
+#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml
+#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl
+#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl
+#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml
+#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml
+#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl
+#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl
+#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl
+#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl
+#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl
+#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl
+#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl
+#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl
+#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl
+#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl
+#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl
+#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl
+#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl
+#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl
+#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl
+#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl
+#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl
+#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl
+#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl
+#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl
+#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl
+#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl
+#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl
+#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl
+#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl
+#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl
+#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl
+#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl
+#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl
+#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml
+#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl
+#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml
+#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl
+#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml
+#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl
+#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml
+#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl
+#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml
+#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl
+#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl
+#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl
+#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl
+#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl
+#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl
+#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl
+#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl
+#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl
+#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl
+#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl
+#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl
+#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl
+#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl
+#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl
+#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl
+#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl
+#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl
+#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl
+#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl
+#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl
+#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl
+#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl
+#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl
+#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl
+#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl
+#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl
+#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl
+#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl
+#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl
+#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl
+#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl
+#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl
+#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl
+#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl
+#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl
+#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl
+#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl
+#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl
+#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl
+#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl
+#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml
+#define _vel_vror_vvl __builtin_ve_vl_vror_vvl
+#define _vel_vror_vvml __builtin_ve_vl_vror_vvml
+#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl
+#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml
+#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl
+#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl
+#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml
+#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl
+#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl
+#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl
+#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml
+#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl
+#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl
+#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl
+#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml
+#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl
+#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl
+#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl
+#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml
+#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl
+#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl
+#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl
+#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml
+#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl
+#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl
+#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl
+#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml
+#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl
+#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl
+#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl
+#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml
+#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl
+#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl
+#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl
+#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml
+#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl
+#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl
+#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml
+#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl
+#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml
+#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl
+#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml
+#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl
+#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml
+#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl
+#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml
+#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl
+#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml
+#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl
+#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml
+#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl
+#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml
+#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl
+#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml
+#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl
+#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml
+#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl
+#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml
+#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl
+#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml
+#define _vel_andm_mmm __builtin_ve_vl_andm_mmm
+#define _vel_andm_MMM __builtin_ve_vl_andm_MMM
+#define _vel_orm_mmm __builtin_ve_vl_orm_mmm
+#define _vel_orm_MMM __builtin_ve_vl_orm_MMM
+#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm
+#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM
+#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm
+#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM
+#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm
+#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM
+#define _vel_negm_mm __builtin_ve_vl_negm_mm
+#define _vel_negm_MM __builtin_ve_vl_negm_MM
+#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml
+#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml
+#define _vel_tovm_sml __builtin_ve_vl_tovm_sml
+#define _vel_lcr_sss __builtin_ve_vl_lcr_sss
+#define _vel_scr_sss __builtin_ve_vl_scr_sss
+#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss
+#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss
+#define _vel_fencei __builtin_ve_vl_fencei
+#define _vel_fencem_s __builtin_ve_vl_fencem_s
+#define _vel_fencec_s __builtin_ve_vl_fencec_s
+#define _vel_svob __builtin_ve_vl_svob
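Each _vel_ macro above aliases one NEC VE (SX-Aurora) vector builtin; the suffix encodes the operand signature (v = vector register, s = scalar, m/M = 256-/512-bit mask, trailing l = explicit vector length). A minimal sketch of how these intrinsics compose — assuming the __vr vector type and the load/store forms _vel_vld_vssl/_vel_vst_vssl defined earlier in this header, which are not part of this hunk:

#include <velintrin.h>

void vadd_f64(const double *a, const double *b, double *c, int n) {
  for (int i = 0; i < n; i += 256) {
    int vl = (n - i < 256) ? (n - i) : 256;            /* VE vector length, at most 256 */
    __vr va = _vel_vld_vssl(8, (void *)(a + i), vl);   /* load vl doubles, 8-byte stride */
    __vr vb = _vel_vld_vssl(8, (void *)(b + i), vl);
    __vr vc = _vel_vfaddd_vvvl(va, vb, vl);            /* double add; see defines above */
    _vel_vst_vssl(vc, 8, c + i, vl);                   /* store vl results */
  }
}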
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h b/darwin-x86/lib64/clang/16.0.2/include/vpclmulqdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/vpclmulqdqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/waitpkgintrin.h b/darwin-x86/lib64/clang/16.0.2/include/waitpkgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/waitpkgintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h b/darwin-x86/lib64/clang/16.0.2/include/wasm_simd128.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h
rename to darwin-x86/lib64/clang/16.0.2/include/wasm_simd128.h
index 3889a27..f93de12 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/wasm_simd128.h
@@ -1405,12 +1405,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+  return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)__a);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+  return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)__a);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h b/darwin-x86/lib64/clang/16.0.2/include/wbnoinvdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/wbnoinvdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/wmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/wmmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/wmmintrin.h
diff --git a/darwin-x86/lib64/clang/16.0.2/include/x86gprintrin.h b/darwin-x86/lib64/clang/16.0.2/include/x86gprintrin.h
new file mode 100644
index 0000000..81d7360
--- /dev/null
+++ b/darwin-x86/lib64/clang/16.0.2/include/x86gprintrin.h
@@ -0,0 +1,50 @@
+/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#define __X86GPRINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__HRESET__)
+#include <hresetintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__UINTR__)
+#include <uintrintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CRC32__)
+#include <crc32intrin.h>
+#endif
+
+#if defined(__i386__)
+#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"
+#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};"
+#define __TMPGPR "eax"
+#else
+// On a 64-bit target, the 32-bit operands generate a 32-bit result that
+// is zero-extended to a 64-bit result in the destination general-purpose
+// register. This means "mov x, %ebx" will clobber the upper 32 bits of
+// rbx, so we must preserve the 64-bit register rbx.
+#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};"
+#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};"
+#define __TMPGPR "rax"
+#endif
+
+#define __SSC_MARK(__Tag)                                                      \
+  __asm__ __volatile__( __SAVE_GPRBX                                           \
+                       "mov {%0, %%ebx|ebx, %0}; "                             \
+                       ".byte 0x64, 0x67, 0x90; "                              \
+                        __RESTORE_GPRBX                                        \
+                       ::"i"(__Tag)                                            \
+                       :  __TMPGPR );
+
+#endif /* __X86GPRINTRIN_H */
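
A minimal usage sketch of the new __SSC_MARK macro: it loads the tag into
ebx and emits the ".byte 0x64, 0x67, 0x90" marker NOP that tools such as
Intel SDE recognize, while saving and restoring rbx as the comment above
describes. The tag values 1 (start) and 2 (stop) follow the common SDE
convention and are an assumption here; the header itself does not assign
meaning to tags.

    #include <x86gprintrin.h>

    void hot_loop(int *a, int n) {
      __SSC_MARK(1); /* assumed "start of region" tag for the tracing tool */
      for (int i = 0; i < n; ++i)
        a[i] *= 2;
      __SSC_MARK(2); /* assumed "end of region" tag */
    }
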
diff --git a/darwin-x86/lib64/clang/14.0.2/include/x86intrin.h b/darwin-x86/lib64/clang/16.0.2/include/x86intrin.h
similarity index 92%
rename from darwin-x86/lib64/clang/14.0.2/include/x86intrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/x86intrin.h
index 768d0e5..450fd00 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/x86intrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/x86intrin.h
@@ -59,5 +59,9 @@
 #include <clzerointrin.h>
 #endif
 
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDPRU__)
+#include <rdpruintrin.h>
+#endif
 
 #endif /* __X86INTRIN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xmmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xmmintrin.h
index 1612d3d..b3a14ad 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h
+++ b/darwin-x86/lib64/clang/16.0.2/include/xmmintrin.h
@@ -2086,7 +2086,7 @@
 /// \headerfile <x86intrin.h>
 ///
 /// \code
-/// void _mm_prefetch(const void * a, const int sel);
+/// void _mm_prefetch(const void *a, const int sel);
 /// \endcode
 ///
 /// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
@@ -2360,7 +2360,10 @@
 ///    00: assigned from bits [15:0] of \a a. \n
 ///    01: assigned from bits [31:16] of \a a. \n
 ///    10: assigned from bits [47:32] of \a a. \n
-///    11: assigned from bits [63:48] of \a a.
+///    11: assigned from bits [63:48] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 64-bit integer vector containing the shuffled values.
 #define _mm_shuffle_pi16(a, n) \
   ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))
@@ -2602,7 +2605,10 @@
 ///    00: Bits [31:0] copied from the specified operand. \n
 ///    01: Bits [63:32] copied from the specified operand. \n
 ///    10: Bits [95:64] copied from the specified operand. \n
-///    11: Bits [127:96] copied from the specified operand.
+///    11: Bits [127:96] copied from the specified operand. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit vector of [4 x float] containing the shuffled values.
 #define _mm_shuffle_ps(a, b, mask) \
   ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
@@ -2999,7 +3005,6 @@
 #define _m_pavgw _mm_avg_pu16
 #define _m_psadbw _mm_sad_pu8
 #define _m_ _mm_
-#define _m_ _mm_
 
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_MMX
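
The _MM_SHUFFLE note added in the documentation above can be made concrete
with a short sketch: _MM_SHUFFLE(z, y, x, w) packs four 2-bit lane selectors
into one immediate byte, (z << 6) | (y << 4) | (x << 2) | w. Assuming SSE is
available (the function name is illustrative):

    #include <xmmintrin.h>

    static __m128 reverse_ps(__m128 v) {
      /* Mask 0x1B selects lanes 3,2 of the first operand for the low two
         result lanes and lanes 1,0 of the second operand for the high two;
         with both operands equal to v, this reverses the four floats. */
      return _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3));
    }
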
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xopintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xopintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xopintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xopintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsavecintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xsavecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsavecintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xsavecintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsaveintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xsaveintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsaveintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xsaveintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xsaveoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xsaveoptintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsavesintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xsavesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsavesintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xsavesintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xtestintrin.h b/darwin-x86/lib64/clang/16.0.2/include/xtestintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xtestintrin.h
rename to darwin-x86/lib64/clang/16.0.2/include/xtestintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt b/darwin-x86/lib64/clang/16.0.2/share/asan_ignorelist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt
rename to darwin-x86/lib64/clang/16.0.2/share/asan_ignorelist.txt
diff --git a/darwin-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt b/darwin-x86/lib64/clang/16.0.2/share/cfi_ignorelist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt
rename to darwin-x86/lib64/clang/16.0.2/share/cfi_ignorelist.txt
diff --git a/darwin-x86/lib64/libbase.dylib b/darwin-x86/lib64/libbase.dylib
index 4b405c7..5807361 100755
--- a/darwin-x86/lib64/libbase.dylib
+++ b/darwin-x86/lib64/libbase.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.1.dylib b/darwin-x86/lib64/libc++.1.dylib
index ffda1a3..8cbea7c 100755
--- a/darwin-x86/lib64/libc++.1.dylib
+++ b/darwin-x86/lib64/libc++.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.dylib b/darwin-x86/lib64/libc++.dylib
index 9121ca8..6c21d80 100755
--- a/darwin-x86/lib64/libc++.dylib
+++ b/darwin-x86/lib64/libc++.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++abi.1.dylib b/darwin-x86/lib64/libc++abi.1.dylib
index b47ac58..fc87f45 100755
--- a/darwin-x86/lib64/libc++abi.1.dylib
+++ b/darwin-x86/lib64/libc++abi.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libclang-cpp.dylib b/darwin-x86/lib64/libclang-cpp.dylib
index e7e46d7..b42be62 100755
--- a/darwin-x86/lib64/libclang-cpp.dylib
+++ b/darwin-x86/lib64/libclang-cpp.dylib
Binary files differ
diff --git a/darwin-x86/lib64/liblog.dylib b/darwin-x86/lib64/liblog.dylib
index 9f0f1b1..6bf3c24 100755
--- a/darwin-x86/lib64/liblog.dylib
+++ b/darwin-x86/lib64/liblog.dylib
Binary files differ
diff --git a/linux-x86/bin/bindgen b/linux-x86/bin/bindgen
index bc0ef1d..4e83dee 100755
--- a/linux-x86/bin/bindgen
+++ b/linux-x86/bin/bindgen
Binary files differ
diff --git a/linux-x86/bin/cxx_extractor b/linux-x86/bin/cxx_extractor
index 7ab8dff..097a449 100755
--- a/linux-x86/bin/cxx_extractor
+++ b/linux-x86/bin/cxx_extractor
Binary files differ
diff --git a/linux-x86/bin/header-abi-diff b/linux-x86/bin/header-abi-diff
index 30ce9d3..57dfbe8 100755
--- a/linux-x86/bin/header-abi-diff
+++ b/linux-x86/bin/header-abi-diff
Binary files differ
diff --git a/linux-x86/bin/header-abi-dumper b/linux-x86/bin/header-abi-dumper
index 30f803a..5348a4e 100755
--- a/linux-x86/bin/header-abi-dumper
+++ b/linux-x86/bin/header-abi-dumper
Binary files differ
diff --git a/linux-x86/bin/header-abi-linker b/linux-x86/bin/header-abi-linker
index 521954a..b9ee9b5 100755
--- a/linux-x86/bin/header-abi-linker
+++ b/linux-x86/bin/header-abi-linker
Binary files differ
diff --git a/linux-x86/bin/proto_metadata_plugin b/linux-x86/bin/proto_metadata_plugin
index c2f2579..c6bc133 100755
--- a/linux-x86/bin/proto_metadata_plugin
+++ b/linux-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/linux-x86/bin/protoc_extractor b/linux-x86/bin/protoc_extractor
index 7217d3c..454b543 100755
--- a/linux-x86/bin/protoc_extractor
+++ b/linux-x86/bin/protoc_extractor
Binary files differ
diff --git a/linux-x86/bin/versioner b/linux-x86/bin/versioner
index 98a5765..7659587 100755
--- a/linux-x86/bin/versioner
+++ b/linux-x86/bin/versioner
Binary files differ
diff --git a/linux-x86/clang-headers b/linux-x86/clang-headers
index fabd016..e62fa5e 120000
--- a/linux-x86/clang-headers
+++ b/linux-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/14.0.2/include
\ No newline at end of file
+lib/clang/16.0.2/include
\ No newline at end of file
diff --git a/linux-x86/lib b/linux-x86/lib
new file mode 120000
index 0000000..6b60c32
--- /dev/null
+++ b/linux-x86/lib
@@ -0,0 +1 @@
+lib64
\ No newline at end of file
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
deleted file mode 100644
index e0875bb..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_INTRINSICS_H__
-#define __CLANG_CUDA_INTRINSICS_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-// sm_30 intrinsics: __shfl_{up,down,xor}.
-
-#define __SM_30_INTRINSICS_H__
-#define __SM_30_INTRINSICS_HPP__
-
-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
-
-#pragma push_macro("__MAKE_SHUFFLES")
-#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask,    \
-                        __Type)                                                \
-  inline __device__ int __FnName(int __val, __Type __offset,                   \
-                                 int __width = warpSize) {                     \
-    return __IntIntrinsic(__val, __offset,                                     \
-                          ((warpSize - __width) << 8) | (__Mask));             \
-  }                                                                            \
-  inline __device__ float __FnName(float __val, __Type __offset,               \
-                                   int __width = warpSize) {                   \
-    return __FloatIntrinsic(__val, __offset,                                   \
-                            ((warpSize - __width) << 8) | (__Mask));           \
-  }                                                                            \
-  inline __device__ unsigned int __FnName(unsigned int __val, __Type __offset, \
-                                          int __width = warpSize) {            \
-    return static_cast<unsigned int>(                                          \
-        ::__FnName(static_cast<int>(__val), __offset, __width));               \
-  }                                                                            \
-  inline __device__ long long __FnName(long long __val, __Type __offset,       \
-                                       int __width = warpSize) {               \
-    struct __Bits {                                                            \
-      int __a, __b;                                                            \
-    };                                                                         \
-    _Static_assert(sizeof(__val) == sizeof(__Bits));                           \
-    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                         \
-    __Bits __tmp;                                                              \
-    memcpy(&__tmp, &__val, sizeof(__val));                                     \
-    __tmp.__a = ::__FnName(__tmp.__a, __offset, __width);                      \
-    __tmp.__b = ::__FnName(__tmp.__b, __offset, __width);                      \
-    long long __ret;                                                           \
-    memcpy(&__ret, &__tmp, sizeof(__tmp));                                     \
-    return __ret;                                                              \
-  }                                                                            \
-  inline __device__ long __FnName(long __val, __Type __offset,                 \
-                                  int __width = warpSize) {                    \
-    _Static_assert(sizeof(long) == sizeof(long long) ||                        \
-                   sizeof(long) == sizeof(int));                               \
-    if (sizeof(long) == sizeof(long long)) {                                   \
-      return static_cast<long>(                                                \
-          ::__FnName(static_cast<long long>(__val), __offset, __width));       \
-    } else if (sizeof(long) == sizeof(int)) {                                  \
-      return static_cast<long>(                                                \
-          ::__FnName(static_cast<int>(__val), __offset, __width));             \
-    }                                                                          \
-  }                                                                            \
-  inline __device__ unsigned long __FnName(                                    \
-      unsigned long __val, __Type __offset, int __width = warpSize) {          \
-    return static_cast<unsigned long>(                                         \
-        ::__FnName(static_cast<long>(__val), __offset, __width));              \
-  }                                                                            \
-  inline __device__ unsigned long long __FnName(                               \
-      unsigned long long __val, __Type __offset, int __width = warpSize) {     \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        static_cast<unsigned long long>(__val), __offset, __width));           \
-  }                                                                            \
-  inline __device__ double __FnName(double __val, __Type __offset,             \
-                                    int __width = warpSize) {                  \
-    long long __tmp;                                                           \
-    _Static_assert(sizeof(__tmp) == sizeof(__val));                            \
-    memcpy(&__tmp, &__val, sizeof(__val));                                     \
-    __tmp = ::__FnName(__tmp, __offset, __width);                              \
-    double __ret;                                                              \
-    memcpy(&__ret, &__tmp, sizeof(__ret));                                     \
-    return __ret;                                                              \
-  }
-
-__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f, int);
-// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
-// maxLane.
-__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0,
-                unsigned int);
-__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f,
-                unsigned int);
-__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
-                int);
-#pragma pop_macro("__MAKE_SHUFFLES")
-
-#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
-
-#if CUDA_VERSION >= 9000
-#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300)
-// __shfl_sync_* variants available in CUDA-9
-#pragma push_macro("__MAKE_SYNC_SHUFFLES")
-#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic,       \
-                             __Mask, __Type)                                   \
-  inline __device__ int __FnName(unsigned int __mask, int __val,               \
-                                 __Type __offset, int __width = warpSize) {    \
-    return __IntIntrinsic(__mask, __val, __offset,                             \
-                          ((warpSize - __width) << 8) | (__Mask));             \
-  }                                                                            \
-  inline __device__ float __FnName(unsigned int __mask, float __val,           \
-                                   __Type __offset, int __width = warpSize) {  \
-    return __FloatIntrinsic(__mask, __val, __offset,                           \
-                            ((warpSize - __width) << 8) | (__Mask));           \
-  }                                                                            \
-  inline __device__ unsigned int __FnName(unsigned int __mask,                 \
-                                          unsigned int __val, __Type __offset, \
-                                          int __width = warpSize) {            \
-    return static_cast<unsigned int>(                                          \
-        ::__FnName(__mask, static_cast<int>(__val), __offset, __width));       \
-  }                                                                            \
-  inline __device__ long long __FnName(unsigned int __mask, long long __val,   \
-                                       __Type __offset,                        \
-                                       int __width = warpSize) {               \
-    struct __Bits {                                                            \
-      int __a, __b;                                                            \
-    };                                                                         \
-    _Static_assert(sizeof(__val) == sizeof(__Bits));                           \
-    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                         \
-    __Bits __tmp;                                                              \
-    memcpy(&__tmp, &__val, sizeof(__val));                                     \
-    __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width);              \
-    __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width);              \
-    long long __ret;                                                           \
-    memcpy(&__ret, &__tmp, sizeof(__tmp));                                     \
-    return __ret;                                                              \
-  }                                                                            \
-  inline __device__ unsigned long long __FnName(                               \
-      unsigned int __mask, unsigned long long __val, __Type __offset,          \
-      int __width = warpSize) {                                                \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        __mask, static_cast<unsigned long long>(__val), __offset, __width));   \
-  }                                                                            \
-  inline __device__ long __FnName(unsigned int __mask, long __val,             \
-                                  __Type __offset, int __width = warpSize) {   \
-    _Static_assert(sizeof(long) == sizeof(long long) ||                        \
-                   sizeof(long) == sizeof(int));                               \
-    if (sizeof(long) == sizeof(long long)) {                                   \
-      return static_cast<long>(::__FnName(                                     \
-          __mask, static_cast<long long>(__val), __offset, __width));          \
-    } else if (sizeof(long) == sizeof(int)) {                                  \
-      return static_cast<long>(                                                \
-          ::__FnName(__mask, static_cast<int>(__val), __offset, __width));     \
-    }                                                                          \
-  }                                                                            \
-  inline __device__ unsigned long __FnName(                                    \
-      unsigned int __mask, unsigned long __val, __Type __offset,               \
-      int __width = warpSize) {                                                \
-    return static_cast<unsigned long>(                                         \
-        ::__FnName(__mask, static_cast<long>(__val), __offset, __width));      \
-  }                                                                            \
-  inline __device__ double __FnName(unsigned int __mask, double __val,         \
-                                    __Type __offset, int __width = warpSize) { \
-    long long __tmp;                                                           \
-    _Static_assert(sizeof(__tmp) == sizeof(__val));                            \
-    memcpy(&__tmp, &__val, sizeof(__val));                                     \
-    __tmp = ::__FnName(__mask, __tmp, __offset, __width);                      \
-    double __ret;                                                              \
-    memcpy(&__ret, &__tmp, sizeof(__ret));                                     \
-    return __ret;                                                              \
-  }
-__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,
-                     __nvvm_shfl_sync_idx_f32, 0x1f, int);
-// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
-// maxLane.
-__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,
-                     __nvvm_shfl_sync_up_f32, 0, unsigned int);
-__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,
-                     __nvvm_shfl_sync_down_f32, 0x1f, unsigned int);
-__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,
-                     __nvvm_shfl_sync_bfly_f32, 0x1f, int);
-#pragma pop_macro("__MAKE_SYNC_SHUFFLES")
-
-inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {
-  return __nvvm_bar_warp_sync(mask);
-}
-
-inline __device__ void __barrier_sync(unsigned int id) {
-  __nvvm_barrier_sync(id);
-}
-
-inline __device__ void __barrier_sync_count(unsigned int id,
-                                            unsigned int count) {
-  __nvvm_barrier_sync_cnt(id, count);
-}
-
-inline __device__ int __all_sync(unsigned int mask, int pred) {
-  return __nvvm_vote_all_sync(mask, pred);
-}
-
-inline __device__ int __any_sync(unsigned int mask, int pred) {
-  return __nvvm_vote_any_sync(mask, pred);
-}
-
-inline __device__ int __uni_sync(unsigned int mask, int pred) {
-  return __nvvm_vote_uni_sync(mask, pred);
-}
-
-inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) {
-  return __nvvm_vote_ballot_sync(mask, pred);
-}
-
-inline __device__ unsigned int __activemask() {
-#if CUDA_VERSION < 9020
-  return __nvvm_vote_ballot(1);
-#else
-  unsigned int mask;
-  asm volatile("activemask.b32 %0;" : "=r"(mask));
-  return mask;
-#endif
-}
-
-inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) {
-  return __nvvm_fns(mask, base, offset);
-}
-
-#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
-
-// Define __match* builtins CUDA-9 headers expect to see.
-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
-inline __device__ unsigned int __match32_any_sync(unsigned int mask,
-                                                  unsigned int value) {
-  return __nvvm_match_any_sync_i32(mask, value);
-}
-
-inline __device__ unsigned long long
-__match64_any_sync(unsigned int mask, unsigned long long value) {
-  return __nvvm_match_any_sync_i64(mask, value);
-}
-
-inline __device__ unsigned int
-__match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
-  return __nvvm_match_all_sync_i32p(mask, value, pred);
-}
-
-inline __device__ unsigned long long
-__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
-  return __nvvm_match_all_sync_i64p(mask, value, pred);
-}
-#include "crt/sm_70_rt.hpp"
-
-#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
-#endif // CUDA_VERSION >= 9000
-
-// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.
-
-// Prevent the vanilla sm_32 intrinsics header from being included.
-#define __SM_32_INTRINSICS_H__
-#define __SM_32_INTRINSICS_HPP__
-
-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
-
-inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
-inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
-inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
-inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
-inline __device__ long long __ldg(const long long *ptr) {
-  return __nvvm_ldg_ll(ptr);
-}
-inline __device__ unsigned char __ldg(const unsigned char *ptr) {
-  return __nvvm_ldg_uc(ptr);
-}
-inline __device__ signed char __ldg(const signed char *ptr) {
-  return __nvvm_ldg_uc((const unsigned char *)ptr);
-}
-inline __device__ unsigned short __ldg(const unsigned short *ptr) {
-  return __nvvm_ldg_us(ptr);
-}
-inline __device__ unsigned int __ldg(const unsigned int *ptr) {
-  return __nvvm_ldg_ui(ptr);
-}
-inline __device__ unsigned long __ldg(const unsigned long *ptr) {
-  return __nvvm_ldg_ul(ptr);
-}
-inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
-  return __nvvm_ldg_ull(ptr);
-}
-inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
-inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }
-
-inline __device__ char2 __ldg(const char2 *ptr) {
-  typedef char c2 __attribute__((ext_vector_type(2)));
-  // We can assume that ptr is aligned at least to char2's alignment, but the
-  // load will assume that ptr is aligned to c2's alignment.  This is only
-  // safe if alignof(c2) <= alignof(char2).
-  c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
-  char2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ char4 __ldg(const char4 *ptr) {
-  typedef char c4 __attribute__((ext_vector_type(4)));
-  c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
-  char4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ short2 __ldg(const short2 *ptr) {
-  typedef short s2 __attribute__((ext_vector_type(2)));
-  s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
-  short2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ short4 __ldg(const short4 *ptr) {
-  typedef short s4 __attribute__((ext_vector_type(4)));
-  s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
-  short4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ int2 __ldg(const int2 *ptr) {
-  typedef int i2 __attribute__((ext_vector_type(2)));
-  i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
-  int2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ int4 __ldg(const int4 *ptr) {
-  typedef int i4 __attribute__((ext_vector_type(4)));
-  i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
-  int4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ longlong2 __ldg(const longlong2 *ptr) {
-  typedef long long ll2 __attribute__((ext_vector_type(2)));
-  ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
-  longlong2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-
-inline __device__ uchar2 __ldg(const uchar2 *ptr) {
-  typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
-  uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
-  uchar2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ uchar4 __ldg(const uchar4 *ptr) {
-  typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
-  uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
-  uchar4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ ushort2 __ldg(const ushort2 *ptr) {
-  typedef unsigned short us2 __attribute__((ext_vector_type(2)));
-  us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
-  ushort2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ ushort4 __ldg(const ushort4 *ptr) {
-  typedef unsigned short us4 __attribute__((ext_vector_type(4)));
-  us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
-  ushort4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ uint2 __ldg(const uint2 *ptr) {
-  typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
-  ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
-  uint2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ uint4 __ldg(const uint4 *ptr) {
-  typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
-  ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
-  uint4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
-  typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
-  ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
-  ulonglong2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-
-inline __device__ float2 __ldg(const float2 *ptr) {
-  typedef float f2 __attribute__((ext_vector_type(2)));
-  f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
-  float2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-inline __device__ float4 __ldg(const float4 *ptr) {
-  typedef float f4 __attribute__((ext_vector_type(4)));
-  f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
-  float4 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  ret.z = rv[2];
-  ret.w = rv[3];
-  return ret;
-}
-inline __device__ double2 __ldg(const double2 *ptr) {
-  typedef double d2 __attribute__((ext_vector_type(2)));
-  d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
-  double2 ret;
-  ret.x = rv[0];
-  ret.y = rv[1];
-  return ret;
-}
-
-// TODO: Implement these as intrinsics, so the backend can work its magic on
-// these.  Alternatively, we could implement these as plain C and try to get
-// llvm to recognize the relevant patterns.
-inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
-                                           unsigned shiftWidth) {
-  unsigned result;
-  asm("shf.l.wrap.b32 %0, %1, %2, %3;"
-      : "=r"(result)
-      : "r"(low32), "r"(high32), "r"(shiftWidth));
-  return result;
-}
-inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
-                                            unsigned shiftWidth) {
-  unsigned result;
-  asm("shf.l.clamp.b32 %0, %1, %2, %3;"
-      : "=r"(result)
-      : "r"(low32), "r"(high32), "r"(shiftWidth));
-  return result;
-}
-inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
-                                           unsigned shiftWidth) {
-  unsigned result;
-  asm("shf.r.wrap.b32 %0, %1, %2, %3;"
-      : "=r"(result)
-      : "r"(low32), "r"(high32), "r"(shiftWidth));
-  return result;
-}
-inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
-                                            unsigned shiftWidth) {
-  unsigned ret;
-  asm("shf.r.clamp.b32 %0, %1, %2, %3;"
-      : "=r"(ret)
-      : "r"(low32), "r"(high32), "r"(shiftWidth));
-  return ret;
-}
-
-#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
-
-#if CUDA_VERSION >= 11000
-extern "C" {
-__device__ inline size_t __nv_cvta_generic_to_global_impl(const void *__ptr) {
-  return (size_t)(void __attribute__((address_space(1))) *)__ptr;
-}
-__device__ inline size_t __nv_cvta_generic_to_shared_impl(const void *__ptr) {
-  return (size_t)(void __attribute__((address_space(3))) *)__ptr;
-}
-__device__ inline size_t __nv_cvta_generic_to_constant_impl(const void *__ptr) {
-  return (size_t)(void __attribute__((address_space(4))) *)__ptr;
-}
-__device__ inline size_t __nv_cvta_generic_to_local_impl(const void *__ptr) {
-  return (size_t)(void __attribute__((address_space(5))) *)__ptr;
-}
-__device__ inline void *__nv_cvta_global_to_generic_impl(size_t __ptr) {
-  return (void *)(void __attribute__((address_space(1))) *)__ptr;
-}
-__device__ inline void *__nv_cvta_shared_to_generic_impl(size_t __ptr) {
-  return (void *)(void __attribute__((address_space(3))) *)__ptr;
-}
-__device__ inline void *__nv_cvta_constant_to_generic_impl(size_t __ptr) {
-  return (void *)(void __attribute__((address_space(4))) *)__ptr;
-}
-__device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {
-  return (void *)(void __attribute__((address_space(5))) *)__ptr;
-}
-__device__ inline uint32_t __nvvm_get_smem_pointer(void *__ptr) {
-  return __nv_cvta_generic_to_shared_impl(__ptr);
-}
-} // extern "C"
-#endif // CUDA_VERSION >= 11000
-
-#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
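
For context, the __shfl_*_sync wrappers defined in the header deleted above
are the usual building block of warp-level reductions. A minimal CUDA sketch,
assuming a full 32-lane warp so that the 0xffffffff member mask is valid (the
function name is illustrative):

    __device__ int warp_sum(int v) {
      // Each step folds lanes [offset, 2*offset) onto lanes [0, offset);
      // after five halvings, lane 0 holds the sum of all 32 lanes.
      for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
      return v;
    }
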
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
deleted file mode 100644
index 538556f..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ /dev/null
@@ -1,348 +0,0 @@
-/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_MATH_H__
-#define __CLANG_CUDA_MATH_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-#ifndef __OPENMP_NVPTX__
-#if CUDA_VERSION < 9000
-#error This file is intended to be used with CUDA-9+ only.
-#endif
-#endif
-
-// __DEVICE__ is a helper macro with common set of attributes for the wrappers
-// we implement in this file. We need static in order to avoid emitting unused
-// functions and __forceinline__ helps inlining these wrappers at -O1.
-#pragma push_macro("__DEVICE__")
-#ifdef __OPENMP_NVPTX__
-#if defined(__cplusplus)
-#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE__ static __attribute__((always_inline, nothrow))
-#endif
-#else
-#define __DEVICE__ static __device__ __forceinline__
-#endif
-
-// Specialized version of __DEVICE__ for functions with void return type. Needed
-// because the OpenMP overlay requires constexpr functions here but prior to
-// c++14 void return functions could not be constexpr.
-#pragma push_macro("__DEVICE_VOID__")
-#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
-#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE_VOID__ __DEVICE__
-#endif
-
-// libdevice provides fast low-precision and slow full-precision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-// __nv_rint* in libdevice is buggy and produces incorrect results.
-__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__DEVICE_VOID__")
-#pragma pop_macro("__FAST_OR_SLOW")
-
-#endif // __CLANG_CUDA_MATH_H__
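
The __FAST_OR_SLOW dispatch in the header above means a single source-level
call can lower to either libdevice entry point. A hedged sketch of the
effect, using the sinf wrapper defined in that header (the kernel name is
illustrative):

    // When compiled with -ffast-math or -fcuda-approx-transcendentals,
    // clang defines __CLANG_CUDA_APPROX_TRANSCENDENTALS__ and this call
    // lowers to __nv_fast_sinf; otherwise it lowers to the full-precision
    // __nv_sinf.
    __global__ void apply_sin(float *out, const float *in) {
      out[threadIdx.x] = sinf(in[threadIdx.x]);
    }
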
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
deleted file mode 100644
index 73021d2..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/*
- * WARNING: This header is intended to be directly -include'd by
- * the compiler and is not supposed to be included by users.
- *
- */
-
-#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
-#define __CLANG_HIP_RUNTIME_WRAPPER_H__
-
-#if __HIP__
-
-#define __host__ __attribute__((host))
-#define __device__ __attribute__((device))
-#define __global__ __attribute__((global))
-#define __shared__ __attribute__((shared))
-#define __constant__ __attribute__((constant))
-#define __managed__ __attribute__((managed))
-
-#if !defined(__cplusplus) || __cplusplus < 201103L
-  #define nullptr NULL
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-  __attribute__((__visibility__("default")))
-  __attribute__((weak))
-  __attribute__((noreturn))
-  __device__ void __cxa_pure_virtual(void) {
-    __builtin_trap();
-  }
-  __attribute__((__visibility__("default")))
-  __attribute__((weak))
-  __attribute__((noreturn))
-  __device__ void __cxa_deleted_virtual(void) {
-    __builtin_trap();
-  }
-}
-#endif //__cplusplus
-
-#if !defined(__HIPCC_RTC__)
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-#else
-typedef __SIZE_TYPE__ size_t;
-// Define macros which are needed to declare HIP device APIs without standard
-// C/C++ headers. This is for readability, so that these APIs can be written
-// the same way as in the non-hipRTC use case. These macros need to be popped
-// so that they do not pollute users' namespace.
-#pragma push_macro("NULL")
-#pragma push_macro("uint32_t")
-#pragma push_macro("uint64_t")
-#pragma push_macro("CHAR_BIT")
-#pragma push_macro("INT_MAX")
-#define NULL (void *)0
-#define uint32_t __UINT32_TYPE__
-#define uint64_t __UINT64_TYPE__
-#define CHAR_BIT __CHAR_BIT__
-#define INT_MAX __INT_MAX__
-#endif // __HIPCC_RTC__
-
-typedef __SIZE_TYPE__ __hip_size_t;
-
-#ifdef __cplusplus
-extern "C" {
-#endif //__cplusplus
-
-#if __HIP_ENABLE_DEVICE_MALLOC__
-__device__ void *__hip_malloc(__hip_size_t __size);
-__device__ void *__hip_free(void *__ptr);
-__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
-  return __hip_malloc(__size);
-}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
-}
-#else
-__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
-  __builtin_trap();
-  return (void *)0;
-}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  __builtin_trap();
-  return (void *)0;
-}
-#endif
-
-#ifdef __cplusplus
-} // extern "C"
-#endif //__cplusplus
-
-#include <__clang_hip_libdevice_declares.h>
-#include <__clang_hip_math.h>
-
-#if defined(__HIPCC_RTC__)
-#include <__clang_hip_cmath.h>
-#else
-#include <__clang_cuda_math_forward_declares.h>
-#include <__clang_hip_cmath.h>
-#include <__clang_cuda_complex_builtins.h>
-#include <algorithm>
-#include <complex>
-#include <new>
-#endif // __HIPCC_RTC__
-
-#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
-#if defined(__HIPCC_RTC__)
-#pragma pop_macro("NULL")
-#pragma pop_macro("uint32_t")
-#pragma pop_macro("uint64_t")
-#pragma pop_macro("CHAR_BIT")
-#pragma pop_macro("INT_MAX")
-#endif // __HIPCC_RTC__
-#endif // __HIP__
-#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
deleted file mode 100644
index fef4b93..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics --------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __WMMINTRIN_H
-#error "Never use <__wmmintrin_pclmul.h> directly; include <wmmintrin.h> instead."
-#endif
-
-#ifndef __WMMINTRIN_PCLMUL_H
-#define __WMMINTRIN_PCLMUL_H
-
-/// Multiplies two 64-bit integer values, which are selected from source
-///    operands using the immediate-value operand. The multiplication is a
-///    carry-less multiplication, and the 128-bit integer product is stored in
-///    the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
-///
-/// \param __X
-///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __Y
-///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __I
-///    An immediate value specifying which 64-bit values to select from the
-///    operands. Bit 0 is used to select a value from operand \a __X, and bit
-///    4 is used to select a value from operand \a __Y: \n
-///    Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
-///    Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
-///    Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
-///    Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
-/// \returns The 128-bit integer vector containing the result of the carry-less
-///    multiplication of the selected 64-bit values.
-#define _mm_clmulepi64_si128(X, Y, I) \
-  ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \
-                                        (__v2di)(__m128i)(Y), (char)(I)))
-
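-/* Usage sketch, illustrative only (the helper names are hypothetical): the
- * immediate selects which 64-bit halves are multiplied, e.g. 0x00 multiplies
- * the two low halves and 0x11 the two high halves.
- */
-#if 0
-static __inline__ __m128i clmul_low_halves(__m128i __x, __m128i __y) {
-  return _mm_clmulepi64_si128(__x, __y, 0x00); /* X[63:0] * Y[63:0] */
-}
-static __inline__ __m128i clmul_high_halves(__m128i __x, __m128i __y) {
-  return _mm_clmulepi64_si128(__x, __y, 0x11); /* X[127:64] * Y[127:64] */
-}
-#endif
-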
-#endif /* __WMMINTRIN_PCLMUL_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/altivec.h b/linux-x86/lib64/clang/14.0.2/include/altivec.h
deleted file mode 100644
index 55195b0..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/altivec.h
+++ /dev/null
@@ -1,19091 +0,0 @@
-/*===---- altivec.h - Standard header for AltiVec/VSX intrinsics ----------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __ALTIVEC_H
-#define __ALTIVEC_H
-
-#ifndef __ALTIVEC__
-#error "AltiVec support not enabled"
-#endif
-
-/* Constants for mapping CR6 bits to predicate result. */
-
-#define __CR6_EQ 0
-#define __CR6_EQ_REV 1
-#define __CR6_LT 2
-#define __CR6_LT_REV 3
-#define __CR6_GT 4
-#define __CR6_GT_REV 5
-#define __CR6_SO 6
-#define __CR6_SO_REV 7
-
-/* Constants for vec_test_data_class */
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_ZERO_N (1<<2)
-#define __VEC_CLASS_FP_ZERO_P (1<<3)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P           | \
-                             __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_INFINITY_N (1<<4)
-#define __VEC_CLASS_FP_INFINITY_P (1<<5)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P   | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_NAN (1<<6)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN        | \
-                                   __VEC_CLASS_FP_SUBNORMAL  | \
-                                   __VEC_CLASS_FP_ZERO       | \
-                                   __VEC_CLASS_FP_INFINITY)
-
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#include <stddef.h>
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c);
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c);
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c);
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
-                                                      vector double __b,
-                                                      vector unsigned char __c);
-#endif
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b);
-
-/* vec_abs */
-
-#define __builtin_altivec_abs_v16qi vec_abs
-#define __builtin_altivec_abs_v8hi vec_abs
-#define __builtin_altivec_abs_v4si vec_abs
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abs(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(__a, -__a);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abs(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(__a, -__a);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abs(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(__a, -__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_abs(vector signed long long __a) {
-  return __builtin_altivec_vmaxsd(__a, -__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvabssp(__a);
-#else
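-  // No VSX: clear each lane's IEEE-754 sign bit with a mask instead.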
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
-  return (vector float)__res;
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
-  return __builtin_vsx_xvabsdp(__a);
-}
-#endif
-
-/* vec_abss */
-#define __builtin_altivec_abss_v16qi vec_abss
-#define __builtin_altivec_abss_v8hi vec_abss
-#define __builtin_altivec_abss_v4si vec_abss
-
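-/* Saturating absolute value: computed as max(__a, sat_sub(0, __a)), so
-   negating the most negative value saturates instead of overflowing. */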
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abss(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(
-      __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abss(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(
-      __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abss(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(
-      __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
-}
-
-/* vec_absd */
-#if defined(__POWER9_VECTOR__)
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_absd(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vabsdub(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_absd(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vabsduh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_absd(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vabsduw(__a, __b);
-}
-
-#endif /* End __POWER9_VECTOR__ */
-
-/* vec_add */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_add(vector signed long long __a, vector signed long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_add(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a + __b;
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_add(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-#endif
-
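-/* Treats each 16-byte vector as one 128-bit integer and adds modulo 2^128. */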
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vadduqm(__a, __b);
-}
-#elif defined(__VSX__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_add(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  // Little endian systems on CPUs prior to Power8 don't really exist,
-  // so scalarizing is fine.
-  return __a + __b;
-#else
-  vector unsigned int __res =
-      (vector unsigned int)__a + (vector unsigned int)__b;
-  vector unsigned int __carry = __builtin_altivec_vaddcuw(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-  __carry = __builtin_shufflevector((vector unsigned char)__carry,
-                                    (vector unsigned char)__carry, 0, 0, 0, 7,
-                                    0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
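-  // Each 32-bit lane's carry-out flag sits in that lane's least significant
-  // byte (bytes 7 and 15 in big-endian order); the shuffle moves it into the
-  // least significant byte of the corresponding high word (bytes 3 and 11)
-  // and zeroes everything else, so the final add propagates each low-word
-  // carry into its high word.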
-  return (vector signed long long)(__res + __carry);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_add(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)vec_add((vector signed long long)__a,
-                                            (vector signed long long)__b);
-}
-#endif // __POWER8_VECTOR__
-
-static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
-                                                    vector float __b) {
-  return __a + __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
-                                                     vector double __b) {
-  return __a + __b;
-}
-#endif // __VSX__
-
-/* vec_adde */
-
-#ifdef __POWER8_VECTOR__
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_adde(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-#endif
-
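-/* 32-bit lane variants: the carry-in for each lane is bit 0 of the
-   corresponding element of __c. */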
-static __inline__ vector signed int __ATTRS_o_ai
-vec_adde(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adde(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-/* vec_addec */
-
-#ifdef __POWER8_VECTOR__
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-#ifdef __powerpc64__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-
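-  // Per 32-bit lane: widen to 64 bits, compute __a + __b + (__c & 1), and
-  // return bit 32 of the sum (the carry-out of that lane).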
-  signed int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempa = (unsigned int) __a[i];
-    unsigned int __tempb = (unsigned int) __b[i];
-    unsigned int __tempc = (unsigned int) __c[i];
-    __tempc = __tempc & 0x00000001;
-    unsigned long long __longa = (unsigned long long) __tempa;
-    unsigned long long __longb = (unsigned long long) __tempb;
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector signed int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-
-  unsigned int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempc = __c[i] & 1;
-    unsigned long long __longa = (unsigned long long) __a[i];
-    unsigned long long __longb = (unsigned long long) __b[i];
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (unsigned int) __tempres;
-  }
-
-  vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-#endif // __powerpc64__
-#endif // __POWER8_VECTOR__
-
-/* vec_vaddubm */
-
-#define __builtin_altivec_vaddubm vec_vaddubm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-/* vec_vadduhm */
-
-#define __builtin_altivec_vadduhm vec_vadduhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-/* vec_vadduwm */
-
-#define __builtin_altivec_vadduwm vec_vadduwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-/* vec_vaddfp */
-
-#define __builtin_altivec_vaddfp vec_vaddfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vaddfp(vector float __a, vector float __b) {
-  return __a + __b;
-}
-
-/* vec_addc */
-
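-/* vec_addc returns the carry-out of each unsigned 32-bit lane addition; every
-   result element is 0 or 1. */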
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector signed __int128)__builtin_altivec_vaddcuq(
-      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
-}
-#endif // __POWER8_VECTOR__
-
-/* vec_vaddcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-/* vec_adds */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vaddsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vaddubs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vaddshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-/* vec_vadduhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vaddsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-/* vec_vadduws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-/* vec_vadduqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-
-/* vec_vaddeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-/* vec_vaddcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-/* vec_vaddecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-#endif // __POWER8_VECTOR__ && __powerpc64__ && __SIZEOF_INT128__
-
-/* vec_and */
-
-#define __builtin_altivec_vand vec_and
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_and(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_and(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_vand */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_andc */
-
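-/* vec_andc computes the bitwise AND of __a with the one's complement of
-   __b. */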
-#define __builtin_altivec_vandc vec_andc
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_andc(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_vandc */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                      vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                    vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                         vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
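-/* Illustrative sketch (not part of the original header): vec_vandc
-   computes __a & ~__b lane-wise, i.e. it clears in __a every bit that is
-   set in __b:
-
-     vector unsigned int flags = {0xFF, 0xFF, 0xFF, 0xFF};
-     vector unsigned int low   = {0x0F, 0x0F, 0x0F, 0x0F};
-     vector unsigned int hi    = vec_vandc(flags, low);   each lane 0xF0
-*/
-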
-/* vec_avg */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
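-/* Illustrative sketch (not part of the original header): vec_avg is the
-   rounded average, (__a + __b + 1) >> 1 per lane, computed without
-   intermediate overflow:
-
-     vector unsigned char x = vec_splats((unsigned char)250);
-     vector unsigned char y = vec_splats((unsigned char)251);
-     vector unsigned char m = vec_avg(x, y);   every lane is 251
-*/
-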
-/* vec_vavgsb */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vavgsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-/* vec_vavgub */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-/* vec_vavgsh */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vavgsh(vector short __a, vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-/* vec_vavguh */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-/* vec_vavgsw */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vavgsw(vector int __a, vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-/* vec_vavguw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_ceil */
-
-static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspip(__a);
-#else
-  return __builtin_altivec_vrfip(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
-  return __builtin_vsx_xvrdpip(__a);
-}
-#endif
-
-/* vec_roundp */
-static __inline__ vector float __ATTRS_o_ai vec_roundp(vector float __a) {
-  return vec_ceil(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_roundp(vector double __a) {
-  return vec_ceil(__a);
-}
-#endif
-
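-/* Illustrative sketch (not part of the original header): vec_ceil (and
-   its PVIPR alias vec_roundp) rounds every lane toward +infinity:
-
-     vector float v = {-1.5f, 0.25f, 2.0f, 3.5f};
-     vector float c = vec_ceil(v);   {-1.0f, 1.0f, 2.0f, 4.0f}
-*/
-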
-/* vec_vrfip */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfip(vector float __a) {
-  return __builtin_altivec_vrfip(__a);
-}
-
-/* vec_cmpb */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_cmpb(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
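-/* Illustrative sketch (not part of the original header): vec_cmpb is a
-   bounds check; a result lane is all zero when -__b[i] <= __a[i] <= __b[i]
-   and has high-order bits set otherwise:
-
-     vector float v = {0.5f, 2.0f, -3.0f, 1.0f};
-     vector float b = vec_splats(1.0f);
-     vector int out = vec_cmpb(v, b);   zero in lanes 0 and 3 only
-*/
-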
-/* vec_vcmpbfp */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vcmpbfp(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_cmpeq */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a,
-                                                         vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-#elif defined(__VSX__)
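-/* Without vcmpequd (VSX prior to POWER8), compare the 32-bit halves,
-   AND each word result with its partner word, then splat the combined
-   word across both halves of each doubleword lane. */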
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  vector bool int __wordcmp =
-      vec_cmpeq((vector signed int)__a, (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 3, 0, 1, 2);
-  return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 1,
-                                                        1, 3, 3);
-#else
-  __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 1, 2, 3, 0);
-  return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 0,
-                                                        0, 2, 2);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpeq(vector bool __int128 __a, vector bool __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpequq(__a, __b);
-}
-#endif
-
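-/* Illustrative sketch (not part of the original header): vec_cmpeq
-   yields an all-ones lane where the elements are equal and an all-zeros
-   lane elsewhere, so the result composes directly with vec_sel:
-
-     vector signed int a  = {1, 2, 3, 4};
-     vector signed int b  = {1, 0, 3, 0};
-     vector bool int   eq = vec_cmpeq(a, b);   {-1, 0, -1, 0} as a mask
-*/
-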
-#ifdef __POWER9_VECTOR__
-/* vec_cmpne */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpne(vector bool __int128 __a, vector bool __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(__a, __b));
-}
-#endif
-
-/* vec_cmpnez */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
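-/* Illustrative sketch (not part of the original header): vec_cmpnez sets
-   a lane when the elements differ or when either element is zero, which
-   is the condition that terminates a NUL-delimited string comparison:
-
-     vector unsigned char s1 = vec_splats((unsigned char)'a');
-     vector unsigned char s2 = vec_splats((unsigned char)'a');
-     s2[3] = 0;
-     vector bool char stop = vec_cmpnez(s1, s2);   only lane 3 is set
-*/
-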
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
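-/* Illustrative sketch (not part of the original header): these count how
-   many bytes at the element-0 end (cntlz) or the element-15 end (cnttz)
-   of the vector have a zero least-significant bit; applied to a
-   vec_cmpeq mask this gives the index of the first match:
-
-     vector unsigned char a = vec_splats((unsigned char)1);
-     vector unsigned char b = vec_splats((unsigned char)1);
-     b[0] = 0;
-     vector bool char m = vec_cmpeq(a, b);
-     int first = vec_cntlz_lsbb((vector unsigned char)m);   first == 1
-*/
-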
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-#endif
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
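-/* Illustrative sketch (not part of the original header): vec_parity_lsbb
-   reduces each element to the XOR of the low bits of its bytes:
-
-     vector unsigned int v = {0x01010101, 0x01010100, 0, 1};
-     vector unsigned int p = vec_parity_lsbb(v);   {0, 1, 0, 1}
-*/
-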
-#else
-/* vec_cmpne */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector bool char __a, vector bool char __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector signed char __a, vector signed char __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector bool short __a, vector bool short __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector signed short __a, vector signed short __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector bool int __a, vector bool int __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector signed int __a, vector signed int __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector float __a, vector float __b) {
-  return ~(vec_cmpeq(__a, __b));
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-#elif defined(__VSX__)
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)~(
-      vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)~(
-      vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)~(
-      vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
-}
-#endif
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-#endif
-
-/* vec_cmpgt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
-}
-#elif defined(__VSX__)
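-/* Without doubleword compares (VSX prior to POWER8), a 64-bit greater-than
-   is built from word operations: the compare of the high words decides
-   unless the high words are equal, in which case the unsigned compare of
-   the low words decides; the shuffles align the word results into
-   doubleword lanes. */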
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  vector signed int __sgtw = (vector signed int)vec_cmpgt(
-      (vector signed int)__a, (vector signed int)__b);
-  vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-  vector unsigned int __eqw = (vector unsigned int)vec_cmpeq(
-      (vector signed int)__a, (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw;
-  __sgtw |= (vector signed int)__ugtw;
-  return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 1, 1, 3,
-                                                        3);
-#else
-  __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw;
-  __sgtw |= (vector signed int)__ugtw;
-  return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 0, 0, 2,
-                                                        2);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-  vector unsigned int __eqw = (vector unsigned int)vec_cmpeq(
-      (vector signed int)__a, (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __eqw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw;
-  __ugtw |= __eqw;
-  return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 1, 1, 3,
-                                                        3);
-#else
-  __eqw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw;
-  __ugtw |= __eqw;
-  return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 0, 0, 2,
-                                                        2);
-#endif
-}
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtsq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtuq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
-}
-#endif
-
-/* vec_cmpge */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmpge(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
-
-/* vec_vcmpgtsb */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-/* vec_vcmpgtub */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-/* vec_vcmpgtsh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtsh(vector short __a, vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-/* vec_vcmpgtuh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-/* vec_vcmpgtsw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtsw(vector int __a, vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-/* vec_vcmpgtuw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-/* vec_vcmpgtfp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtfp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-}
-
-/* vec_cmple */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpge(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector double __a, vector double __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmple(vector signed __int128 __a, vector signed __int128 __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmple(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-/* vec_cmplt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
-                                                           vector short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
-                                                         vector int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector double __a, vector double __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool __int128 __ATTRS_o_ai
-vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-/* vec_popcnt */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-
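-/* Illustrative sketch (not part of the original header): vec_popcnt is
-   the per-element population count, always returned as an unsigned type:
-
-     vector unsigned int v = {0, 1, 3, 0xFFFFFFFF};
-     vector unsigned int c = vec_popcnt(v);   {0, 1, 2, 32}
-*/
-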
-#define vec_vclz vec_cntlz
-/* vec_cntlz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-#endif
-
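-/* Illustrative sketch (not part of the original header): vec_cntlz is the
-   per-element count of leading zero bits (vec_vclz is the older alias):
-
-     vector unsigned int v = {1, 2, 0, 0x80000000};
-     vector unsigned int z = vec_cntlz(v);   {31, 30, 32, 0}
-*/
-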
-#ifdef __POWER9_VECTOR__
-
-/* vec_cnttz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-
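-/* Illustrative sketch (not part of the original header): vec_cnttz is the
-   trailing-zero counterpart of vec_cntlz:
-
-     vector unsigned int v = {8, 1, 0, 6};
-     vector unsigned int t = vec_cnttz(v);   {3, 0, 32, 1}
-*/
-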
-/* vec_first_match_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
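-/* The match helpers above count mask bits from the element-0 end (cnttz
-   on little-endian, cntlz on big-endian, since element 0 sits at opposite
-   ends of the register) and convert the bit count to an element index by
-   shifting: >> 3 for bytes, >> 4 for halfwords, >> 5 for words. When
-   nothing matches, the element count (16, 8 or 4) is returned.
-   Illustrative sketch (not part of the original header):
-
-     vector signed int a = {7, 8, 9, 10};
-     vector signed int b = {0, 8, 0, 10};
-     unsigned i = vec_first_match_index(a, b);   i == 1
-*/
-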
-/* vec_first_match_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) {
-  /* OR the equality comparison of the two vectors with comparisons of
-     each operand against that result: a lane is set when the elements
-     are equal, or when either element is zero (a zero element compares
-     equal to the all-zero lane of the equality mask), marking the
-     end-of-string case.
-  */
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector signed char)__tmp1, __a) |
-                            vec_cmpeq((vector signed char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned char __a,
-                             vector unsigned char __b) {
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector unsigned char)__tmp1, __a) |
-                            vec_cmpeq((vector unsigned char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector signed short)__tmp1, __a) |
-                             vec_cmpeq((vector signed short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned short __a,
-                             vector unsigned short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector unsigned short)__tmp1, __a) |
-                             vec_cmpeq((vector unsigned short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) |
-                           vec_cmpeq((vector signed int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 |
-                           vec_cmpeq((vector unsigned int)__tmp1, __a) |
-                           vec_cmpeq((vector unsigned int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)__tmp2);
-#else
-    vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed char __a,
-                                vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned char __a,
-                                vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed short __a,
-                                vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned short __a,
-                                vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned int __a,
-                                vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector double __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector float __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp(__a, __b);
-}
-
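-/* Illustrative sketch (not part of the original header): vec_insert_exp
-   keeps the sign and significand of __a and substitutes the biased
-   exponent supplied in the low-order bits of each element of __b
-   (an assumption worth checking against the ISA), e.g. building 2**10
-   from a significand of 1.0:
-
-     vector double one = vec_splats(1.0);
-     vector unsigned long long e = vec_splats(1023ULL + 10);
-     vector double p = vec_insert_exp(one, e);   each lane is 1024.0
-*/
-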
-#if defined(__powerpc64__)
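-/* lxvl/stxvl expect the byte count in the top byte of their length
-   operand, hence the (__b << 56) below; vec_xl_len loads __b bytes
-   (at most 16) and zero-fills the remaining lanes. */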
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a,
-                                                             size_t __b) {
-  return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(const unsigned char *__a, size_t __b) {
-  return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a,
-                                                              size_t __b) {
-  return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(const unsigned short *__a, size_t __b) {
-  return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a,
-                                                            size_t __b) {
-  return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a,
-                                                              size_t __b) {
-  return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) {
-  return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(const signed __int128 *__a, size_t __b) {
-  return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(const unsigned __int128 *__a, size_t __b) {
-  return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-#endif
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(const signed long long *__a, size_t __b) {
-  return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(const unsigned long long *__a, size_t __b) {
-  return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a,
-                                                        size_t __b) {
-  return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(const unsigned char *__a, size_t __b) {
-  vector unsigned char __res =
-      (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__res, (vector int)__res, __mask);
-}
-
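-/* Illustrative sketch (not part of the original header): a length-limited
-   load reads only the bytes that exist, e.g. from a short buffer:
-
-     const unsigned char buf[5] = {1, 2, 3, 4, 5};
-     vector unsigned char v = vec_xl_len(buf, 5);   lanes 5..15 are zero
-*/
-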
-/* vec_xst_len */
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
-                                                unsigned char *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
-                                                signed char *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
-                                                signed short *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
-                                                unsigned short *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
-                                                signed int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
-                                                unsigned int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
-                                                signed __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
-                                                unsigned __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-#endif
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
-                                                signed long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
-                                                unsigned long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
-                                                  unsigned char *__b,
-                                                  size_t __c) {
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
-  vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
-  return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
-}
-#endif
-#endif
-
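-/* Illustrative sketch (not part of the original header): the
-   length-limited store writes only the first __c bytes of the vector:
-
-     vector unsigned char v = vec_splats((unsigned char)7);
-     unsigned char out[5];
-     vec_xst_len(v, out, 5);   writes bytes 0..4 of v, nothing past out[4]
-*/
-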
-#if defined(__POWER9_VECTOR__) && defined(__powerpc64__)
-#define __vec_ldrmb(PTR, CNT) vec_xl_len_r((const unsigned char *)(PTR), (CNT))
-#define __vec_strmb(PTR, CNT, VAL)                                             \
-  vec_xst_len_r((VAL), (unsigned char *)(PTR), (CNT))
-#else
-#define __vec_ldrmb __builtin_vsx_ldrmb
-#define __vec_strmb __builtin_vsx_strmb
-#endif
-
-/* vec_cpsgn */
-
-#ifdef __VSX__
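-/* The builtin takes (sign source, magnitude source), so the operands are
-   deliberately swapped: vec_cpsgn(__a, __b) returns the magnitude of __a
-   with the sign of __b, matching the PVIPR definition. */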
-static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
-                                                      vector float __b) {
-  return __builtin_vsx_xvcpsgnsp(__b, __a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
-                                                       vector double __b) {
-  return __builtin_vsx_xvcpsgndp(__b, __a);
-}
-#endif
-
-/* vec_ctf */
-
-#ifdef __VSX__
-// Some functions have different signatures under the XL compiler than the
-// ones Clang/GCC implement and the PVIPR documents. This macro selects the
-// XL-compatible signatures for those functions.
-#ifdef __XL_COMPAT_ALTIVEC__
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) *      \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)),          \
-             vector signed long long                                           \
-           : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) *        \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
-#else // __XL_COMPAT_ALTIVEC__
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
-#endif // __XL_COMPAT_ALTIVEC__
-#else
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)))
-#endif
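-
-/* Example (illustrative): vec_ctf converts each integer element to floating
-   point and scales the result by 2**-__b:
-
-     vector signed int __vi = {1, 2, 4, 8};
-     vector float __vf = vec_ctf(__vi, 3);   // {0.125f, 0.25f, 0.5f, 1.0f}
-*/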
-
-/* vec_ctd */
-#ifdef __VSX__
-#define vec_ctd(__a, __b)                                                      \
-  _Generic((__a), vector signed int                                            \
-           : (vec_doublee((vector signed int)(__a)) *                          \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector unsigned int                                               \
-           : (vec_doublee((vector unsigned int)(__a)) *                        \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
-#endif // __VSX__
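-
-/* vec_ctd is the double-precision analogue: elements are converted to double
-   and scaled by 2**-__b; for the four-element int sources, the even-indexed
-   elements are the ones converted (via vec_doublee). */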
-
-/* vec_vcfux */
-
-#define vec_vcfux __builtin_altivec_vcfux
-
-/* vec_vcfsx */
-
-#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b))
-
-/* vec_cts */
-
-#ifdef __VSX__
-#ifdef __XL_COMPAT_ALTIVEC__
-#define vec_cts(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_vsx_xvcvdpsxws(__ret);                                  \
-           }))
-#else // __XL_COMPAT_ALTIVEC__
-#define vec_cts(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-#endif // __XL_COMPAT_ALTIVEC__
-#else
-#define vec_cts __builtin_altivec_vctsxs
-#endif
-
-/* vec_vctsxs */
-
-#define vec_vctsxs __builtin_altivec_vctsxs
-
-/* vec_ctu */
-
-#ifdef __VSX__
-#ifdef __XL_COMPAT_ALTIVEC__
-#define vec_ctu(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_vsx_xvcvdpuxws(__ret);                                  \
-           }))
-#else // __XL_COMPAT_ALTIVEC__
-#define vec_ctu(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#endif // __XL_COMPAT_ALTIVEC__
-#else
-#define vec_ctu __builtin_altivec_vctuxs
-#endif
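-
-/* Example (illustrative): vec_cts/vec_ctu invert vec_ctf, scaling by 2**__b
-   before a saturating conversion to signed/unsigned integers:
-
-     vector float __vf = {0.125f, 0.25f, 0.5f, 1.0f};
-     vector signed int __vi = vec_cts(__vf, 3);   // {1, 2, 4, 8}
-*/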
-
-#ifdef __LITTLE_ENDIAN__
-/* vec_ctsl */
-
-#ifdef __VSX__
-#define vec_ctsl(__a, __b)                                                     \
-  _Generic((__a), vector float                                                 \
-           : __extension__({                                                   \
-               vector float __ret =                                            \
-                   (vector float)(__a) *                                       \
-                   (vector float)(vector unsigned)((0x7f + (__b)) << 23);      \
-               __builtin_vsx_xvcvspsxds(                                       \
-                   __builtin_vsx_xxsldwi(__ret, __ret, 1));                    \
-             }),                                                               \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-
-/* vec_ctul */
-
-#define vec_ctul(__a, __b)                                                     \
-  _Generic((__a), vector float                                                 \
-           : __extension__({                                                   \
-               vector float __ret =                                            \
-                   (vector float)(__a) *                                       \
-                   (vector float)(vector unsigned)((0x7f + (__b)) << 23);      \
-               __builtin_vsx_xvcvspuxds(                                       \
-                   __builtin_vsx_xxsldwi(__ret, __ret, 1));                    \
-             }),                                                               \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#endif
-#else // __LITTLE_ENDIAN__
-/* vec_ctsl */
-
-#ifdef __VSX__
-#define vec_ctsl(__a, __b)                                                     \
-  _Generic((__a), vector float                                                 \
-           : __extension__({                                                   \
-               vector float __ret =                                            \
-                   (vector float)(__a) *                                       \
-                   (vector float)(vector unsigned)((0x7f + (__b)) << 23);      \
-               __builtin_vsx_xvcvspsxds(__ret);                                \
-             }),                                                               \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-
-/* vec_ctul */
-
-#define vec_ctul(__a, __b)                                                     \
-  _Generic((__a), vector float                                                 \
-           : __extension__({                                                   \
-               vector float __ret =                                            \
-                   (vector float)(__a) *                                       \
-                   (vector float)(vector unsigned)((0x7f + (__b)) << 23);      \
-               __builtin_vsx_xvcvspuxds(__ret);                                \
-             }),                                                               \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#endif
-#endif // __LITTLE_ENDIAN__
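-
-/* Why the endian split above: xvcvspsxds/xvcvspuxds read the even-numbered
-   (big-endian) words of their source, so the little-endian variants first
-   rotate the input one word with xxsldwi to line the requested elements up
-   with the lanes the instruction actually converts. */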
-
-/* vec_vctuxs */
-
-#define vec_vctuxs __builtin_altivec_vctuxs
-
-/* vec_signext */
-
-#ifdef __POWER9_VECTOR__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signexti(vector signed char __a) {
-  return __builtin_altivec_vextsb2w(__a);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signexti(vector signed short __a) {
-  return __builtin_altivec_vextsh2w(__a);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signextll(vector signed char __a) {
-  return __builtin_altivec_vextsb2d(__a);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signextll(vector signed short __a) {
-  return __builtin_altivec_vextsh2d(__a);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signextll(vector signed int __a) {
-  return __builtin_altivec_vextsw2d(__a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_signextq(vector signed long long __a) {
-  return __builtin_altivec_vextsd2q(__a);
-}
-#endif
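-
-/* Example (illustrative): the sign-extension helpers widen the low-order
-   bits of each element in place; vec_signexti on halfwords turns a word
-   holding 0x0000ff85 into 0xffffff85 by sign-extending its low 16 bits. */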
-
-/* vec_signed */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int, vector signed int, const unsigned int __c);
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_signed2(vector double __a, vector double __b) {
-  return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsxws(__a);
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsxws(__a);
-#else
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
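-
-/* Placement of the even/odd variants: converting {d0, d1} with vec_signede
-   puts the results in the even word elements {s0, ?, s1, ?}, while
-   vec_signedo uses the odd elements {?, s0, ?, s1}; the remaining words are
-   undefined. */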
-
-/* vec_unsigned */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sld(vector unsigned int, vector unsigned int, const unsigned int __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-
-#ifdef __VSX__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_unsigned2(vector double __a, vector double __b) {
-  return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpuxws(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpuxws(__a);
-#else
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_float */
-
-static __inline__ vector float __ATTRS_o_ai
-vec_sld(vector float, vector float, const unsigned int __c);
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector signed long long __a, vector signed long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector double __a, vector double __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvsxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvuxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_double */
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvsxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvuxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvspdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector signed int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector unsigned int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector float __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector signed int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector unsigned int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector float __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(__a);
-#else
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(__a);
-#else
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(__a);
-#else
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-/* vec_cvf */
-static __inline__ vector double __ATTRS_o_ai vec_cvf(vector float __a) {
-  return vec_doublee(__a);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_cvf(vector double __a) {
-  return vec_floate(__a);
-}
-#endif
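-
-/* Element selection in the vec_double* family, for a four-element source
-   {e0, e1, e2, e3}:
-     vec_doubleh -> {e0, e1}    vec_doublel -> {e2, e3}
-     vec_doublee -> {e0, e2}    vec_doubleo -> {e1, e3}
-   vec_cvf is simply the PVIPR name for the float<->double conversions. */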
-
-/* vec_div */
-
-/* Integer vector divides: where the target has no vector integer divide
-   instructions, the vectors are scalarized, the elements are divided one by
-   one, and the results are reassembled into a vector. */
-static __inline__ vector signed char __ATTRS_o_ai
-vec_div(vector signed char __a, vector signed char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_div(vector unsigned char __a, vector unsigned char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_div(vector signed short __a, vector signed short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_div(vector unsigned short __a, vector unsigned short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_div(vector signed int __a, vector signed int __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_div(vector unsigned int __a, vector unsigned int __b) {
-  return __a / __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_div(vector signed long long __a, vector signed long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_div(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
-                                                    vector float __b) {
-  return __a / __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
-                                                     vector double __b) {
-  return __a / __b;
-}
-#endif
-
-/* vec_dive */
-
-#ifdef __POWER10_VECTOR__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_dive(vector signed int __a, vector signed int __b) {
-  return __builtin_altivec_vdivesw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_dive(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vdiveuw(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_dive(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vdivesd(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_dive(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vdiveud(__a, __b);
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vdiveuq(__a, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_dive(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vdivesq(__a, __b);
-}
-#endif
-#endif
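-
-/* vec_dive ("divide extended") is not an element-wise __a / __b: each result
-   element is the quotient of (__a[i] << N) / __b[i], where N is the element
-   width in bits (undefined if the quotient overflows the element); it is
-   paired with vec_div to build double-width divisions. */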
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_div(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a / __b;
-}
-#endif /* __POWER10_VECTOR__ */
-
-/* vec_test_swdiv */
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_test_swdiv(vector double __a,
-                                                  vector double __b) {
-  return __builtin_vsx_xvtdivdp(__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_test_swdivs(vector float __a,
-                                                   vector float __b) {
-  return __builtin_vsx_xvtdivsp(__a, __b);
-}
-#endif
-
-/* vec_dss */
-
-#define vec_dss __builtin_altivec_dss
-
-/* vec_dssall */
-
-static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
-  __builtin_altivec_dssall();
-}
-
-/* vec_dst */
-#define vec_dst(__PTR, __CW, __STR) \
-  __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR))
-
-/* vec_dstst */
-#define vec_dstst(__PTR, __CW, __STR) \
-  __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR))
-
-/* vec_dststt */
-#define vec_dststt(__PTR, __CW, __STR) \
-  __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR))
-
-/* vec_dstt */
-#define vec_dstt(__PTR, __CW, __STR) \
-  __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR))
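-
-/* Usage sketch (illustrative; the control word follows the classic AltiVec
-   data-stream encoding of (size << 24) | (count << 16) | stride): start
-   prefetching 4 blocks of 2 vectors each, 32 bytes apart, on stream 0, and
-   stop the stream once the data has been consumed:
-
-     vec_dst(__ptr, (2 << 24) | (4 << 16) | 32, 0);
-     ...
-     vec_dss(0);
-*/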
-
-/* vec_eqv */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                  (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                    (vector unsigned int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
-                                                        vector bool char __b) {
-  return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                (vector unsigned int)__b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                   (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xxleqv(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
-                                                       vector bool int __b) {
-  return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                               (vector unsigned int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
-                                                    vector float __b) {
-  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                            (vector unsigned int)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
-                                                     vector double __b) {
-  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                             (vector unsigned int)__b);
-}
-#endif
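-
-/* vec_eqv is the bitwise equivalence ~(__a ^ __b) (xxleqv is an XNOR): each
-   result bit is 1 exactly where the corresponding bits of __a and __b are
-   equal. */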
-
-/* vec_expte */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_expte(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_vexptefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vexptefp(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
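-
-/* vec_expte produces a hardware *estimate* of 2**x per element rather than a
-   correctly rounded result; refine it in software where full precision
-   matters (vec_loge further below is the matching log2 estimate). */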
-
-/* vec_floor */
-
-static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspim(__a);
-#else
-  return __builtin_altivec_vrfim(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
-  return __builtin_vsx_xvrdpim(__a);
-}
-#endif
-
-/* vec_roundm */
-static __inline__ vector float __ATTRS_o_ai vec_roundm(vector float __a) {
-  return vec_floor(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_roundm(vector double __a) {
-  return vec_floor(__a);
-}
-#endif
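-
-/* Example (illustrative): vec_floor rounds each element toward negative
-   infinity, so {1.7f, -1.2f, 0.5f, -0.5f} becomes {1.0f, -2.0f, 0.0f, -1.0f};
-   vec_roundm is the PVIPR name for the same operation. */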
-
-/* vec_vrfim */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfim(vector float __a) {
-  return __builtin_altivec_vrfim(__a);
-}
-
-/* vec_ld */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(long __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(long __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ld(long __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(long __a,
-                                                   const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(long __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(long __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ld(long __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ld(long __a,
-                                                   const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(long __a,
-                                                 const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(long __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ld(long __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(long __a,
-                                                   const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(long __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
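-/* Note on semantics: vec_ld loads the 16-byte-aligned quadword that contains
-   the effective address __a + __b -- lvx ignores the low four address bits --
-   so it cannot load unaligned data by itself; see vec_lvsl/vec_lvsr below for
-   the classic unaligned-access sequence. */
-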
-/* vec_lvx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(long __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(long __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvx(long __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(long __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvx(long __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvx(long __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(long __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvx(long __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-/* vec_lde */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lde(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lde(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lde(long __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lde(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lde(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lde(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lde(long __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
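-/* vec_lde loads a single element into the vector slot that matches the
-   element's address within its quadword; the other elements are undefined. */
-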
-/* vec_lvebx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvebx(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvebx(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-/* vec_lvehx */
-
-static __inline__ vector short __ATTRS_o_ai vec_lvehx(long __a,
-                                                      const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvehx(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-/* vec_lvewx */
-
-static __inline__ vector int __ATTRS_o_ai vec_lvewx(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvewx(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvewx(long __a,
-                                                      const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_ldl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(long __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(long __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ldl(long __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(long __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ldl(long __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ldl(long __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(long __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ldl(long __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
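-/* vec_ldl is vec_ld with an LRU hint: lvxl marks the cache line as least
-   recently used so transient data is evicted first; prefer it for data that
-   will not be touched again soon. */
-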
-/* vec_lvxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(long __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(long __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(long __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(long __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvxl(long __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
-                                                     const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
-                                                     const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(long __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(long __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvxl(long __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(long __a,
-                                                     const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a,
-                                                   const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(long __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(long __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvxl(long __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a,
-                                                     const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a,
-                                                     const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_loge */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_loge(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_vlogefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vlogefp(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_lvsl */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
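-/* The classic unaligned-load sequence built from these primitives (sketch,
-   big-endian; on little endian use plain assignment instead, as the
-   deprecation notices say), for some const int *__p:
-
-     vector unsigned char __perm = vec_lvsl(0, __p);
-     vector int __lo = vec_ld(0, __p);
-     vector int __hi = vec_ld(15, __p);
-     vector int __v = vec_perm(__lo, __hi, __perm);
-*/
-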
-/* vec_lvsr */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
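-/* vec_lvsr generates the mirror-image mask, used in the matching unaligned
-   *store* sequence (permute the data into position, then merge it into the
-   two surrounding quadwords); the same little-endian deprecation applies. */
-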
-/* vec_madd */
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector signed short, vector signed short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector signed short, vector signed short);
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
-
-static __inline__ vector signed short __ATTRS_o_ai vec_madd(
-    vector signed short __a, vector signed short __b, vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector signed short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector signed short __b,
-         vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaddasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmaddadp(__a, __b, __c);
-}
-#endif
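
/* Illustrative sketch (the helper name __axpy4 is hypothetical): vec_madd
   is an element-wise multiply-add, r[i] = __a[i] * __b[i] + __c[i], fused
   on VSX (xvmadd*) and mapped to vmaddfp otherwise. */
static vector float __axpy4(float __a, vector float __x, vector float __y) {
  return vec_madd(vec_splats(__a), __x, __y); /* a*x[i] + y[i], 4 lanes */
}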
-
-/* vec_vmaddfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-}
-
-/* vec_madds */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_madds(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_vmhaddshs */
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vmhaddshs(vector signed short __a, vector signed short __b,
-              vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
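
/* Illustrative sketch (__q15_mac is hypothetical): vec_madds keeps the
   high, Q15-style part of each product and saturates, roughly
   r[i] = sat16(((__a[i] * __b[i]) >> 15) + __c[i]), which suits
   fixed-point filter kernels. */
static vector signed short __q15_mac(vector signed short __x,
                                     vector signed short __coef,
                                     vector signed short __acc) {
  return vec_madds(__x, __coef, __acc); /* saturating high multiply-add */
}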
-
-/* vec_msub */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-  return __builtin_vsx_xvmsubasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmsubadp(__a, __b, __c);
-}
-#endif
-
-/* vec_max */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmaxdp(__a, __b);
-}
-#endif
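
/* Illustrative sketch (__relu4 is hypothetical): a per-lane maximum
   against a constant gives a branch-free ReLU/threshold. */
static vector signed int __relu4(vector signed int __v) {
  return vec_max(__v, vec_splat_s32(0)); /* max(v[i], 0) via vmaxsw */
}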
-
-/* vec_vmaxsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-/* vec_vmaxub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vmaxsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-/* vec_vmaxuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vmaxsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-/* vec_vmaxuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vmaxfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaxfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-/* vec_mergeh */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-#endif
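
/* Worked example (hypothetical helper): with __a = {a0,...,a15} and
   __b = {b0,...,b15}, vec_mergeh yields {a0,b0,a1,b1,...,a7,b7} and
   vec_mergel yields {a8,b8,...,a15,b15}, in C element order on either
   endianness (vec_perm compensates on little endian). */
static void __interleave_lr(vector unsigned char __l, vector unsigned char __r,
                            vector unsigned char *__out0,
                            vector unsigned char *__out1) {
  *__out0 = vec_mergeh(__l, __r); /* {l0,r0,...,l7,r7} */
  *__out1 = vec_mergel(__l, __r); /* {l8,r8,...,l15,r15} */
}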
-
-/* vec_vmrghb */
-
-#define __builtin_altivec_vmrghb vec_vmrghb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrghb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrghb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-/* vec_vmrghh */
-
-#define __builtin_altivec_vmrghh vec_vmrghh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrghh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-/* vec_vmrghw */
-
-#define __builtin_altivec_vmrghw vec_vmrghw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-/* vec_mergel */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-#endif
-
-/* vec_vmrglb */
-
-#define __builtin_altivec_vmrglb vec_vmrglb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrglb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrglb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-/* vec_vmrglh */
-
-#define __builtin_altivec_vmrglh vec_vmrglh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrglh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-/* vec_vmrglw */
-
-#define __builtin_altivec_vmrglw vec_vmrglw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __POWER8_VECTOR__
-/* vec_mergee */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergee(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergee(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergee(vector bool long long __a, vector bool long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergee(vector signed long long __a, vector signed long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergee(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergee(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergee(vector double __a, vector double __b) {
-  return vec_mergeh(__a, __b);
-}
-
-/* vec_mergeo */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergeo(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeo(vector bool long long __a, vector bool long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeo(vector signed long long __a, vector signed long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergeo(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeo(vector double __a, vector double __b) {
-  return vec_mergel(__a, __b);
-}
-
-#endif
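
/* Worked example (hypothetical helper): for 32-bit lanes
   __a = {a0,a1,a2,a3} and __b = {b0,b1,b2,b3},
   vec_mergee -> {a0,b0,a2,b2} and vec_mergeo -> {a1,b1,a3,b3}.  For 64-bit
   lanes the definitions above simply forward to vec_mergeh/vec_mergel,
   since even/odd then coincide with high/low. */
#ifdef __POWER8_VECTOR__
static vector signed int __even_lanes(vector signed int __a,
                                      vector signed int __b) {
  return vec_mergee(__a, __b); /* even-numbered lanes of both inputs */
}
#endif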
-
-/* vec_mfvscr */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_mfvscr(void) {
-  return __builtin_altivec_mfvscr();
-}
-
-/* vec_min */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmindp(__a, __b);
-}
-#endif
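
/* Illustrative sketch (__clamp4 is hypothetical): combining vec_max and
   vec_min gives a branch-free per-lane clamp. */
static vector float __clamp4(vector float __v, vector float __lo,
                             vector float __hi) {
  return vec_min(vec_max(__v, __lo), __hi); /* __lo[i] <= r[i] <= __hi[i] */
}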
-
-/* vec_vminsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-/* vec_vminub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vminsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-/* vec_vminuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vminsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-/* vec_vminuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vminfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vminfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-/* vec_mladd */
-
-#define __builtin_altivec_vmladduhm vec_mladd
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
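
/* Illustrative sketch (__scale_bias is hypothetical): vec_mladd is a plain
   modular multiply-add on halfwords, keeping only the low 16 bits of each
   product (contrast vec_madds above, which keeps the high bits and
   saturates). */
static vector unsigned short __scale_bias(vector unsigned short __x) {
  return vec_mladd(__x, vec_splat_u16(3),
                   vec_splat_u16(7)); /* 3*x + 7 mod 2^16, per lane */
}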
-
-/* vec_vmladduhm */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                                          vector short __b,
-                                                          vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-/* vec_mradds */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_mradds(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_vmhraddshs */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_msum */
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
-                                                   vector unsigned char __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
-                                                   vector short __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
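
/* Illustrative sketch (__dot16 is hypothetical): the halfword forms of
   vec_msum compute, per 32-bit lane i,
   __a[2i]*__b[2i] + __a[2i+1]*__b[2i+1] + __c[i] (the byte forms sum four
   products per lane), so chaining it accumulates a dot product four
   partial sums at a time. */
static int __dot16(const vector signed short *__a,
                   const vector signed short *__b, int __n) {
  vector signed int __acc = {0, 0, 0, 0};
  vector signed int __zero = {0, 0, 0, 0};
  int __i;
  for (__i = 0; __i < __n; ++__i)
    __acc = vec_msum(__a[__i], __b[__i], __acc); /* 8 MACs per iteration */
  __acc = vec_sums(__acc, __zero); /* horizontal reduction (saturating) */
  return vec_extract(__acc, 3);    /* vec_sums leaves the total in lane 3 */
}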
-
-/* vec_msumc */
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_msumc(vector unsigned long long __a, vector unsigned long long __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vmsumcud(__a, __b, __c);
-}
-#endif
-
-/* vec_vmsummbm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-/* vec_vmsumubm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-/* vec_vmsumshm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-/* vec_vmsumuhm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
-
-/* vec_msums */
-
-static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
-                                                    vector short __b,
-                                                    vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msums(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_vmsumshs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-/* vec_vmsumuhs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_mtvscr */
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-/* vec_mul */
-
-/* Integer vector multiplication multiplies the even- and odd-numbered
-   elements separately, truncates each double-width product to the element
-   width, and merges the truncated results into the result vector.
-*/
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mul(vector signed char __a, vector signed char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mul(vector unsigned char __a, vector unsigned char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mul(vector signed short __a, vector signed short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mul(vector unsigned short __a, vector unsigned short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mul(vector signed int __a, vector signed int __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mul(vector unsigned int __a, vector unsigned int __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mul(vector signed long long __a, vector signed long long __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a * __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
-                                                    vector float __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
-                                                     vector double __b) {
-  return __a * __b;
-}
-#endif
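-
-/* Illustrative note: because vec_mul lowers to the C '*' operator, each
-   product is truncated to the element width.  A minimal sketch
-   (hypothetical values, not from this header):
-
-     vector unsigned short __x = {0x1234, 2, 3, 4, 5, 6, 7, 8};
-     vector unsigned short __y = {0x1000, 1, 1, 1, 1, 1, 1, 1};
-     vec_mul(__x, __y);  // lane 0 is 0x4000: 0x1234 * 0x1000 = 0x1234000,
-                         // of which only the low 16 bits are kept
-*/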
-
-/* The vmulos* and vmules* instructions have a big-endian bias, so
-   we must reverse the meaning of "even" and "odd" for little endian.  */
-
-/* vec_mule */
-
-static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mule(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosw(__a, __b);
-#else
-  return __builtin_altivec_vmulesw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouw(__a, __b);
-#else
-  return __builtin_altivec_vmuleuw(__a, __b);
-#endif
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_mule(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosd(__a, __b);
-#else
-  return __builtin_altivec_vmulesd(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_mule(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloud(__a, __b);
-#else
-  return __builtin_altivec_vmuleud(__a, __b);
-#endif
-}
-#endif
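-
-/* Illustrative note: the swapped builtins above keep vec_mule
-   endian-neutral at the source level: it should always widen the
-   products of the even-indexed elements in the language-level element
-   order.  A minimal sketch (hypothetical values, not from this header):
-
-     vector short __x = {1, 2, 3, 4, 5, 6, 7, 8};
-     vector short __y = {10, 10, 10, 10, 10, 10, 10, 10};
-     vec_mule(__x, __y);  // {10, 30, 50, 70} on either endianness
-*/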
-
-/* vec_vmulesb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulesb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-/* vec_vmuleub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-/* vec_vmulesh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulesh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-/* vec_vmuleuh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-/* vec_mulh */
-
-#ifdef __POWER10_VECTOR__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mulh(vector signed int __a, vector signed int __b) {
-  return __builtin_altivec_vmulhsw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mulh(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmulhuw(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mulh(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmulhsd(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mulh(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmulhud(__a, __b);
-}
-#endif
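-
-/* Illustrative note: vec_mulh should return the high half of each
-   double-width product.  A minimal sketch (hypothetical values, not
-   from this header):
-
-     vector unsigned int __x = {0x80000000, 0, 0, 0};
-     vector unsigned int __y = {4, 0, 0, 0};
-     vec_mulh(__x, __y);  // lane 0 is 2: 0x80000000 * 4 = 0x200000000,
-                          // whose high 32 bits are 2
-*/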
-
-/* vec_mulo */
-
-static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mulo(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesw(__a, __b);
-#else
-  return __builtin_altivec_vmulosw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuw(__a, __b);
-#else
-  return __builtin_altivec_vmulouw(__a, __b);
-#endif
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_mulo(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesd(__a, __b);
-#else
-  return __builtin_altivec_vmulosd(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_mulo(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleud(__a, __b);
-#else
-  return __builtin_altivec_vmuloud(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulosb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulosb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-/* vec_vmuloub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-/* vec_vmulosh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulosh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-/* vec_vmulouh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-/* vec_nand */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                                          vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)(~((vector unsigned int)__a &
-                          (vector unsigned int)__b));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)(~((vector unsigned long long)__a &
-                           (vector unsigned long long)__b));
-}
-
-#endif
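-
-/* Illustrative note: by De Morgan, ~(__a & __b) == ~__a | ~__b, and
-   passing the same operand twice gives a bitwise complement.  A minimal
-   sketch (hypothetical values, not from this header):
-
-     vector unsigned int __m = {0xFFFF0000, 0, 0, 0};
-     vec_nand(__m, __m);  // lane 0 is 0x0000FFFF
-*/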
-
-/* vec_nmadd */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-  return __builtin_vsx_xvnmaddasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmaddadp(__a, __b, __c);
-}
-#endif
-
-/* vec_nmsub */
-
-static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvnmsubasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmsubadp(__a, __b, __c);
-}
-#endif
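-
-/* Illustrative note: following the PowerPC negative multiply-add forms,
-   vec_nmadd should compute -(__a * __b + __c) and vec_nmsub should
-   compute -(__a * __b - __c), i.e. __c - __a * __b, each as a single
-   fused operation.  A minimal sketch assuming a VSX target
-   (hypothetical values, not from this header):
-
-     vector float __x = {2.0f, 0, 0, 0};
-     vector float __y = {3.0f, 0, 0, 0};
-     vector float __z = {1.0f, 0, 0, 0};
-     vec_nmadd(__x, __y, __z);  // lane 0 is -7.0f
-     vec_nmsub(__x, __y, __z);  // lane 0 is -5.0f
-*/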
-
-/* vec_vnmsubfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-}
-
-/* vec_nor */
-
-#define __builtin_altivec_vnor vec_nor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
-                                                        vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
-                                                    vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
-                                                  vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
-                                                       vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      ~((vector unsigned long long)__a | (vector unsigned long long)__b);
-  return (vector double)__res;
-}
-#endif
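-
-/* Illustrative note: vec_nor with a repeated operand is the classic
-   AltiVec idiom for a full bitwise complement, since ~(__a | __a) is
-   just ~__a.  A minimal sketch (hypothetical values, not from this
-   header):
-
-     vector unsigned int __m = {0x0F0F0F0F, 0, 0, 0};
-     vec_nor(__m, __m);  // lane 0 is 0xF0F0F0F0
-*/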
-
-/* vec_vnor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vnor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vnor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
-                                                     vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vnor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vnor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
-                                                   vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vnor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-#endif
-
-/* vec_or */
-
-#define __builtin_altivec_vor vec_or
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                                         vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
-                                                       vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                   vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                        vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                 vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                      vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                                         vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector bool int __a, vector float __b) {
-  return (vector float)(__a | ~(vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_orc(vector float __a,
-                                                    vector float __b) {
-  return (vector float)((vector unsigned int)__a | ~(vector unsigned int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_orc(vector double __a,
-                                                     vector double __b) {
-  return (vector double)((vector bool long long)__a |
-                         ~(vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-/* vec_pack */
-
-/* The various vector pack instructions have a big-endian bias, so for
-   little endian we must handle reversed element numbering.  */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_pack(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_pack(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
-                                                     vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
-                                                          vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_pack(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_pack(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_pack(vector double __a, vector double __b) {
-  return (vector float)(__a[0], __a[1], __b[0], __b[1]);
-}
-#endif
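-
-/* Illustrative note: the permute masks above select the low-order half
-   of each wide element, so vec_pack is a modulo (truncating) pack.  A
-   minimal sketch (hypothetical values, not from this header):
-
-     vector signed int __x = {0x10001, 0x20002, 0x30003, 0x40004};
-     vector signed int __y = {5, 6, 7, 8};
-     vec_pack(__x, __y);  // {1, 2, 3, 4, 5, 6, 7, 8}
-*/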
-
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack_to_short_fp32(vector float __a, vector float __b) {
-  vector float __resa = __builtin_vsx_xvcvsphp(__a);
-  vector float __resb = __builtin_vsx_xvcvsphp(__b);
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_mergee(__resa, __resb);
-#else
-  return (vector unsigned short)vec_mergeo(__resa, __resb);
-#endif
-}
-
-#endif
-
-/* vec_vpkuhum */
-
-#define __builtin_altivec_vpkuhum vec_vpkuhum
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vpkuhum(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vpkuhum(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-/* vec_vpkuwum */
-
-#define __builtin_altivec_vpkuwum vec_vpkuwum
-
-static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
-                                                        vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vpkuwum(vector bool int __a, vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-/* vec_vpkudum */
-
-#ifdef __POWER8_VECTOR__
-#define __builtin_altivec_vpkudum vec_vpkudum
-
-static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vpkudum(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-#endif
-
-/* vec_packpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_packpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_vpkpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_packs */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
-                                                            vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
-                                                             vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
-                                                    vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
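-
-/* Illustrative note: unlike vec_pack, these forms saturate instead of
-   truncating, per the saturating vpk*ss/vpk*us builtins they wrap.  A
-   minimal sketch (hypothetical values, not from this header):
-
-     vector int __x = {40000, -40000, 1, 2};
-     vector int __y = {3, 4, 5, 6};
-     vec_packs(__x, __y);  // {32767, -32768, 1, 2, 3, 4, 5, 6}
-*/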
-
-/* vec_vpkshss */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vpkshss(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-/* vec_vpksdss */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkuhus */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkudus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkswss */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vpkswss(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-/* vec_vpkuwus */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_packsu */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
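-
-/* Illustrative note: vec_packsu saturates to the unsigned range of the
-   narrow type, so negative inputs should clamp to zero.  A minimal
-   sketch (hypothetical values, not from this header):
-
-     vector short __x = {-5, 300, 1, 2, 3, 4, 5, 6};
-     vector short __y = {7, 8, 9, 10, 11, 12, 13, 14};
-     vec_packsu(__x, __y);  // begins {0, 255, 1, 2, ...}
-*/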
-
-/* vec_vpkshus */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkswus */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_vpksdus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpksdus(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_perm */
-
-// The vperm instruction is defined architecturally with a big-endian bias.
-// For little endian, we swap the input operands and invert the permute
-// control vector.  Only the rightmost 5 bits matter, so we could use
-// a vector of all 31s instead of all 255s to perform the inversion.
-// However, when the PCV is not a constant, using 255 has an advantage
-// in that the vec_xor can be recognized as a vec_nor (and for P8 and
-// later, possibly a vec_nand).
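-//
-// Worked example (illustrative, not from the original comment): only the
-// low 5 bits of each control byte are significant, and for any 5-bit
-// index c, (c ^ 0xFF) & 0x1F == 31 - c.  So a control byte of 3, which
-// selects byte 3 of the concatenation __a:__b under big-endian
-// numbering, becomes 0xFC after the XOR; its low 5 bits are 28, which
-// selects the same source byte from the swapped pair __b:__a once
-// little-endian element numbering is accounted for.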
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                         (vector int)__a, __d);
-#else
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                         (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                       (vector int)__a, __d);
-#else
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                       (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                        (vector int)__a, __d);
-#else
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                        (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
-#else
-  return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                      (vector int)__a, __d);
-#else
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                      (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
-                                                    (vector int)__a, __d);
-#else
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
-                                                    (vector int)__b, __c);
-#endif
-}
-#endif
-
-/* vec_vperm */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vperm(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
-    vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vperm(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai
-vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
-                                                    vector int __b,
-                                                    vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vperm(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai vec_vperm(
-    vector long long __a, vector long long __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-#endif
-
-/* vec_re */
-
-static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvresp(__a);
-#else
-  return __builtin_altivec_vrefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
-  return __builtin_vsx_xvredp(__a);
-}
-#endif
-
-/* vec_vrefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrefp(vector float __a) {
-  return __builtin_altivec_vrefp(__a);
-}
-
-/* vec_rl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
-                                                   vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
-                                                 vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
-  // Rotate __a by __b, consistent with the other vec_rl overloads; shift as
-  // unsigned so no sign bits are smeared in from the left.
-  return (vector signed __int128)(
-      ((vector unsigned __int128)__a << __b) |
-      ((vector unsigned __int128)__a >>
-       ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __b)));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_rl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (__a << __b) |
-         (__a >> ((__CHAR_BIT__ * sizeof(vector unsigned __int128)) - __b));
-}
-#endif
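-
-// Editorial sketch, not part of this header: the shift-pair rotate identity
-// used above, shown on a plain 64-bit scalar.  Masking the count first keeps
-// both shifts defined in C when the count is 0 or >= the width.  The helper
-// name is hypothetical.
-static inline unsigned long long __rotl64_model(unsigned long long __x,
-                                                unsigned __n) {
-  __n &= 63;                                        // count modulo the width
-  return (__x << __n) | (__x >> ((64 - __n) & 63)); // rotl(x, 0) == x
-}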
-
-/* vec_rlmi */
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlmi(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vrlwmi(__a, __c, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  return __builtin_altivec_vrldmi(__a, __c, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vrlqmi(__a, __c, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vrlqmi(__a, __c, __b);
-}
-#endif
-
-/* vec_rlnm */
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlnm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 };
-  return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  vector unsigned long long OneByte = { 0x8, 0x8 };
-  return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b));
-}
-#endif
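-
-// Editorial sketch, not part of this header: "rotate left, then mask" on a
-// 32-bit scalar, which is what vrlwnm/vrldnm implement per element.  The
-// OneByte shift above packs the mask-control field (__c) one byte to the
-// left of the rotate count (__b) inside each element, the layout the
-// instruction expects.  Helper name hypothetical; assumes 32-bit unsigned.
-static inline unsigned __rlnm32_model(unsigned __x, unsigned __sh,
-                                      unsigned __mask) {
-  __sh &= 31;
-  unsigned __r = (__x << __sh) | (__x >> ((32 - __sh) & 31));
-  return __r & __mask; // rotate, then clear the bits outside the mask
-}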
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_rlnm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  // Merge __b and __c using an appropriate shuffle.
-  vector unsigned char TmpB = (vector unsigned char)__b;
-  vector unsigned char TmpC = (vector unsigned char)__c;
-  vector unsigned char MaskAndShift =
-#ifdef __LITTLE_ENDIAN__
-      __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0,
-                              1, -1, -1, -1, -1, -1);
-#else
-      __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
-                              -1, -1, -1, -1, -1, -1, -1);
-#endif
-  return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_rlnm(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  // Merge __b and __c using an appropriate shuffle.
-  vector unsigned char TmpB = (vector unsigned char)__b;
-  vector unsigned char TmpC = (vector unsigned char)__c;
-  vector unsigned char MaskAndShift =
-#ifdef __LITTLE_ENDIAN__
-      __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0,
-                              1, -1, -1, -1, -1, -1);
-#else
-      __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
-                              -1, -1, -1, -1, -1, -1, -1);
-#endif
-  return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
-}
-#endif
-
-/* vec_vrlb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vrlb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-/* vec_vrlh */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vrlh(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-/* vec_vrlw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
-                                                   vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-/* vec_round */
-
-static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
-  return __builtin_altivec_vrfin(__a);
-}
-
-#ifdef __VSX__
-#ifdef __XL_COMPAT_ALTIVEC__
-static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a);
-static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
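-  // Force round-to-nearest for XL compatibility: save the FPSCR, switch the
-  // rounding mode with __builtin_setrnd(0), round with vec_rint (which uses
-  // the current mode), then restore the caller's FPSCR.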
-  double __fpscr = __builtin_readflm();
-  __builtin_setrnd(0);
-  vector double __rounded = vec_rint(__a);
-  __builtin_setflm(__fpscr);
-  return __rounded;
-}
-#else
-static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-#endif
-
-/* vec_rint */
-
-static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
-  return __builtin_vsx_xvrspic(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
-  return __builtin_vsx_xvrdpic(__a);
-}
-
-/* vec_roundc */
-
-static __inline__ vector float __ATTRS_o_ai vec_roundc(vector float __a) {
-  return __builtin_vsx_xvrspic(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_roundc(vector double __a) {
-  return __builtin_vsx_xvrdpic(__a);
-}
-
-/* vec_nearbyint */
-
-static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
-  return __builtin_vsx_xvrspi(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-#endif
-
-/* vec_vrfin */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfin(vector float __a) {
-  return __builtin_altivec_vrfin(__a);
-}
-
-/* vec_sqrt */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
-  return __builtin_vsx_xvsqrtsp(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
-  return __builtin_vsx_xvsqrtdp(__a);
-}
-#endif
-
-/* vec_rsqrte */
-
-static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrsqrtesp(__a);
-#else
-  return __builtin_altivec_vrsqrtefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
-  return __builtin_vsx_xvrsqrtedp(__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_rsqrt(vector float __a) {
-  return __builtin_ppc_rsqrtf(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_rsqrt(vector double __a) {
-  return __builtin_ppc_rsqrtd(__a);
-}
-#endif
-
-/* vec_vrsqrtefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrsqrtefp(vector float __a) {
-  return __builtin_altivec_vrsqrtefp(__a);
-}
-
-/* vec_xvtsqrt */
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_test_swsqrt(vector double __a) {
-  return __builtin_vsx_xvtsqrtdp(__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_test_swsqrts(vector float __a) {
-  return __builtin_vsx_xvtsqrtsp(__a);
-}
-#endif
-
-/* vec_sel */
-
-#define __builtin_altivec_vsel_4si vec_sel
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                                        vector bool char __b,
-                                                        vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_sel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                                       vector bool int __b,
-                                                       vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector bool long long __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector unsigned long long __c) {
-  return (__a & ~(vector bool long long)__c) |
-         (__b & (vector bool long long)__c);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector bool long long __c) {
-  return (__a & ~(vector signed long long)__c) |
-         (__b & (vector signed long long)__c);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector unsigned long long __c) {
-  return (__a & ~(vector signed long long)__c) |
-         (__b & (vector signed long long)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector bool long long __c) {
-  return (__a & ~(vector unsigned long long)__c) |
-         (__b & (vector unsigned long long)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector unsigned long long __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-#endif
-
-/* vec_vsel */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsel(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                                         vector bool char __b,
-                                                         vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
-                                                     vector short __b,
-                                                     vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                                        vector bool int __b,
-                                                        vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-/* vec_sl */
-
-// vec_sl does modulo arithmetic on __b first, so each shift count in __b may
-// equal or exceed the element width of __a.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sl(vector unsigned char __a, vector unsigned char __b) {
-  return __a << (__b %
-                 (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
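-
-// Editorial sketch, not part of this header: the count-modulo-width behavior
-// above on a scalar.  Reducing the count first keeps the C shift defined even
-// when callers pass counts >= the element width, matching what the underlying
-// vsl* instructions do with the count.  Helper name hypothetical.
-static inline unsigned char __sl8_model(unsigned char __x, unsigned __n) {
-  return (unsigned char)(__x << (__n % 8)); // 8 == bits per element here
-}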
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sl((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sl(vector unsigned short __a, vector unsigned short __b) {
-  return __a << (__b % (vector unsigned short)(sizeof(unsigned short) *
-                                               __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sl((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sl(vector unsigned int __a, vector unsigned int __b) {
-  return __a << (__b %
-                 (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sl((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a << (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sl(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sl((vector unsigned long long)__a, __b);
-}
-#elif defined(__VSX__)
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vspltb(vector unsigned char __a, unsigned char __b);
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
-  __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
-
-  // Big endian element one (the right doubleword) can be left shifted as-is.
-  // The other element needs to be swapped into the right doubleword and
-  // shifted. Then the right doublewords of the two result vectors are merged.
-  vector signed long long __rightelt =
-      (vector signed long long)__builtin_altivec_vslo((vector signed int)__a,
-                                                      (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __rightelt = (vector signed long long)__builtin_altivec_vsl(
-      (vector signed int)__rightelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
-#else
-  __rightelt = (vector signed long long)__builtin_altivec_vsl(
-      (vector signed int)__rightelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
-#endif
-  __a = __builtin_shufflevector(__a, __a, 1, 0);
-  __b = __builtin_shufflevector(__b, __b, 1, 0);
-  vector signed long long __leftelt =
-      (vector signed long long)__builtin_altivec_vslo((vector signed int)__a,
-                                                      (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __leftelt = (vector signed long long)__builtin_altivec_vsl(
-      (vector signed int)__leftelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
-  return (vector unsigned long long)__builtin_shufflevector(__rightelt,
-                                                            __leftelt, 0, 2);
-#else
-  __leftelt = (vector signed long long)__builtin_altivec_vsl(
-      (vector signed int)__leftelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
-  return (vector unsigned long long)__builtin_shufflevector(__leftelt,
-                                                            __rightelt, 1, 3);
-#endif
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sl(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sl((vector unsigned long long)__a, __b);
-}
-#endif /* __VSX__ */
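-
-// Editorial sketch, not part of this header: the shift decomposition behind
-// the VSX path above.  vslo shifts the whole register left by a byte count
-// and vsl by the residual 0-7 bits, so one full shift is vslo by __n / 8
-// octets followed by vsl by __n % 8 bits; since the pair takes only a single
-// count, the code runs it once per doubleword (swapping halves in between)
-// and merges the two partial results.  Modeled on a 64-bit "register" with
-// a hypothetical helper name:
-static inline unsigned long long __vslo_vsl_model(unsigned long long __x,
-                                                  unsigned __n) {
-  __n &= 63;
-  unsigned long long __t = __x << (__n & ~7u); // vslo: whole octets
-  return __t << (__n & 7);                     // vsl: residual bits
-}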
-
-/* vec_vslb */
-
-#define __builtin_altivec_vslb vec_vslb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslb(vector signed char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslh */
-
-#define __builtin_altivec_vslh vec_vslh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vslh(vector short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslw */
-
-#define __builtin_altivec_vslw vec_vslw
-
-static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_sld */
-
-#define __builtin_altivec_vsldoi_4si vec_sld
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sld(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
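-
-// Editorial sketch, not part of this header: vec_sld selects the 16
-// consecutive bytes starting at offset __c (mod 16) from the 32-byte
-// big-endian concatenation a||b, which the permute vectors above spell out
-// index by index (and mirror for little endian).  Helper name hypothetical.
-static inline void __sld_model(const unsigned char *__a16,
-                               const unsigned char *__b16, unsigned __c,
-                               unsigned char *__out16) {
-  __c &= 0x0F;
-  for (int __i = 0; __i < 16; ++__i) {
-    unsigned __idx = __c + __i;          // index into the concatenation
-    __out16[__i] = __idx < 16 ? __a16[__idx] : __b16[__idx - 16];
-  }
-}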
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sld(vector unsigned char __a, vector unsigned char __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sld(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sld(vector unsigned short __a, vector unsigned short __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
-                                                    vector pixel __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
-                                                       vector bool int __b,
-                                                       unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
-                                                    vector float __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sld(vector bool long long __a, vector bool long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sld(vector signed long long __a, vector signed long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sld(vector unsigned long long __a, vector unsigned long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
-                                                     vector double __b,
-                                                     unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-#endif
-
-/* vec_sldw */
-static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sldw(vector unsigned char __a, vector unsigned char __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sldw(vector unsigned short __a, vector unsigned short __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sldw(
-    vector float __a, vector float __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sldw(vector signed long long __a, vector signed long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sldw(
-    vector double __a, vector double __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_slv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vslv(__a, __b);
-}
-
-/* vec_srv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsrv(__a, __b);
-}
-#endif
-
-/* vec_vsldoi */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
-    vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
-                                                       vector short __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
-    vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
-                                                       vector pixel __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
-                                                     vector int __b,
-                                                     unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
-    vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
-                                                       vector float __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
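-
-// Editor's note (illustrative, not part of the original header): the
-// vec_vsldoi bodies above spell out a byte shift through vec_perm. For
-// __c = 4, __d = 4, so the big-endian branch selects bytes 4..19 of
-// __a:__b (that is, __a shifted left four bytes with the first four bytes
-// of __b shifted in), while the little-endian branch builds the mirrored
-// index vector (16 - __d, ...) over __b:__a to get matching semantics.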
-
-/* vec_sll */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
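-
-// Editor's note (illustrative, not part of the original header): vec_sll
-// shifts the full 128-bit register left by a bit count carried in __b; the
-// ISA expects the low three bits of every byte of __b to agree, so the
-// count is normally built with a splat. A minimal sketch (hypothetical __v
-// of type vector unsigned char):
-//   vec_sll(__v, vec_splat_u8(3)); // __v << 3 as one 128-bit quantity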
-
-/* vec_vsl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_slo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
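-
-// Editor's note (illustrative, not part of the original header): vec_slo is
-// the companion shift by whole octets; it reads the byte count from bits
-// 121:124 of __b, so one splatted count can serve both intrinsics. The
-// classic idiom for a full 0..127-bit shift (hypothetical __v):
-//   vector unsigned char __sh = vec_splat_u8(11); // 1 byte + 3 bits
-//   vec_sll(vec_slo(__v, __sh), __sh);            // __v << 11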
-
-/* vec_vslo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-/* vec_splat */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat(vector signed char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat(vector unsigned char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_splat(vector bool char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_splat(vector signed short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat(vector unsigned short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_splat(vector bool short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_splat(vector signed int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat(vector unsigned int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_splat(vector bool int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
-                                                       unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_splat(vector bool long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splat(vector signed long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splat(vector unsigned long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-#endif
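-
-// Editor's note (illustrative, not part of the original header): vec_splat
-// replicates one element across the vector by expanding the masked element
-// index into a byte permute pattern, e.g. (__b & 0x03) * 4 is the first
-// byte of the chosen word. A minimal sketch:
-//   vector signed int __x = {10, 20, 30, 40};
-//   vec_splat(__x, 2); // {30, 30, 30, 30}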
-
-/* vec_vspltb */
-
-#define __builtin_altivec_vspltb vec_vspltb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltb(vector signed char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vspltb(vector unsigned char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
-                                                           unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-/* vec_vsplth */
-
-#define __builtin_altivec_vsplth vec_vsplth
-
-static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsplth(vector unsigned short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsplth(vector bool short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-/* vec_vspltw */
-
-#define __builtin_altivec_vspltw vec_vspltw
-
-static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
-                                                     unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vspltw(vector unsigned int __a, unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
-                                                          unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
-                                                       unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-/* vec_splat_s8 */
-
-#define __builtin_altivec_vspltisb vec_splat_s8
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat_s8(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_vspltisb */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltisb(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_splat_s16 */
-
-#define __builtin_altivec_vspltish vec_splat_s16
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_vspltish */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_splat_s32 */
-
-#define __builtin_altivec_vspltisw vec_splat_s32
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_vspltisw */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_splat_u8 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat_u8(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-/* vec_splat_u16 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat_u16(signed char __a) {
-  return (vector unsigned short)(__a);
-}
-
-/* vec_splat_u32 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat_u32(signed char __a) {
-  return (vector unsigned int)(__a);
-}
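-
-// Editor's note (illustrative, not part of the original header): these
-// splat-immediate forms map to the vspltis* instructions, so per the FIXMEs
-// above the argument is really a 5-bit signed literal in [-16, 15];
-// vec_splat_s16(-7), for example, yields eight halfwords of -7.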
-
-/* vec_sr */
-
-// vec_sr performs modulo arithmetic on __b first, so each shift count in
-// __b may exceed the bit width of the elements of __a.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sr(vector unsigned char __a, vector unsigned char __b) {
-  return __a >>
-         (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
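-
-// Editor's note (illustrative, not part of the original header): the modulo
-// means an oversized count simply wraps, e.g.
-//   vec_sr(vec_splat_u8(8), vec_splat_u8(9)); // 9 % 8 == 1, each byte 4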
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sr((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sr(vector unsigned short __a, vector unsigned short __b) {
-  return __a >>
-         (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sr((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sr(vector unsigned int __a, vector unsigned int __b) {
-  return __a >>
-         (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sr((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sr(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
-}
-#elif defined(__VSX__)
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
-
-  // Big endian element zero (the left doubleword) can be right shifted as-is.
-// However, the shift amount must be in the right doubleword.
-  // The other element needs to be swapped into the left doubleword and
-  // shifted. Then the left doublewords of the two result vectors are merged.
-  vector unsigned long long __swapshift =
-      __builtin_shufflevector(__b, __b, 1, 0);
-  vector unsigned long long __leftelt =
-      (vector unsigned long long)__builtin_altivec_vsro(
-          (vector signed int)__a, (vector signed int)__swapshift);
-#ifdef __LITTLE_ENDIAN__
-  __leftelt = (vector unsigned long long)__builtin_altivec_vsr(
-      (vector signed int)__leftelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 0));
-#else
-  __leftelt = (vector unsigned long long)__builtin_altivec_vsr(
-      (vector signed int)__leftelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 15));
-#endif
-  __a = __builtin_shufflevector(__a, __a, 1, 0);
-  vector unsigned long long __rightelt =
-      (vector unsigned long long)__builtin_altivec_vsro((vector signed int)__a,
-                                                        (vector signed int)__b);
-#ifdef __LITTLE_ENDIAN__
-  __rightelt = (vector unsigned long long)__builtin_altivec_vsr(
-      (vector signed int)__rightelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
-  return __builtin_shufflevector(__rightelt, __leftelt, 1, 3);
-#else
-  __rightelt = (vector unsigned long long)__builtin_altivec_vsr(
-      (vector signed int)__rightelt,
-      (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
-  return __builtin_shufflevector(__leftelt, __rightelt, 0, 2);
-#endif
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sr(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
-}
-#endif /* __VSX__ */
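-
-// Editor's note (illustrative, not part of the original header): the VSX
-// branch above emulates the POWER8 vsrd instruction, shifting each
-// doubleword with a vsro/vsr pair (the older shifts only operate on the
-// whole 128-bit register) and then merging the two partial results.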
-
-/* vec_vsrb */
-
-#define __builtin_altivec_vsrb vec_vsrb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrb(vector signed char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrh */
-
-#define __builtin_altivec_vsrh vec_vsrh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrh(vector short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrw */
-
-#define __builtin_altivec_vsrw vec_vsrw
-
-static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_sra */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sra(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sra(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
-                                                    vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sra(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
-                                                  vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sra(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sra(vector signed long long __a, vector unsigned long long __b) {
-  return __a >> __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)((vector signed long long)__a >> __b);
-}
-#elif defined(__VSX__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sra(vector signed long long __a, vector unsigned long long __b) {
-  __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
-  return __a >> __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
-  __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
-  return (vector unsigned long long)((vector signed long long)__a >> __b);
-}
-#endif /* __VSX__ */
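-
-// Editor's note (illustrative, not part of the original header): vec_sra is
-// the arithmetic form, so the sign bit is replicated into the vacated
-// positions: an int element of -8 shifted right by 1 yields -4, where
-// vec_sr would yield a large positive value.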
-
-/* vec_vsrab */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-/* vec_vsrah */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrah(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-/* vec_vsraw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
-                                                    vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-/* vec_srl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
-
-/* vec_vsr */
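-// vec_vsr is the instruction-named spelling of vec_srl: both lower to the
-// AltiVec vsr instruction, which shifts the entire 128-bit register right by
-// a 0-7 bit count taken from the low-order three bits of __b (the count must
-// be replicated across all byte elements of __b for a defined result).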
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_sro */
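-// vec_sro (shift right by octet) lowers to vsro: the entire 128-bit register
-// is shifted right by a 0-15 *byte* count taken from bits 121:124 of __b
-// (Power ISA big-endian bit numbering). Together with vec_srl it implements
-// full-vector shifts by an arbitrary number of bits.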
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
-
-/* vec_vsro */
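-// vec_vsro is the instruction-named alias of vec_sro, with identical
-// semantics.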
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-/* vec_st */
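-// vec_st lowers to stvx (store vector indexed): the 16-byte vector __a is
-// stored at effective address __b + __c with the low four address bits
-// ignored, so the store always hits a 16-byte-aligned location.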
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b,
-                                           vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b,
-                                           vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
-                                           vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b,
-                                           vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b,
-                                           vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
-                                           vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
-                                           vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b,
-                                           vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b, int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, long __b,
-                                           vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, long __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
-                                           int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
-                                           vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b,
-                                           vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b,
-                                           float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_stvx */
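-// vec_stvx is the instruction-named alias of vec_st.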
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b,
-                                             vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b,
-                                             vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
-                                             vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b,
-                                             vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b,
-                                             vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
-                                             vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
-                                             vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b,
-                                             vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b,
-                                             int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b,
-                                             vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
-                                             int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
-                                             vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b,
-                                             vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b,
-                                             float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_ste */
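-// vec_ste stores a single element of __a, dispatching on element width to
-// stvebx/stvehx/stvewx; the element written is the one whose position in the
-// vector corresponds to the alignment of the effective address __b + __c.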
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, long __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, long __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, long __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, long __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, long __b, int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, long __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, long __b,
-                                            int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, long __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, long __b,
-                                            float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stvebx */
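-// Instruction-named byte form of vec_ste.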
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, long __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
-                                               long __b, unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b,
-                                               unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-/* vec_stvehx */
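-// Instruction-named halfword form of vec_ste.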
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, long __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
-                                               long __b, unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-/* vec_stvewx */
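-// Instruction-named word form of vec_ste.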
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, long __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, long __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, long __b,
-                                               float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stl */
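-// vec_stl lowers to stvxl (store vector indexed LRU): same addressing as
-// vec_st, plus a cache hint that the stored data is transient, allowing the
-// line to be marked least-recently-used instead of kept resident.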
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
-                                            vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_stvxl */
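-// vec_stvxl is the instruction-named alias of vec_stl.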
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_sub */
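-// vec_sub is element-wise modulo (wrap-around) subtraction. It is expressed
-// as plain vector operator-, which the backend lowers to the appropriate
-// subtract instruction (vsububm/vsubuhm/vsubuwm/vsubfp, etc.) for the
-// element type.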
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&
-       // defined(__SIZEOF_INT128__)
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sub(vector signed long long __a, vector signed long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
-                                                     vector double __b) {
-  return __a - __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
-                                                    vector float __b) {
-  return __a - __b;
-}
-
-/* vec_vsububm */
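-// Instruction-named byte form of vec_sub. The #define below redirects uses
-// of the builtin spelling to these overloads, since the subtraction itself
-// is written with operator- rather than a dedicated builtin.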
-
-#define __builtin_altivec_vsububm vec_vsububm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-/* vec_vsubuhm */
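-// Instruction-named halfword form of vec_sub.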
-
-#define __builtin_altivec_vsubuhm vec_vsubuhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-/* vec_vsubuwm */
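-// Instruction-named word form of vec_sub.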
-
-#define __builtin_altivec_vsubuwm vec_vsubuwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-/* vec_vsubfp */
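-// Instruction-named single-precision floating-point form of vec_sub.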
-
-#define __builtin_altivec_vsubfp vec_vsubfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vsubfp(vector float __a, vector float __b) {
-  return __a - __b;
-}
-
-/* vec_subc */
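-// vec_subc computes the carry-out of unsigned subtraction: each element of
-// the result is 1 when the corresponding __a >= __b (no borrow) and 0
-// otherwise, which is what chained multi-precision subtraction needs. The
-// POWER8 __int128 and u128 variants return the quadword carry via vsubcuq.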
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
-}
-#endif // __POWER8_VECTOR__
-
-/* vec_vsubcuw */
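-// Instruction-named word form of vec_subc.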
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-/* vec_subs */
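-// vec_subs is saturating subtraction: instead of wrapping, results clamp to
-// the representable range of the element type (e.g. unsigned underflow
-// clamps to 0), dispatching to vsubsbs/vsububs/vsubshs/vsubuhs/vsubsws/
-// vsubuws by element type and signedness.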
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vsubsbs */
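-// Instruction-named signed-byte form of vec_subs.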
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vsububs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vsubshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-/* vec_vsubuhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vsubsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-/* vec_vsubuws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-/* vec_vsubuqm */
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsubuqm(__a, __b);
-}
-
-/* vec_vsubeuqm */
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sube(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-/* vec_vsubcuq */
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-/* vec_vsubecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif
-
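-/* Illustrative sketch (not part of the original header): the quadword
-   subtract intrinsics above chain naturally into a 256-bit subtract of
-   hi:lo pairs, assuming vec_vsubcuq yields the carry (1 = no borrow):
-
-     lo = vec_vsubuqm(a_lo, b_lo);       // low 128 bits of a - b
-     cy = vec_vsubcuq(a_lo, b_lo);       // carry out of the low half
-     hi = vec_vsubeuqm(a_hi, b_hi, cy);  // high 128 bits with carry in
-*/
-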
-#ifdef __powerpc64__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-#endif
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif // __POWER8_VECTOR__
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sube(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sube(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
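-
-/* Illustrative note (not part of the original header): the overloads above
-   use the two's-complement identity a - b - 1 + c == a + ~b + c, so a
-   subtract with carry-in is expressed via vec_adde.  Scalar analogue:
-
-     unsigned a = 5, b = 3, c = 1;
-     unsigned r = a + ~b + c;  // 2, i.e. a - b with carry-in 1 (no borrow)
-*/
-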
-/* vec_sum4s */
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
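-/* Illustrative sketch (not part of the original header): vec_sum4s adds
-   each aligned group of four narrow elements to the matching word of the
-   second operand, with saturation.  Assuming that mapping:
-
-     vector signed char a = {1, 2, 3, 4, 5, 6, 7, 8,
-                             9, 10, 11, 12, 13, 14, 15, 16};
-     vector int b = {0, 0, 0, 0};
-     vector int r = vec_sum4s(a, b);  // {10, 26, 42, 58}
-*/
-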
-/* vec_vsum4sbs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4sbs(vector signed char __a, vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-/* vec_vsum4ubs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-/* vec_vsum4shs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4shs(vector signed short __a, vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
-/* vec_sum2s */
-
-/* The vsum2sws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian elements
-   1 and 3 (little-endian elements 0 and 2).  For ease of porting, the
-   programmer wants elements 1 and 3 in both cases, so for little
-   endian we must perform some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sum2s(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
-
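-/* Illustrative sketch (not part of the original header): with the
-   endian-neutral mapping established above, result elements 1 and 3 hold
-   the partial sums and the other elements are zero:
-
-     vector int a = {1, 2, 3, 4}, b = {5, 6, 7, 8};
-     vector int r = vec_sum2s(a, b);  // {0, 1+2+6, 0, 3+4+8} == {0, 9, 0, 15}
-*/
-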
-/* vec_vsum2sws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsum2sws(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
-
-/* vec_sums */
-
-/* The vsumsws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian element 3
-   (little-endian element 0).  For ease of porting, the programmer
-   wants element 3 in both cases, so for little endian we must perform
-   some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sums(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
-
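-/* Illustrative sketch (not part of the original header): result element 3
-   holds the saturated sum of all four elements of the first operand plus
-   element 3 of the second; the remaining elements are zero:
-
-     vector signed int a = {1, 2, 3, 4}, b = {0, 0, 0, 10};
-     vector signed int r = vec_sums(a, b);  // {0, 0, 0, 20}
-*/
-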
-/* vec_vsumsws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsumsws(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
-
-/* vec_trunc */
-
-static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspiz(__a);
-#else
-  return __builtin_altivec_vrfiz(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
-  return __builtin_vsx_xvrdpiz(__a);
-}
-#endif
-
-/* vec_roundz */
-static __inline__ vector float __ATTRS_o_ai vec_roundz(vector float __a) {
-  return vec_trunc(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_roundz(vector double __a) {
-  return vec_trunc(__a);
-}
-#endif
-
-/* vec_vrfiz */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfiz(vector float __a) {
-  return __builtin_altivec_vrfiz(__a);
-}
-
-/* vec_unpackh */
-
-/* The vector unpack instructions all have a big-endian bias, so for
-   little endian we must reverse the meanings of "high" and "low."  */
-#ifdef __LITTLE_ENDIAN__
-#define vec_vupkhpx(__a) __builtin_altivec_vupklpx((vector short)(__a))
-#define vec_vupklpx(__a) __builtin_altivec_vupkhpx((vector short)(__a))
-#else
-#define vec_vupkhpx(__a) __builtin_altivec_vupkhpx((vector short)(__a))
-#define vec_vupklpx(__a) __builtin_altivec_vupklpx((vector short)(__a))
-#endif
-
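-/* Illustrative note (not part of the original header): because the swap
-   above is applied consistently, the unpack intrinsics are endian-neutral
-   at the language level: vec_unpackh(a)[i] is the sign-extended a[i] for
-   i = 0..7 on both big and little endian.  For example:
-
-     vector signed char a = {-1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-     vector short h = vec_unpackh(a);  // h[0] == -1, h[1] == 2
-*/
-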
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackh(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackh(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackh(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackh(vector float __a) {
-  return (vector double)(__a[0], __a[1]);
-}
-#endif
-
-/* vec_vupkhsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupkhsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupkhsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-/* vec_vupkhsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupkhsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupkhsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-/* vec_vupkhsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupkhsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_unpackl */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackl(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackl(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackl(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackl(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackl(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackl(vector float __a) {
-  return (vector double)(__a[2], __a[3]);
-}
-#endif
-
-/* vec_vupklsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupklsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupklsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-/* vec_vupklsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupklsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupklsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-/* vec_vupklsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupklsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_vsx_ld */
-
-#ifdef __VSX__
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
-                                                       const float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed long long *__b) {
-  return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned long long *__b) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-#endif
-
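-/* Illustrative note (not part of the original header): unlike vec_ld, which
-   masks off the low four bits of the effective address, the overloads above
-   map to lxvw4x/lxvd2x and so tolerate unaligned addresses.  Sketch:
-
-     float buf[8];
-     vector float v = vec_vsx_ld(4, buf);  // 16 bytes from (char *)buf + 4
-*/
-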
-/* vec_vsx_st */
-
-#ifdef __VSX__
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               vector signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               vector float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
-                                               int __b,
-                                               vector signed long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
-                                               int __b,
-                                               vector unsigned long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               vector double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               vector signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-#endif
-
-#ifdef __VSX__
-#define vec_xxpermdi __builtin_vsx_xxpermdi
-#define vec_xxsldwi __builtin_vsx_xxsldwi
-#define vec_permi(__a, __b, __c)                                               \
-  _Generic((__a), vector signed long long                                      \
-           : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1),       \
-                                     (((__c)&0x1) + 2)),                       \
-             vector unsigned long long                                         \
-           : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1),       \
-                                     (((__c)&0x1) + 2)),                       \
-             vector double                                                     \
-           : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1),       \
-                                     (((__c)&0x1) + 2)))
-#endif
-
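-/* Illustrative note (not part of the original header): in the macro above,
-   __c is a two-bit selector; bit 1 picks the doubleword taken from __a and
-   bit 0 the doubleword taken from __b:
-
-     vec_permi(a, b, 0);  // {a[0], b[0]}
-     vec_permi(a, b, 3);  // {a[1], b[1]}
-*/
-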
-/* vec_xor */
-
-#define __builtin_altivec_vxor vec_xor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
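-/* Illustrative sketch (not part of the original header): the float overloads
-   XOR the raw bit patterns, which gives a branch-free lane-wise negation
-   when the mask sets only the sign bits:
-
-     vector unsigned int sign = {0x80000000, 0x80000000,
-                                 0x80000000, 0x80000000};
-     vector float neg = vec_xor(x, (vector float)sign);  // -x in each lane
-*/
-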
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xor(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vxor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-#endif
-
-/* ------------------------ extensions for CBEA ----------------------------- */
-
-/* vec_extract */
-
-static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
-                                                       signed int __b) {
-  return __a[__b & 0xf];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, signed int __b) {
-  return __a[__b & 0xf];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
-                                                         signed int __b) {
-  return __a[__b & 0xf];
-}
-
-static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
-                                                        signed int __b) {
-  return __a[__b & 0x7];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, signed int __b) {
-  return __a[__b & 0x7];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
-                                                          signed int __b) {
-  return __a[__b & 0x7];
-}
-
-static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
-                                                      signed int __b) {
-  return __a[__b & 0x3];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
-                                                        signed int __b) {
-  return __a[__b & 0x3];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
-                                                        signed int __b) {
-  return __a[__b & 0x3];
-}
-
-#ifdef __VSX__
-static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, signed int __b) {
-  return __a[__b & 0x1];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, signed int __b) {
-  return __a[__b & 0x1];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, signed int __b) {
-  return __a[__b & 0x1];
-}
-
-static __inline__ double __ATTRS_o_ai vec_extract(vector double __a,
-                                                  signed int __b) {
-  return __a[__b & 0x1];
-}
-#endif
-
-static __inline__ float __ATTRS_o_ai vec_extract(vector float __a,
-                                                 signed int __b) {
-  return __a[__b & 0x3];
-}
-
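-/* Illustrative note (not part of the original header): the masks above wrap
-   the index modulo the element count instead of range-checking it:
-
-     vector signed int v = {10, 20, 30, 40};
-     signed int x = vec_extract(v, 5);  // 5 & 0x3 == 1, so x == 20
-*/
-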
-#ifdef __POWER9_VECTOR__
-
-#define vec_insert4b __builtin_vsx_insertword
-#define vec_extract4b __builtin_vsx_extractuword
-
-/* vec_extract_exp */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_exp(vector float __a) {
-  return __builtin_vsx_xvxexpsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_exp(vector double __a) {
-  return __builtin_vsx_xvxexpdp(__a);
-}
-
-/* vec_extract_sig */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_sig(vector float __a) {
-  return __builtin_vsx_xvxsigsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_sig(vector double __a) {
-  return __builtin_vsx_xvxsigdp(__a);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shorth(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shortl(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-#endif /* __POWER9_VECTOR__ */
-
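-/* Illustrative note (not part of the original header): in the
-   vec_extract_fp32_from_shorth/shortl helpers above, the shuffles widen
-   four of the eight halfwords so each lands in the low-order 16 bits of a
-   word lane (the -1 indices mark don't-care lanes), assuming that is where
-   xvcvhpsp reads its half-precision inputs.  */
-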
-/* vec_insert */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_insert(signed char __a, vector signed char __b, int __c) {
-  __b[__c & 0xF] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
-  __b[__c & 0xF] = __a;
-  return __b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                           vector bool char __b,
-                                                           int __c) {
-  __b[__c & 0xF] = __a;
-  return __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_insert(signed short __a, vector signed short __b, int __c) {
-  __b[__c & 0x7] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
-  __b[__c & 0x7] = __a;
-  return __b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector bool short __b, int __c) {
-  __b[__c & 0x7] = __a;
-  return __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_insert(signed int __a, vector signed int __b, int __c) {
-  __b[__c & 0x3] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
-  __b[__c & 0x3] = __a;
-  return __b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                                          vector bool int __b,
-                                                          int __c) {
-  __b[__c & 0x3] = __a;
-  return __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_insert(signed long long __a, vector signed long long __b, int __c) {
-  __b[__c & 0x1] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
-  __b[__c & 0x1] = __a;
-  return __b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
-  __b[__c & 0x1] = __a;
-  return __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
-                                                        vector double __b,
-                                                        int __c) {
-  __b[__c & 0x1] = __a;
-  return __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
-                                                       vector float __b,
-                                                       int __c) {
-  __b[__c & 0x3] = __a;
-  return __b;
-}
-
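-/* Illustrative note (not part of the original header): vec_insert takes the
-   vector by value, so it returns an updated copy, and the index wraps just
-   as in vec_extract:
-
-     vector signed int v = {10, 20, 30, 40};
-     vector signed int w = vec_insert(99, v, 6);  // {10, 20, 99, 40}
-*/
-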
-/* vec_lvlx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
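-/* Illustrative note (not part of the original header): vec_lvlx yields the
-   bytes from the effective address to the end of its 16-byte block,
-   left-justified and zero-filled; paired with vec_lvrx (below) it forms the
-   classic unaligned-load idiom, assuming p may be misaligned:
-
-     vector unsigned char v =
-         vec_or(vec_lvlx(0, p), vec_lvrx(16, p));  // 16 bytes from p
-*/
-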
-/* vec_lvlxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrx */
-
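-/* Combining the two halves reconstructs a misaligned load.  A minimal
-   sketch, assuming the usual lvlx/lvrx idiom (note it may also read the
-   quadword at p + 16):
-
-     vector unsigned char load_unaligned(const unsigned char *p) {
-       return vec_or(vec_lvlx(0, p), vec_lvrx(16, p));
-     }
-*/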
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_stvlx */
-
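-/* The stvlx/stvrx stores are read-modify-write sequences: the bytes of the
-   destination quadword that must be preserved are fetched with vec_lvrx (or
-   vec_lvlx), merged with __a through vec_perm/vec_lvsr, and written back
-   with vec_st.  Since the whole quadword is rewritten, these stores are not
-   atomic with respect to other writers of the same 16-byte line. */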
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvlxl */
-
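-/* Same merge-and-store pattern as vec_stvlx, but the final store uses the
-   LRU-hinted vec_stl. */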
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrx */
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_promote */
-
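-/* vec_promote places the scalar __a into lane __b of an otherwise zeroed
-   vector, masking the index to the lane count of the result type.  A minimal
-   sketch of the effect:
-
-     vector int __v = vec_promote(42, 3);   // lanes {0, 0, 0, 42}
-*/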
-static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
-                                                              int __b) {
-  vector signed char __res = (vector signed char)(0);
-  __res[__b & 0xF] = __a; // 16 char lanes, so mask the index with 0xF
-  return __res;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_promote(unsigned char __a, int __b) {
-  vector unsigned char __res = (vector unsigned char)(0);
-  __res[__b & 0xF] = __a; // 16 char lanes, so mask the index with 0xF
-  return __res;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
-  vector short __res = (vector short)(0);
-  __res[__b & 0x7] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_promote(unsigned short __a, int __b) {
-  vector unsigned short __res = (vector unsigned short)(0);
-  __res[__b & 0x7] = __a;
-  return __res;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
-  vector int __res = (vector int)(0);
-  __res[__b & 0x3] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
-                                                               int __b) {
-  vector unsigned int __res = (vector unsigned int)(0);
-  __res[__b & 0x3] = __a;
-  return __res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
-  vector float __res = (vector float)(0);
-  __res[__b & 0x3] = __a;
-  return __res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_promote(double __a, int __b) {
-  vector double __res = (vector double)(0);
-  __res[__b & 0x1] = __a;
-  return __res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_promote(signed long long __a, int __b) {
-  vector signed long long __res = (vector signed long long)(0);
-  __res[__b & 0x1] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_promote(unsigned long long __a, int __b) {
-  vector unsigned long long __res = (vector unsigned long long)(0);
-  __res[__b & 0x1] = __a;
-  return __res;
-}
-#endif
-
-/* vec_splats */
-
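-/* vec_splats replicates a scalar across every lane via the AltiVec
-   vector-literal cast (vector T)(x); for example, vec_splats(1.0f) yields
-   {1.0f, 1.0f, 1.0f, 1.0f}. */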
-static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splats(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
-  return (vector short)(__a);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splats(unsigned short __a) {
-  return (vector unsigned short)(__a);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
-  return (vector int)(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splats(unsigned int __a) {
-  return (vector unsigned int)(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splats(signed long long __a) {
-  return (vector signed long long)(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splats(unsigned long long __a) {
-  return (vector unsigned long long)(__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_splats(signed __int128 __a) {
-  return (vector signed __int128)(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_splats(unsigned __int128 __a) {
-  return (vector unsigned __int128)(__a);
-}
-
-#endif
-
-static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
-  return (vector double)(__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
-  return (vector float)(__a);
-}
-
-/* ----------------------------- predicates --------------------------------- */
-
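-/* Each predicate compiles to a record-form vector compare that sets CR
-   field 6: __CR6_LT tests "all lanes true" and __CR6_EQ tests "no lane
-   true".  Hence vec_all_eq(a, b) is vcmpequ*_p(__CR6_LT, a, b), while
-   vec_all_ge(a, b) is phrased as "no lane of b is greater than a",
-   i.e. vcmpgt*_p(__CR6_EQ, b, a). */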
-/* vec_all_eq */
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-#ifdef __POWER8_VECTOR__
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
-#else
-  // There is no vcmpequd on Power7, so we xor the two vectors and compare the
-  // result against zero as 32-bit elements; equal 64-bit lanes xor to zero in
-  // both 32-bit halves, so the test is exact.
-  return vec_all_eq((vector signed int)vec_xor(__a, __b), (vector signed int)0);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                              vector bool long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector signed long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool __int128 __a,
-                                              vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_ge */
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, (vector signed char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, (vector signed short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, (vector signed int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
-}
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b,
-                                      (vector signed long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a);
-}
-#endif
-
-/* vec_all_gt */
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector signed short)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector signed int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
-}
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_in */
-
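-/* vec_all_in(a, b) uses the bounds compare vcmpbfp: it returns 1 when every
-   lane satisfies -b[i] <= a[i] <= b[i], i.e. the compare result is all
-   zeros, hence __CR6_EQ. */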
-static __inline__ int __attribute__((__always_inline__))
-vec_all_in(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
-}
-
-/* vec_all_le */
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector signed short)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector signed int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b);
-}
-#endif
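-
-/* For the integer overloads, all(a <= b) is computed as "no lane has
-   a > b" (__CR6_EQ on a greater-than compare).  The float and double
-   overloads instead swap the operands of the hardware >= compare and ask
-   for all(b >= a) via __CR6_LT, which also yields the IEEE-appropriate
-   answer of 0 whenever a NaN lane is present. */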
-
-/* vec_all_lt */
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, (vector signed char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, (vector signed short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, (vector signed int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b,
-                                      (vector signed long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_nan */
-
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
-}
-#endif
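-
-/* This relies on IEEE-754 semantics: x == x compares false only for NaN,
-   so "no lane equal to itself" (__CR6_EQ) means every lane is a NaN. */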
-
-/* vec_all_ne */
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool __int128 __a,
-                                              vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
-}
-#endif
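-
-/* The integer vec_all_ne overloads need no signed/unsigned split:
-   equality is sign-agnostic, so every variant (including the bool and
-   pixel ones) funnels through the same vcmpequ* compare and asks whether
-   no lane matched (__CR6_EQ). */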
-
-/* vec_all_nge */
-
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_ngt */
-
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nle */
-
-static __inline__ int __ATTRS_o_ai
-vec_all_nle(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nle(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __b, __a);
-}
-#endif
-
-/* vec_all_nlt */
-
-static __inline__ int __ATTRS_o_ai
-vec_all_nlt(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nlt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __b, __a);
-}
-#endif
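-
-/* vec_all_nge/ngt/nle/nlt are not simple aliases of vec_all_lt/le/ge/gt:
-   they test that no lane satisfies the named relation (__CR6_EQ on the
-   corresponding compare).  A NaN lane therefore counts toward "not
-   greater-or-equal" and friends, whereas the same NaN lane makes
-   vec_all_lt and the other ordered predicates return 0. */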
-
-/* vec_all_numeric */
-
-static __inline__ int __ATTRS_o_ai
-vec_all_numeric(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_numeric(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __a);
-}
-#endif
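-
-/* vec_all_numeric is the complement of vec_all_nan: every lane equal to
-   itself (__CR6_LT) means the vector is NaN-free.  A hypothetical guard
-   sketch (fast_path and slow_path are illustrative, not defined here):
-
-     if (vec_all_numeric(__v))
-       fast_path(__v);   // vectorized path, no NaN handling needed
-     else
-       slow_path(__v);   // scalar fallback with NaN handling
-*/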
-
-/* vec_any_eq */
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool __int128 __a,
-                                              vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
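-
-/* The vec_any_* family below reuses the same compares with the *_REV CR6
-   readings: __CR6_EQ_REV is the complement of "no lane true", i.e. "at
-   least one lane true".  A hypothetical byte-scan sketch (vec_splats is
-   provided elsewhere in this header):
-
-     // 1 when at least one byte of __v equals __c
-     static int contains_byte(vector unsigned char __v, unsigned char __c) {
-       return vec_any_eq(__v, vec_splats(__c));
-     }
-*/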
-
-/* vec_any_ge */
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b,
-                                      (vector signed char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b,
-                                      (vector signed short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b,
-                                      (vector signed int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b,
-                                      (vector signed long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a);
-}
-#endif
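-
-/* For the integer overloads, vec_any_ge(a, b) is the exact negation of
-   vec_all_lt(a, b): it reuses the swapped greater-than compare (b > a)
-   and asks for __CR6_LT_REV, i.e. "not all lanes true". */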
-
-/* vec_any_gt */
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector signed short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector signed int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
-                                      (vector signed long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
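-
-/* A hypothetical threshold sketch using vec_any_gt (clamp_needed and the
-   255 bound are illustrative):
-
-     // 1 when some lane exceeds 255, e.g. before a saturating narrow
-     static int clamp_needed(vector unsigned int __v) {
-       return vec_any_gt(__v, vec_splats(255U));
-     }
-*/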
-
-/* vec_any_le */
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector signed short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector signed int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
-                                      (vector signed long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_lt */
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b,
-                                      (vector signed char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b,
-                                      (vector signed short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b,
-                                      (vector signed int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b,
-                                      (vector signed long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_nan */
-
-static __inline__ int __ATTRS_o_ai vec_any_nan(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
-#endif
-}
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_nan(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __a);
-}
-#endif
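-
-/* Editorial sketch, not part of the original header: the vec_any_* and
-   vec_all_* predicates compare element-wise but collapse the result to a
-   single int through the CR6 record bit of the underlying compare
-   instruction. vec_any_nan exploits NaN != NaN: "not all elements compare
-   equal to themselves" means at least one element is a NaN. */
-static __inline__ int __ATTRS_o_ai __editorial_has_nan(vector float __x) {
-  return vec_any_nan(__x); /* nonzero iff some element is NaN */
-}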
-
-/* vec_any_ne */
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-#ifdef __POWER8_VECTOR__
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
-#else
-  // Take advantage of the optimized sequence for vec_all_eq when vcmpequd is
-  // not available.
-  return !vec_all_eq(__a, __b);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
-                                              vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
-                                              vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool __int128 __a,
-                                              vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_nge */
-
-static __inline__ int __ATTRS_o_ai vec_any_nge(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_nge(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_ngt */
-
-static __inline__ int __ATTRS_o_ai vec_any_ngt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ngt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_nle */
-
-static __inline__ int __ATTRS_o_ai vec_any_nle(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_nle(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __b, __a);
-}
-#endif
-
-/* vec_any_nlt */
-
-static __inline__ int __ATTRS_o_ai vec_any_nlt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_nlt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __b, __a);
-}
-#endif
-
-/* vec_any_numeric */
-
-static __inline__ int __ATTRS_o_ai vec_any_numeric(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_numeric(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __a);
-}
-#endif
-
-/* vec_any_out */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_out(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
-}
-
-/* Power8 Crypto functions
-Note: We diverge from the current GCC implementation with regard to
-cryptography and related functions as follows:
-- Only the SHA and AES instructions and builtins are disabled by -mno-crypto.
-- The remaining ones are only available on Power8 and up, so they
-  require -mpower8-vector.
-The justification is that export regulations require Category:Vector.Crypto
-to be optional (i.e. compliant hardware may not provide it), so we need a way
-to turn those instructions off. The remaining ones (currently controlled by
--mcrypto for GCC) must still be provided on compliant hardware even when
-Vector.Crypto is not.
-*/
-#ifdef __CRYPTO__
-#define vec_sbox_be __builtin_altivec_crypto_vsbox
-#define vec_cipher_be __builtin_altivec_crypto_vcipher
-#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
-#define vec_ncipher_be __builtin_altivec_crypto_vncipher
-#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
-
-#ifdef __VSX__
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vsbox(vector unsigned long long __a) {
-  return __builtin_altivec_crypto_vsbox(__a);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipher(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipherlast(vector unsigned long long __a,
-                             vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipherlast(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipher(vector unsigned long long __a,
-                          vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipherlast(vector unsigned long long __a,
-                              vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipherlast(__a, __b);
-}
-#endif /* __VSX__ */
-
-#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
-#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
-
-#define vec_shasigma_be(X, Y, Z)                                               \
-  _Generic((X),                                                                \
-      vector unsigned int: __builtin_crypto_vshasigmaw,                        \
-      vector unsigned long long: __builtin_crypto_vshasigmad)((X), (Y), (Z))
-#endif
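-
-/* Editorial sketch, not part of the original header: vec_shasigma_be picks
-   vshasigmaw or vshasigmad via _Generic on the element width, so one
-   spelling serves both the SHA-256 (word) and SHA-512 (doubleword) sigma
-   functions. The constant operands are illustrative; consult the ISA for
-   the function-select and subfunction-mask encodings. Assumes crypto
-   support is enabled (e.g. -mcrypto). */
-#ifdef __CRYPTO__
-static __inline__ vector unsigned int __ATTRS_o_ai
-__editorial_sha256_sigma(vector unsigned int __x) {
-  return vec_shasigma_be(__x, 0, 0); /* dispatches to vshasigmaw */
-}
-#endif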
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool char __ATTRS_o_ai
-vec_permxor(vector bool char __a, vector bool char __b,
-            vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_permxor(vector signed char __a, vector signed char __b,
-            vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_permxor(vector unsigned char __a, vector unsigned char __b,
-            vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
-                          vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
-                          vector unsigned short __c) {
-  return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned long long __a,
-                          vector unsigned long long __b,
-                          vector unsigned long long __c) {
-  return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_crypto_vpmsumb(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_crypto_vpmsumh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_crypto_vpmsumw(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vpmsumd(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vgbbd(vector unsigned char __a) {
-  return __builtin_altivec_vgbbd(__a);
-}
-
-#define vec_pmsum_be __builtin_crypto_vpmsumb
-#define vec_gb __builtin_altivec_vgbbd
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_gbb(vector signed long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_gbb(vector unsigned long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
-}
-
-#if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-#endif
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_bperm(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
-}
-#endif // __POWER8_VECTOR__
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_bperm(vector unsigned long long __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermd(__a, __b);
-}
-#endif
-
-/* vec_reve */
-
-static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-#endif
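-
-/* Editorial sketch, not part of the original header: vec_reve reverses the
-   order of the elements themselves, leaving the bytes inside each element
-   untouched (contrast vec_revb below, which byte-swaps within elements). */
-static __inline__ vector signed int __ATTRS_o_ai
-__editorial_reve_demo(vector signed int __v) {
-  return vec_reve(__v); /* {a, b, c, d} -> {d, c, b, a} */
-}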
-
-/* vec_revb */
-static __inline__ vector bool char __ATTRS_o_ai
-vec_revb(vector bool char __a) {
-  return __a;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_revb(vector signed char __a) {
-  return __a;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_revb(vector unsigned char __a) {
-  return __a;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_revb(vector bool short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_revb(vector signed short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_revb(vector unsigned short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_revb(vector bool int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_revb(vector signed int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_revb(vector unsigned int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_revb(vector float __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_revb(vector bool long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_revb(vector signed long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_revb(vector unsigned long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_revb(vector double __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-#endif /* End __VSX__ */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_revb(vector signed __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector signed __int128)vec_perm((vector signed int)__a,
-                                          (vector signed int)__a,
-                                          __indices);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_revb(vector unsigned __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector unsigned __int128)vec_perm((vector signed int)__a,
-                                            (vector signed int)__a,
-                                            __indices);
-}
-#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
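-
-/* Editorial sketch, not part of the original header: vec_revb byte-swaps
-   within each element, the building block for handling foreign-endian data;
-   for 32-bit elements the permute indices {3,2,1,0, 7,6,5,4, ...} above
-   implement exactly that swap. */
-static __inline__ vector unsigned int __ATTRS_o_ai
-__editorial_revb_demo(vector unsigned int __w) {
-  return vec_revb(__w); /* 0x11223344 -> 0x44332211 in each element */
-}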
-
-/* vec_xl */
-
-#define vec_xld2 vec_xl
-#define vec_xlw4 vec_xl
-typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
-typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
-typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
-typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
-typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
-typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
-typedef vector float unaligned_vec_float __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed char vec_xl(ptrdiff_t __offset,
-                                                     const signed char *__ptr) {
-  return *(unaligned_vec_schar *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(ptrdiff_t __offset, const unsigned char *__ptr) {
-  return *(unaligned_vec_uchar*)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_xl(ptrdiff_t __offset, const signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sshort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(ptrdiff_t __offset, const unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ushort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector signed int vec_xl(ptrdiff_t __offset,
-                                                    const signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_xl(ptrdiff_t __offset, const unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_uint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
-                                               const float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_float *)__addr;
-}
-
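-/* Editorial note, not part of the original header: these overloads obtain an
-   unaligned load purely in C by dereferencing through a typedef whose
-   alignment is lowered to 1 (unaligned_vec_schar and friends above); the
-   compiler then emits whatever unaligned access the target supports. The
-   same technique works for any type, e.g.: */
-typedef unsigned int __editorial_unaligned_uint __attribute__((aligned(1)));
-static __inline__ unsigned int __editorial_load_u32(const void *__p) {
-  return *(const __editorial_unaligned_uint *)__p; /* no alignment assumed */
-}
-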
-#ifdef __VSX__
-typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
-typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
-typedef vector double unaligned_vec_double __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(ptrdiff_t __offset, const signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sll *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(ptrdiff_t __offset, const unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ull *)__addr;
-}
-
-static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
-                                                const double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_double *)__addr;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
-typedef vector unsigned __int128 unaligned_vec_ui128
-    __attribute__((aligned(1)));
-static inline __ATTRS_o_ai vector signed __int128
-vec_xl(ptrdiff_t __offset, const signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_si128 *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(ptrdiff_t __offset, const unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ui128 *)__addr;
-}
-#endif
-
-/* vec_xl_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(ptrdiff_t __offset, const signed char *__ptr) {
-  vector signed char __vec =
-      (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(ptrdiff_t __offset, const unsigned char *__ptr) {
-  vector unsigned char __vec =
-      (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_xl_be(ptrdiff_t __offset, const signed short *__ptr) {
-  vector signed short __vec =
-      (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(ptrdiff_t __offset, const unsigned short *__ptr) {
-  vector unsigned short __vec =
-      (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const signed int *__ptr) {
-  return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const unsigned int *__ptr) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const float *__ptr) {
-  return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const signed long long *__ptr) {
-  return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const unsigned long long *__ptr) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const double *__ptr) {
-  return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const signed __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, const unsigned __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-#endif
-#else
-  #define vec_xl_be vec_xl
-#endif
-
-#if defined(__POWER10_VECTOR__) && defined(__VSX__) &&                         \
-    defined(__SIZEOF_INT128__)
-
-/* vec_xl_sext */
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-/* vec_xl_zext */
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(ptrdiff_t __offset, const unsigned char *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(ptrdiff_t __offset, const unsigned short *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(ptrdiff_t __offset, const unsigned int *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(ptrdiff_t __offset, const unsigned long long *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
-}
-
-#endif
-
-/* vec_xlds */
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xlds(ptrdiff_t __offset, const signed long long *__ptr) {
-  signed long long *__addr =
-      (signed long long *)((signed char *)__ptr + __offset);
-  return (vector signed long long) *__addr;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xlds(ptrdiff_t __offset, const unsigned long long *__ptr) {
-  unsigned long long *__addr =
-      (unsigned long long *)((signed char *)__ptr + __offset);
-  return (unaligned_vec_ull) *__addr;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xlds(ptrdiff_t __offset,
-                                                      const double *__ptr) {
-  double *__addr = (double*)((signed char *)__ptr + __offset);
-  return (unaligned_vec_double) *__addr;
-}
-
-/* vec_load_splats */
-static __inline__ vector signed int __ATTRS_o_ai
-vec_load_splats(signed long long __offset, const signed int *__ptr) {
-  signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
-  return (vector signed int)*__addr;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_load_splats(unsigned long long __offset, const signed int *__ptr) {
-  signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
-  return (vector signed int)*__addr;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_load_splats(signed long long __offset, const unsigned int *__ptr) {
-  unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
-  return (vector unsigned int)*__addr;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_load_splats(unsigned long long __offset, const unsigned int *__ptr) {
-  unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
-  return (vector unsigned int)*__addr;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_load_splats(signed long long __offset, const float *__ptr) {
-  float *__addr = (float*)((signed char *)__ptr + __offset);
-  return (vector float)*__addr;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_load_splats(unsigned long long __offset, const float *__ptr) {
-  float *__addr = (float*)((signed char *)__ptr + __offset);
-  return (vector float)*__addr;
-}
-#endif
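-
-/* Editorial sketch, not part of the original header: vec_load_splats loads
-   one scalar at a byte offset from __ptr and replicates it into every
-   element, using the AltiVec rule that casting a scalar to a vector type
-   splats it. */
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai
-__editorial_splat_first(const float *__data) {
-  return vec_load_splats(0LL, __data); /* __data[0] in all four lanes */
-}
-#endif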
-
-/* vec_xst */
-
-#define vec_xstd2 vec_xst
-#define vec_xstw4 vec_xst
-static inline __ATTRS_o_ai void
-vec_xst(vector signed char __vec, ptrdiff_t __offset, signed char *__ptr) {
-  *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned char __vec, ptrdiff_t __offset, unsigned char *__ptr) {
-  *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed short __vec, ptrdiff_t __offset, signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sshort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
-                                        ptrdiff_t __offset,
-                                        unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ushort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
-                                        ptrdiff_t __offset, signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned int __vec, ptrdiff_t __offset, unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_uint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector float __vec, ptrdiff_t __offset,
-                                        float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_float *)__addr = __vec;
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
-                                        ptrdiff_t __offset,
-                                        signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sll *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
-                                        ptrdiff_t __offset,
-                                        unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ull *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
-                                        double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_double *)__addr = __vec;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
-                                        ptrdiff_t __offset,
-                                        signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_si128 *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
-                                        ptrdiff_t __offset,
-                                        unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ui128 *)__addr = __vec;
-}
-#endif
-
-/* vec_xst_trunc */
-
-#if defined(__POWER10_VECTOR__) && defined(__VSX__) &&                         \
-    defined(__SIZEOF_INT128__)
-static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              signed char *__ptr) {
-  *(__ptr + __offset) = (signed char)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              unsigned char *__ptr) {
-  *(__ptr + __offset) = (unsigned char)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              signed short *__ptr) {
-  *(__ptr + __offset) = (signed short)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              unsigned short *__ptr) {
-  *(__ptr + __offset) = (unsigned short)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              signed int *__ptr) {
-  *(__ptr + __offset) = (signed int)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              unsigned int *__ptr) {
-  *(__ptr + __offset) = (unsigned int)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              signed long long *__ptr) {
-  *(__ptr + __offset) = (signed long long)__vec[0];
-}
-
-static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
-                                              ptrdiff_t __offset,
-                                              unsigned long long *__ptr) {
-  *(__ptr + __offset) = (unsigned long long)__vec[0];
-}
-#endif
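-
-/* Editorial sketch, not part of the original header: vec_xst_trunc stores
-   only element 0 of the quadword vector, truncated to the width of the
-   destination pointer's type, at the given byte offset. */
-#if defined(__POWER10_VECTOR__) && defined(__VSX__) &&                         \
-    defined(__SIZEOF_INT128__)
-static __inline__ void __ATTRS_o_ai
-__editorial_store_low_u32(vector unsigned __int128 __v, unsigned int *__out) {
-  vec_xst_trunc(__v, 0, __out); /* writes (unsigned int)__v[0] */
-}
-#endif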
-
-/* vec_xst_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec,
-                                               signed long long  __offset,
-                                               signed char *__ptr) {
-  vector signed char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
-                                               signed long long  __offset,
-                                               unsigned char *__ptr) {
-  vector unsigned char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
-                                               signed long long  __offset,
-                                               signed short *__ptr) {
-  vector signed short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
-                                               signed long long  __offset,
-                                               unsigned short *__ptr) {
-  vector unsigned short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
-                                               signed long long  __offset,
-                                               signed int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
-                                               signed long long  __offset,
-                                               unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
-                                               signed long long  __offset,
-                                               float *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
-                                               signed long long  __offset,
-                                               signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
-                                               signed long long  __offset,
-                                               unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
-                                               signed long long  __offset,
-                                               double *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&                    \
-    defined(__SIZEOF_INT128__)
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
-                                               signed long long __offset,
-                                               signed __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec,
-                                               signed long long __offset,
-                                               unsigned __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-#endif
-#else
-  #define vec_xst_be vec_xst
-#endif
-
-#ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                          \
-  _Generic(                                                                    \
-      (__a), vector float                                                      \
-      : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)),  \
-        vector double                                                          \
-      : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a),   \
-                                                       (__b)))
-
-#endif /* #ifdef __POWER9_VECTOR__ */
-
-static vector float __ATTRS_o_ai vec_neg(vector float __a) {
-  return -__a;
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_neg(vector double __a) {
-  return -__a;
-}
-
-static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
-  return -__a;
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) {
-  return -__a;
-}
-
-static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) {
-  return -__a;
-}
-
-static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) {
-  return -__a;
-}
-
-static vector float __ATTRS_o_ai vec_nabs(vector float __a) {
-  return -vec_abs(__a);
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
-  return -vec_abs(__a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
-  return __builtin_altivec_vminsd(__a, -__a);
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) {
-  return __builtin_altivec_vminsw(__a, -__a);
-}
-
-static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
-  return __builtin_altivec_vminsh(__a, -__a);
-}
-
-static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
-  return __builtin_altivec_vminsb(__a, -__a);
-}
-
-static vector float __ATTRS_o_ai vec_recipdiv(vector float __a,
-                                              vector float __b) {
-  return __builtin_ppc_recipdivf(__a, __b);
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_recipdiv(vector double __a,
-                                               vector double __b) {
-  return __builtin_ppc_recipdivd(__a, __b);
-}
-#endif
-
-#ifdef __POWER10_VECTOR__
-
-/* vec_extractm */
-
-static __inline__ unsigned int __ATTRS_o_ai
-vec_extractm(vector unsigned char __a) {
-  return __builtin_altivec_vextractbm(__a);
-}
-
-static __inline__ unsigned int __ATTRS_o_ai
-vec_extractm(vector unsigned short __a) {
-  return __builtin_altivec_vextracthm(__a);
-}
-
-static __inline__ unsigned int __ATTRS_o_ai
-vec_extractm(vector unsigned int __a) {
-  return __builtin_altivec_vextractwm(__a);
-}
-
-static __inline__ unsigned int __ATTRS_o_ai
-vec_extractm(vector unsigned long long __a) {
-  return __builtin_altivec_vextractdm(__a);
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ unsigned int __ATTRS_o_ai
-vec_extractm(vector unsigned __int128 __a) {
-  return __builtin_altivec_vextractqm(__a);
-}
-#endif
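-
-/* Illustrative note (not part of the upstream header): vec_extractm gathers
- * the most-significant bit of each element into the low-order bits of the
- * result, so a vector unsigned char whose bytes are all 0x80 yields 0xFFFF. */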
-
-/* vec_expandm */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_expandm(vector unsigned char __a) {
-  return __builtin_altivec_vexpandbm(__a);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_expandm(vector unsigned short __a) {
-  return __builtin_altivec_vexpandhm(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_expandm(vector unsigned int __a) {
-  return __builtin_altivec_vexpandwm(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_expandm(vector unsigned long long __a) {
-  return __builtin_altivec_vexpanddm(__a);
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_expandm(vector unsigned __int128 __a) {
-  return __builtin_altivec_vexpandqm(__a);
-}
-#endif
-
-/* vec_cntm */
-
-#define vec_cntm(__a, __mp)                                                    \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_altivec_vcntmbb((__a), (unsigned char)(__mp)),          \
-             vector unsigned short                                             \
-           : __builtin_altivec_vcntmbh((__a), (unsigned char)(__mp)),          \
-             vector unsigned int                                               \
-           : __builtin_altivec_vcntmbw((__a), (unsigned char)(__mp)),          \
-             vector unsigned long long                                         \
-           : __builtin_altivec_vcntmbd((__a), (unsigned char)(__mp)))
-
-/* vec_gen[b|h|w|d|q]m */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_genbm(unsigned long long __bm) {
-  return __builtin_altivec_mtvsrbm(__bm);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_genhm(unsigned long long __bm) {
-  return __builtin_altivec_mtvsrhm(__bm);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_genwm(unsigned long long __bm) {
-  return __builtin_altivec_mtvsrwm(__bm);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_gendm(unsigned long long __bm) {
-  return __builtin_altivec_mtvsrdm(__bm);
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_genqm(unsigned long long __bm) {
-  return __builtin_altivec_mtvsrqm(__bm);
-}
-#endif
-
-/* vec_pdep */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vpdepd(__a, __b);
-}
-
-/* vec_pext */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vpextd(__a, __b);
-}
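-
-/* Illustrative note (not part of the upstream header): like the scalar
- * pdep/pext bit-manipulation operations, vec_pdep scatters the low-order
- * bits of each doubleword of __a into the bit positions selected by __b,
- * and vec_pext gathers the bits of __a selected by __b into the low-order
- * bits of the result. */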
-
-/* vec_cfuge */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vcfuged(__a, __b);
-}
-
-/* vec_gnb */
-
-#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
-
-/* vec_ternarylogic */
-#ifdef __VSX__
-#ifdef __SIZEOF_INT128__
-#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned __int128                                          \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
-#else
-#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
-#endif /* __SIZEOF_INT128__ */
-#endif /* __VSX__ */
-
-/* vec_genpcvm */
-
-#ifdef __VSX__
-#define vec_genpcvm(__a, __imm)                                                \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
-#endif /* __VSX__ */
-
-/* vec_clr_first */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_clr_first(vector signed char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb(__a, __n);
-#else
-  return __builtin_altivec_vclrlb(__a, __n);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clr_first(vector unsigned char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
-#else
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
-#endif
-}
-
-/* vec_clr_last */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_clr_last(vector signed char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb(__a, __n);
-#else
-  return __builtin_altivec_vclrrb(__a, __n);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clr_last(vector unsigned char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
-#else
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
-#endif
-}
-
-/* vec_cntlzm */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vclzdm(__a, __b);
-}
-
-/* vec_cnttzm */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vctzdm(__a, __b);
-}
-
-/* vec_mod */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mod(vector signed int __a, vector signed int __b) {
-  return __a % __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mod(vector unsigned int __a, vector unsigned int __b) {
-  return __a % __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mod(vector signed long long __a, vector signed long long __b) {
-  return __a % __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mod(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a % __b;
-}
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_mod(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a % __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a % __b;
-}
-#endif
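-
-/* Illustrative example (not part of the upstream header): vec_mod is an
- * element-wise truncating remainder, e.g.
- *
- *   vector signed int __a = {7, -7, 9, 10};
- *   vector signed int __b = {3, 3, 4, 5};
- *   vector signed int __r = vec_mod(__a, __b);  // {1, -1, 1, 0}
- */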
-
-/* vec_sldb */
-
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, ((__c) & 0x7))
-
-/* vec_srdb */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, ((__c) & 0x7))
-
-/* vec_insertl */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsblx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insertl(unsigned long long __a, vector unsigned long long __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsdrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsdlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insertl(vector unsigned char __a, vector unsigned char __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbvlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insertl(vector unsigned short __a, vector unsigned short __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshvlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insertl(vector unsigned int __a, vector unsigned int __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswvlx(__b, __c, __a);
-#endif
-}
-
-/* vec_inserth */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsblx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_inserth(unsigned long long __a, vector unsigned long long __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsdlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsdrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_inserth(vector unsigned char __a, vector unsigned char __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbvrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_inserth(vector unsigned short __a, vector unsigned short __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshvrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_inserth(vector unsigned int __a, vector unsigned int __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswvrx(__b, __c, __a);
-#endif
-}
-
-/* vec_extractl */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
-    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextdubvrx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
-    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextduhvrx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
-    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextduwvrx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extractl(vector unsigned long long __a, vector unsigned long long __b,
-             unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextddvrx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-/* vec_extracth */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
-    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextdubvlx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
-    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextduhvlx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
-    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextduwvlx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
-             unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vextddvlx(__a, __b, __c);
-#else
-  vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
-  return vec_sld(__ret, __ret, 8);
-#endif
-}
-
-#ifdef __VSX__
-
-/* vec_permx */
-
-#define vec_permx(__a, __b, __c, __d)                                          \
-  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
-
-/* vec_blendv */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_blendv(vector signed char __a, vector signed char __b,
-           vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_blendv(vector unsigned char __a, vector unsigned char __b,
-           vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_blendv(vector signed short __a, vector signed short __b,
-           vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_blendv(vector unsigned short __a, vector unsigned short __b,
-           vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_blendv(vector signed int __a, vector signed int __b,
-           vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_blendv(vector unsigned int __a, vector unsigned int __b,
-           vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_blendv(vector signed long long __a, vector signed long long __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_blendv(vector double __a, vector double __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
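-
-/* Illustrative note (not part of the upstream header): each element of the
- * vec_blendv result is taken from __b when the most-significant bit of the
- * corresponding element of __c is set, and from __a otherwise. */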
-
-/* vec_replace_elt */
-
-#define vec_replace_elt __builtin_altivec_vec_replace_elt
-
-/* vec_replace_unaligned */
-
-#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
-
-/* vec_splati */
-
-#define vec_splati(__a)                                                        \
-  _Generic((__a), signed int                                                   \
-           : ((vector signed int)(__a)), unsigned int                          \
-           : ((vector unsigned int)(__a)), float                               \
-           : ((vector float)(__a)))
-
-/* vec_splatid */
-
-static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
-  return ((vector double)((double)__a));
-}
-
-/* vec_splati_ins */
-
-static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
-    vector signed int __a, const unsigned int __b, const signed int __c) {
-  const unsigned int __d = __b & 0x01;
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __d] = __c;
-  __a[3 - __d] = __c;
-#else
-  __a[__d] = __c;
-  __a[2 + __d] = __c;
-#endif
-  return __a;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
-    vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
-  const unsigned int __d = __b & 0x01;
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __d] = __c;
-  __a[3 - __d] = __c;
-#else
-  __a[__d] = __c;
-  __a[2 + __d] = __c;
-#endif
-  return __a;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
-  const unsigned int __d = __b & 0x01;
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __d] = __c;
-  __a[3 - __d] = __c;
-#else
-  __a[__d] = __c;
-  __a[2 + __d] = __c;
-#endif
-  return __a;
-}
-
-/* vec_test_lsbb_all_ones */
-
-static __inline__ int __ATTRS_o_ai
-vec_test_lsbb_all_ones(vector unsigned char __a) {
-  return __builtin_vsx_xvtlsbb(__a, 1);
-}
-
-/* vec_test_lsbb_all_zeros */
-
-static __inline__ int __ATTRS_o_ai
-vec_test_lsbb_all_zeros(vector unsigned char __a) {
-  return __builtin_vsx_xvtlsbb(__a, 0);
-}
-#endif /* __VSX__ */
-
-/* vec_stril */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_stril(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr((vector signed char)__a);
-#else
-  return __builtin_altivec_vstribl((vector signed char)__a);
-#endif
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_stril(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr(__a);
-#else
-  return __builtin_altivec_vstribl(__a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_stril(vector unsigned short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr((vector signed short)__a);
-#else
-  return __builtin_altivec_vstrihl((vector signed short)__a);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_stril(vector signed short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr(__a);
-#else
-  return __builtin_altivec_vstrihl(__a);
-#endif
-}
-
-/* vec_stril_p */
-
-static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
-#else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
-#else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
-#else
-  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
-#else
-  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
-#endif
-}
-
-/* vec_strir */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_strir(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl((vector signed char)__a);
-#else
-  return __builtin_altivec_vstribr((vector signed char)__a);
-#endif
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_strir(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl(__a);
-#else
-  return __builtin_altivec_vstribr(__a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_strir(vector unsigned short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl((vector signed short)__a);
-#else
-  return __builtin_altivec_vstrihr((vector signed short)__a);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_strir(vector signed short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl(__a);
-#else
-  return __builtin_altivec_vstrihr(__a);
-#endif
-}
-
-/* vec_strir_p */
-
-static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
-#else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
-#else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
-#else
-  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
-#endif
-}
-
-static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
-#else
-  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
-#endif
-}
-
-/* vs[l | r | ra] */
-
-#ifdef __SIZEOF_INT128__
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                                  __CHAR_BIT__));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
-  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                                  __CHAR_BIT__));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                                  __CHAR_BIT__));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
-  return (vector signed __int128)(
-      ((vector unsigned __int128)__a) >>
-      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                        __CHAR_BIT__)));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector unsigned __int128)(
-      ((vector signed __int128)__a) >>
-      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                        __CHAR_BIT__)));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
-  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
-                                                  __CHAR_BIT__));
-}
-
-#endif /* __SIZEOF_INT128__ */
-#endif /* __POWER10_VECTOR__ */
-
-#ifdef __POWER8_VECTOR__
-#define __bcdadd(__a, __b, __ps) __builtin_ppc_bcdadd((__a), (__b), (__ps))
-#define __bcdsub(__a, __b, __ps) __builtin_ppc_bcdsub((__a), (__b), (__ps))
-
-static __inline__ long __bcdadd_ofl(vector unsigned char __a,
-                                    vector unsigned char __b) {
-  return __builtin_ppc_bcdadd_p(__CR6_SO, __a, __b);
-}
-
-static __inline__ long __bcdsub_ofl(vector unsigned char __a,
-                                    vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __b);
-}
-
-static __inline__ long __bcd_invalid(vector unsigned char __a) {
-  return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __a);
-}
-
-static __inline__ long __bcdcmpeq(vector unsigned char __a,
-                                  vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ long __bcdcmplt(vector unsigned char __a,
-                                  vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ long __bcdcmpgt(vector unsigned char __a,
-                                  vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_GT, __a, __b);
-}
-
-static __inline__ long __bcdcmple(vector unsigned char __a,
-                                  vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_GT_REV, __a, __b);
-}
-
-static __inline__ long __bcdcmpge(vector unsigned char __a,
-                                  vector unsigned char __b) {
-  return __builtin_ppc_bcdsub_p(__CR6_LT_REV, __a, __b);
-}
-
-#endif // __POWER8_VECTOR__
-
-#undef __ATTRS_o_ai
-
-#endif /* __ALTIVEC_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/amxintrin.h b/linux-x86/lib64/clang/14.0.2/include/amxintrin.h
deleted file mode 100644
index 4940666..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/amxintrin.h
+++ /dev/null
@@ -1,494 +0,0 @@
-/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===------------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
-#endif /* __IMMINTRIN_H */
-
-#ifndef __AMXINTRIN_H
-#define __AMXINTRIN_H
-#ifdef __x86_64__
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS_TILE                                                \
-  __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
-#define __DEFAULT_FN_ATTRS_INT8                                                \
-  __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
-#define __DEFAULT_FN_ATTRS_BF16                                                \
-  __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16")))
-
-/// Load tile configuration from a 64-byte memory location specified by
-/// "__config". The tile configuration includes the tile type palette, the
-/// number of bytes per row, and the number of rows. If the specified
-/// palette_id is zero, that signifies the init state for both the tile
-/// config and the tile data, and the tiles are zeroed. Any invalid
-/// configurations will result in a #GP fault.
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
-///
-/// \param __config
-///    A pointer to the 512-bit configuration.
-static __inline__ void __DEFAULT_FN_ATTRS_TILE
-_tile_loadconfig(const void *__config) {
-  __builtin_ia32_tile_loadconfig(__config);
-}
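-
-/// Illustrative sketch (not part of the upstream header; the layout below
-/// follows the palette-1 tile configuration format documented in the Intel
-/// SDM): configuring a single 16-row by 64-byte tile.
-///
-/// \code
-///   struct __tile_config {
-///     uint8_t  palette_id;   // 1 selects the standard palette
-///     uint8_t  start_row;    // 0 unless resuming a faulted operation
-///     uint8_t  reserved[14];
-///     uint16_t colsb[16];    // bytes per row for each tile register
-///     uint8_t  rows[16];     // rows for each tile register
-///   };
-///
-///   struct __tile_config cfg = {0};
-///   cfg.palette_id = 1;
-///   cfg.colsb[0] = 64;
-///   cfg.rows[0] = 16;
-///   _tile_loadconfig(&cfg);
-/// \endcode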
-
-/// Stores the current tile configuration to a 64-byte memory location
-/// specified by "__config". The tile configuration includes the tile type
-/// palette, the number of bytes per row, and the number of rows. If tiles
-/// are not configured, all zeroes will be stored to memory.
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
-///
-/// \param __config
-///    A pointer to the 512-bit configuration.
-static __inline__ void __DEFAULT_FN_ATTRS_TILE
-_tile_storeconfig(void *__config) {
-  __builtin_ia32_tile_storeconfig(__config);
-}
-
-/// Release the tile configuration to return to the init state, which
-/// releases all storage it currently holds.
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
-static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
-  __builtin_ia32_tilerelease();
-}
-
-/// Load tile rows from memory specified by "base" address and "stride" into
-/// destination tile "dst" using the tile configuration previously configured
-/// via "_tile_loadconfig".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
-///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be loaded from memory.
-#define _tile_loadd(dst, base, stride)                                         \
-  __builtin_ia32_tileloadd64((dst), ((const void *)(base)),                    \
-                             (__SIZE_TYPE__)(stride))
-
-/// Load tile rows from memory specified by "base" address and "stride" into
-/// destination tile "dst" using the tile configuration previously configured
-/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
-/// that the data will likely not be reused in the near future and the data
-/// caching can be optimized accordingly.
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
-///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be loaded from memory.
-#define _tile_stream_loadd(dst, base, stride)                                  \
-  __builtin_ia32_tileloaddt164((dst), ((const void *)(base)),                  \
-                               (__SIZE_TYPE__)(stride))
-
-/// Store the tile specified by "src" to memory specified by "base" address and
-/// "stride" using the tile configuration previously configured via
-/// "_tile_loadconfig".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
-///
-/// \param dst
-///    The tile to be stored to memory. Max size is 1024 Bytes.
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be stored to memory.
-#define _tile_stored(dst, base, stride)                                        \
-  __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
-
-/// Zero the tile specified by "tdest".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
-///
-/// \param tile
-///    The destination tile to be zeroed. Max size is 1024 Bytes.
-#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
-/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
-/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
-/// and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbssd(dst, src0, src1)                                          \
-  __builtin_ia32_tdpbssd((dst), (src0), (src1))
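-
-/// Illustrative sketch (not part of the upstream header): one INT8
-/// dot-product step using tile registers 0-2, assuming a configuration such
-/// as the one sketched above; srcA, srcB, dstC and the strides are
-/// placeholder names.
-///
-/// \code
-///   _tile_loadd(1, srcA, strideA);  // signed 8-bit inputs
-///   _tile_loadd(2, srcB, strideB);
-///   _tile_zero(0);                  // 32-bit accumulator tile
-///   _tile_dpbssd(0, 1, 2);
-///   _tile_stored(0, dstC, strideC);
-///   _tile_release();
-/// \endcode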
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
-/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
-/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in "dst", and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbsud(dst, src0, src1)                                          \
-  __builtin_ia32_tdpbsud((dst), (src0), (src1))
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
-/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
-/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
-/// and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbusd(dst, src0, src1)                                          \
-  __builtin_ia32_tdpbusd((dst), (src0), (src1))
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
-/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
-/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
-/// "dst", and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbuud(dst, src0, src1)                                          \
-  __builtin_ia32_tdpbuud((dst), (src0), (src1))
-
-/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
-/// src1, accumulating the intermediate single-precision (32-bit) floating-point
-/// elements with elements in "dst", and store the 32-bit result back to tile
-/// "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbf16ps(dst, src0, src1)                                        \
-  __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
-
-/// The AMX tile register size is configurable; the maximum size is
-/// 16x64 = 1024 bytes. Since there is no 2D type in LLVM IR, we use a vector
-/// type to represent the 2D tile, fixed at the maximum AMX tile register
-/// size.
-typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
-                     __SIZE_TYPE__ stride) {
-  return __builtin_ia32_tileloadd64_internal(m, n, base,
-                                             (__SIZE_TYPE__)(stride));
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,
-                       __SIZE_TYPE__ stride) {
-  return __builtin_ia32_tileloaddt164_internal(m, n, base,
-                                               (__SIZE_TYPE__)(stride));
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
-                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
-  return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,
-                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
-  return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,
-                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
-  return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
-_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,
-                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
-  return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ void __DEFAULT_FN_ATTRS_INT8
-_tile_stored_internal(unsigned short m, unsigned short n, void *base,
-                      __SIZE_TYPE__ stride, _tile1024i tile) {
-  return __builtin_ia32_tilestored64_internal(m, n, base,
-                                              (__SIZE_TYPE__)(stride), tile);
-}
-
-/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
-static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16
-_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
-                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
-  return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
-}
-
-/// This struct packs the shape and tile data together for the user. We
-/// suggest initializing the struct as early as possible, because the compiler
-/// depends on the shape information for configuration. Constant values are
-/// preferred for compiler optimization.
-typedef struct __tile1024i_str {
-  const unsigned short row;
-  const unsigned short col;
-  _tile1024i tile;
-} __tile1024i;
-
-/// Load tile rows from memory specified by "base" address and "stride" into
-/// destination tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
-///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be loaded from memory.
-__DEFAULT_FN_ATTRS_TILE
-static __inline__ void __tile_loadd(__tile1024i *dst, const void *base,
-                                    __SIZE_TYPE__ stride) {
-  dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
-}
-
-/// Load tile rows from memory specified by "base" address and "stride" into
-/// destination tile "dst". This intrinsic provides a hint to the implementation
-/// that the data will likely not be reused in the near future and the data
-/// caching can be optimized accordingly.
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
-///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be loaded from memory.
-__DEFAULT_FN_ATTRS_TILE
-static __inline__ void __tile_stream_loadd(__tile1024i *dst, const void *base,
-                                           __SIZE_TYPE__ stride) {
-  dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);
-}
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
-/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
-/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
-/// and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_INT8
-static __inline__ void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
-                                     __tile1024i src1) {
-  dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,
-                                    src0.tile, src1.tile);
-}
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
-/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
-/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in "dst", and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_INT8
-static __inline__ void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
-                                     __tile1024i src1) {
-  dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,
-                                    src0.tile, src1.tile);
-}
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
-/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
-/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
-/// and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_INT8
-static __inline__ void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
-                                     __tile1024i src1) {
-  dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,
-                                    src0.tile, src1.tile);
-}
-
-/// Compute dot-product of bytes in tiles with a source/destination accumulator.
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
-/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
-/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
-/// "dst", and store the 32-bit result back to tile "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_INT8
-static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
-                                     __tile1024i src1) {
-  dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,
-                                    src0.tile, src1.tile);
-}
-
-/// Store the tile specified by "src" to memory specified by "base" address and
-/// "stride".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
-///
-/// \param base
-///    A pointer to the base address.
-/// \param stride
-///    The stride, in bytes, between rows of data to be stored to memory.
-/// \param src
-///    The source tile to store. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_TILE
-static __inline__ void __tile_stored(void *base, __SIZE_TYPE__ stride,
-                                     __tile1024i src) {
-  _tile_stored_internal(src.row, src.col, base, stride, src.tile);
-}
-
-/// Zero the tile specified by "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
-///
-/// \param dst
-///    The destination tile to be zeroed. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_TILE
-static __inline__ void __tile_zero(__tile1024i *dst) {
-  dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
-}
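-
-/// Illustrative sketch (not part of the upstream header): the same INT8
-/// dot product written with the compiler-managed __tile1024i wrapper;
-/// srcA, srcB, dstC and the strides are placeholder names.
-///
-/// \code
-///   __tile1024i a = {16, 64};  // 16 rows, 64 bytes per row
-///   __tile1024i b = {16, 64};
-///   __tile1024i c = {16, 64};
-///   __tile_loadd(&a, srcA, strideA);
-///   __tile_loadd(&b, srcB, strideB);
-///   __tile_zero(&c);
-///   __tile_dpbssd(&c, a, b);
-///   __tile_stored(dstC, strideC, c);
-/// \endcode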
-
-/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
-/// src1, accumulating the intermediate single-precision (32-bit) floating-point
-/// elements with elements in "dst", and store the 32-bit result back to tile
-/// "dst".
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
-///
-/// \param dst
-///    The destination tile. Max size is 1024 Bytes.
-/// \param src0
-///    The 1st source tile. Max size is 1024 Bytes.
-/// \param src1
-///    The 2nd source tile. Max size is 1024 Bytes.
-__DEFAULT_FN_ATTRS_BF16
-static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
-                                       __tile1024i src1) {
-  dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,
-                                      src0.tile, src1.tile);
-}
-
-#undef __DEFAULT_FN_ATTRS_TILE
-#undef __DEFAULT_FN_ATTRS_INT8
-#undef __DEFAULT_FN_ATTRS_BF16
-
-#endif /* __x86_64__ */
-#endif /* __AMXINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_acle.h b/linux-x86/lib64/clang/14.0.2/include/arm_acle.h
deleted file mode 100644
index 45fac24..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/arm_acle.h
+++ /dev/null
@@ -1,771 +0,0 @@
-/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_ACLE_H
-#define __ARM_ACLE_H
-
-#ifndef __ARM_ACLE
-#error "ACLE intrinsics support not enabled."
-#endif
-
-#include <stdint.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
-/* 8.3 Memory barriers */
-#if !__has_builtin(__dmb)
-#define __dmb(i) __builtin_arm_dmb(i)
-#endif
-#if !__has_builtin(__dsb)
-#define __dsb(i) __builtin_arm_dsb(i)
-#endif
-#if !__has_builtin(__isb)
-#define __isb(i) __builtin_arm_isb(i)
-#endif
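-
-/* Illustrative example (not part of the upstream header): ACLE encodes the
- * full-system barrier as argument 15 (SY), so __dmb(15) orders all memory
- * accesses with respect to all observers. */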
-
-/* 8.4 Hints */
-
-#if !__has_builtin(__wfi)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
-  __builtin_arm_wfi();
-}
-#endif
-
-#if !__has_builtin(__wfe)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
-  __builtin_arm_wfe();
-}
-#endif
-
-#if !__has_builtin(__sev)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
-  __builtin_arm_sev();
-}
-#endif
-
-#if !__has_builtin(__sevl)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
-  __builtin_arm_sevl();
-}
-#endif
-
-#if !__has_builtin(__yield)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
-  __builtin_arm_yield();
-}
-#endif
-
-#if __ARM_32BIT_STATE
-#define __dbg(t) __builtin_arm_dbg(t)
-#endif
-
-/* 8.5 Swap */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__swp(uint32_t __x, volatile uint32_t *__p) {
-  uint32_t v;
-  do
-    v = __builtin_arm_ldrex(__p);
-  while (__builtin_arm_strex(__x, __p));
-  return v;
-}
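-
-/* Illustrative note (not part of the upstream header): __swp atomically
- * stores __x to *__p and returns the previous value, retrying the exclusive
- * load/store pair until the store-exclusive succeeds. */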
-
-/* 8.6 Memory prefetch intrinsics */
-/* 8.6.1 Data prefetch */
-#define __pld(addr) __pldx(0, 0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, 1)
-#else
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
-#endif
-
-/* 8.6.2 Instruction prefetch */
-#define __pli(addr) __plix(0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, 0)
-#else
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
-#endif
-
-/* 8.7 NOP */
-#if !defined(_MSC_VER) || !defined(__aarch64__)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
-  __builtin_arm_nop();
-}
-#endif
-
-/* 9 DATA-PROCESSING INTRINSICS */
-/* 9.2 Miscellaneous data-processing intrinsics */
-/* ROR */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__ror(uint32_t __x, uint32_t __y) {
-  __y %= 32;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (32 - __y));
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rorll(uint64_t __x, uint32_t __y) {
-  __y %= 64;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (64 - __y));
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rorl(unsigned long __x, uint32_t __y) {
-#if __SIZEOF_LONG__ == 4
-  return __ror(__x, __y);
-#else
-  return __rorll(__x, __y);
-#endif
-}
-
-
-/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clz(uint32_t __t) {
-  return __builtin_clz(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__clzl(unsigned long __t) {
-  return __builtin_clzl(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__clzll(uint64_t __t) {
-  return __builtin_clzll(__t);
-}
-
-/* CLS */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__cls(uint32_t __t) {
-  return __builtin_arm_cls(__t);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clsl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __builtin_arm_cls(__t);
-#else
-  return __builtin_arm_cls64(__t);
-#endif
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clsll(uint64_t __t) {
-  return __builtin_arm_cls64(__t);
-}
-
-/* REV */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev(uint32_t __t) {
-  return __builtin_bswap32(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__revl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __builtin_bswap32(__t);
-#else
-  return __builtin_bswap64(__t);
-#endif
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__revll(uint64_t __t) {
-  return __builtin_bswap64(__t);
-}
-
-/* REV16 */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev16(uint32_t __t) {
-  return __ror(__rev(__t), 16);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rev16ll(uint64_t __t) {
-  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rev16l(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-    return __rev16(__t);
-#else
-    return __rev16ll(__t);
-#endif
-}
-
-/* REVSH */
-static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
-__revsh(int16_t __t) {
-  return __builtin_bswap16(__t);
-}
-
-/* RBIT */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rbit(uint32_t __t) {
-  return __builtin_arm_rbit(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
-  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
-         __builtin_arm_rbit(__t >> 32);
-#else
-  return __builtin_arm_rbit64(__t);
-#endif
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rbitl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __rbit(__t);
-#else
-  return __rbitll(__t);
-#endif
-}
-
-/*
- * 9.3 16-bit multiplications
- */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwt(__a, __b);
-}
-#endif
-
-/*
- * 9.4 Saturating intrinsics
- *
- * FIXME: Change the guard to the corresponding __ARM_FEATURE flag once the
- * Q-flag intrinsics are implemented and the flag is enabled.
- */
-/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
-#define __ssat(x, y) __builtin_arm_ssat(x, y)
-#define __usat(x, y) __builtin_arm_usat(x, y)
-#endif
-
-/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qadd(int32_t __t, int32_t __v) {
-  return __builtin_arm_qadd(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qsub(int32_t __t, int32_t __v) {
-  return __builtin_arm_qsub(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qdbl(int32_t __t) {
-  return __builtin_arm_qadd(__t, __t);
-}
-#endif
-
-/* 9.4.3 Accumulating multiplications */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawt(__a, __b, __c);
-}
-#endif
-
-
-/* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
-#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
-#define __usat16(x, y) __builtin_arm_usat16(x, y)
-#endif
-
-/* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
-typedef int32_t int8x4_t;
-typedef int32_t int16x2_t;
-typedef uint32_t uint8x4_t;
-typedef uint32_t uint16x2_t;
-
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_sxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtb16(int8x4_t __a) {
-  return __builtin_arm_sxtb16(__a);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_uxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtb16(int8x4_t __a) {
-  return __builtin_arm_uxtb16(__a);
-}
-#endif
-
-/* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__sel(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_sel(__a, __b);
-}
-#endif
-
-/* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__sadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_sadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__ssub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_ssub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__usub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usub8(__a, __b);
-}
-#endif
-
-/* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usad8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usad8(__a, __b);
-}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
-  return __builtin_arm_usada8(__a, __b, __c);
-}
-#endif
-
-/* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usub16(__a, __b);
-}
-#endif
-
-/* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlad(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smladx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlald(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlaldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsd(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsdx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsld(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuad(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuad(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuadx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuadx(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusd(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusd(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusdx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusdx(__a, __b);
-}
-#endif
-
-/* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32b(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32b(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32h(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32h(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32w(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32w(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32d(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32d(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cb(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32cb(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32ch(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32ch(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cw(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32cw(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cd(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32cd(__a, __b);
-}
-#endif
-
-/* Armv8.3-A JavaScript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__jcvt(double __a) {
-  return __builtin_arm_jcvt(__a);
-}
-#endif
-
-/* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32zf(float __a) {
-  return __builtin_arm_frint32zf(__a);
-}
-
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32z(double __a) {
-  return __builtin_arm_frint32z(__a);
-}
-
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64zf(float __a) {
-  return __builtin_arm_frint64zf(__a);
-}
-
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64z(double __a) {
-  return __builtin_arm_frint64z(__a);
-}
-
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32xf(float __a) {
-  return __builtin_arm_frint32xf(__a);
-}
-
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32x(double __a) {
-  return __builtin_arm_frint32x(__a);
-}
-
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64xf(float __a) {
-  return __builtin_arm_frint64xf(__a);
-}
-
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64x(double __a) {
-  return __builtin_arm_frint64x(__a);
-}
-#endif
-
-/* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
-typedef struct {
-    uint64_t val[8];
-} data512_t;
-
-static __inline__ data512_t __attribute__((__always_inline__, __nodebug__))
-__arm_ld64b(const void *__addr) {
-    data512_t __value;
-    __builtin_arm_ld64b(__addr, __value.val);
-    return __value;
-}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-__arm_st64b(void *__addr, data512_t __value) {
-    __builtin_arm_st64b(__addr, __value.val);
-}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__arm_st64bv(void *__addr, data512_t __value) {
-    return __builtin_arm_st64bv(__addr, __value.val);
-}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__arm_st64bv0(void *__addr, data512_t __value) {
-    return __builtin_arm_st64bv0(__addr, __value.val);
-}
-#endif
-
-/* 10.1 Special register intrinsics */
-#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
-#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
-#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
-#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
-#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
-#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
-#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
-#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
-#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
-#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
-
-/* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
-#define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
-#define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
-#define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
-#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
-#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
-#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-#endif
-
-/* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
-
-#define _TMFAILURE_REASON  0x00007fffu
-#define _TMFAILURE_RTRY    0x00008000u
-#define _TMFAILURE_CNCL    0x00010000u
-#define _TMFAILURE_MEM     0x00020000u
-#define _TMFAILURE_IMP     0x00040000u
-#define _TMFAILURE_ERR     0x00080000u
-#define _TMFAILURE_SIZE    0x00100000u
-#define _TMFAILURE_NEST    0x00200000u
-#define _TMFAILURE_DBG     0x00400000u
-#define _TMFAILURE_INT     0x00800000u
-#define _TMFAILURE_TRIVIAL 0x01000000u
-
-#define __tstart()        __builtin_arm_tstart()
-#define __tcommit()       __builtin_arm_tcommit()
-#define __tcancel(__arg)  __builtin_arm_tcancel(__arg)
-#define __ttest()         __builtin_arm_ttest()
-
-#endif /* __ARM_FEATURE_TME */
-
-/* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__rndr(uint64_t *__p) {
-  return __builtin_arm_rndr(__p);
-}
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__rndrrs(uint64_t *__p) {
-  return __builtin_arm_rndrrs(__p);
-}
-#endif
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* __ARM_ACLE_H */
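Note for reviewers: arm_acle.h, deleted above, carries the non-NEON ACLE intrinsics. A small sketch of the style of code it serves, assuming CRC support is enabled at compile time (e.g. -march=armv8-a+crc); the function name and the fallback mixing scheme are made up for illustration:

    #include <arm_acle.h>
    #include <stddef.h>
    #include <stdint.h>

    uint32_t checksum32(const uint8_t *p, size_t n) {
    #if defined(__ARM_FEATURE_CRC32)
      /* Hardware CRC32 (section 9.7 of the header above). */
      uint32_t crc = 0xffffffffu;
      for (size_t i = 0; i < n; ++i)
        crc = __crc32b(crc, p[i]);
      return ~crc;
    #else
      /* Fallback using the always-available rotate/byte-reverse intrinsics. */
      uint32_t h = 0;
      for (size_t i = 0; i < n; ++i)
        h = __ror(h ^ p[i], 7);
      return __rev(h);
    #endif
    }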
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_fp16.h b/linux-x86/lib64/clang/14.0.2/include/arm_fp16.h
deleted file mode 100644
index ce993ce..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/arm_fp16.h
+++ /dev/null
@@ -1,596 +0,0 @@
-/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_FP16_H
-#define __ARM_FP16_H
-
-#include <stdint.h>
-
-typedef __fp16 float16_t;
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)
-#define vabdh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vabsh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
-  __ret; \
-})
-#define vaddh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcageh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcagth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcaleh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcalth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vceqh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vceqzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
-  __ret; \
-})
-#define vcgeh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcgezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
-  __ret; \
-})
-#define vcgth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcgtzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
-  __ret; \
-})
-#define vcleh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vclezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
-  __ret; \
-})
-#define vclth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcltzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
-  __ret; \
-})
-#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvth_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvth_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvth_f16_u16(__p0) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
-  __ret; \
-})
-#define vcvth_f16_s16(__p0) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
-  __ret; \
-})
-#define vcvth_f16_u32(__p0) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
-  __ret; \
-})
-#define vcvth_f16_s32(__p0) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
-  __ret; \
-})
-#define vcvth_f16_u64(__p0) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
-  __ret; \
-})
-#define vcvth_f16_s64(__p0) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
-  __ret; \
-})
-#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
-  __ret; \
-})
-#define vcvtmh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
-  __ret; \
-})
-#define vdivh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
-  __ret; \
-})
-#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
-  __ret; \
-})
-#define vmaxh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vminh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vminnmh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmulh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmulxh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vnegh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
-  __ret; \
-})
-#define vrecpeh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
-  __ret; \
-})
-#define vrecpsh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vrecpxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
-  __ret; \
-})
-#define vrndh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
-  __ret; \
-})
-#define vrndah_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
-  __ret; \
-})
-#define vrndih_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
-  __ret; \
-})
-#define vrndmh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
-  __ret; \
-})
-#define vrndnh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
-  __ret; \
-})
-#define vrndph_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
-  __ret; \
-})
-#define vrndxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
-  __ret; \
-})
-#define vrsqrteh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
-  __ret; \
-})
-#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vsqrth_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
-  __ret; \
-})
-#define vsubh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
-  __ret; \
-})
-#endif
-
-#undef __ai
-
-#endif /* __ARM_FP16_H */
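Note for reviewers: arm_fp16.h, deleted above, provides scalar half-precision arithmetic. A minimal usage sketch, assuming an AArch64 target with FP16 arithmetic enabled (e.g. -march=armv8.2-a+fp16); the helper names are illustrative:

    #include <arm_fp16.h>

    #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
    /* acc + x*y with a single rounding, staying in half precision. */
    static float16_t fma_h(float16_t acc, float16_t x, float16_t y) {
      return vfmah_f16(acc, x, y);
    }

    /* Round to nearest, ties to even, without widening to float. */
    static float16_t round_ne_h(float16_t v) {
      return vrndnh_f16(v);
    }
    #endif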
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_neon.h b/linux-x86/lib64/clang/14.0.2/include/arm_neon.h
deleted file mode 100644
index 2448870..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ /dev/null
@@ -1,69894 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#ifndef __ARM_FP
-#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
-#else
-
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#else
-
-#include <stdint.h>
-
-#ifdef __ARM_FEATURE_BF16
-#include <arm_bf16.h>
-typedef __bf16 bfloat16_t;
-#endif
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-typedef __uint128_t poly128_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-typedef int64_t poly64_t;
-#endif
-typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#ifdef __ARM_FEATURE_BF16
-typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t;
-typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t;
-
-typedef struct bfloat16x4x2_t {
-  bfloat16x4_t val[2];
-} bfloat16x4x2_t;
-
-typedef struct bfloat16x8x2_t {
-  bfloat16x8_t val[2];
-} bfloat16x8x2_t;
-
-typedef struct bfloat16x4x3_t {
-  bfloat16x4_t val[3];
-} bfloat16x4x3_t;
-
-typedef struct bfloat16x8x3_t {
-  bfloat16x8_t val[3];
-} bfloat16x8x3_t;
-
-typedef struct bfloat16x4x4_t {
-  bfloat16x4_t val[4];
-} bfloat16x4x4_t;
-
-typedef struct bfloat16x8x4_t {
-  bfloat16x8_t val[4];
-} bfloat16x8x4_t;
-
-#endif
-
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#else
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#else
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#else
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#else
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
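-// Single-lane (64x1) sources have no lane order to correct, so the u64, f64,
-// and s64 forms of splat_lane below are defined once, outside the
-// __LITTLE_ENDIAN__ guard, and need no __noswap_ companion.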
-#define splat_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
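-// The _laneq macros below read from a 128-bit (quad) source; the final
-// builtin argument is a type code identifying that source (36 for
-// poly8x16_t, 48 for uint8x16_t, 32 for int8x16_t, and so on), which lets
-// the single polymorphic __builtin_neon_splat_laneq_v dispatch per type.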
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
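-// From here on the header switches from macros to __ai (always_inline)
-// functions. vabd computes the lane-wise absolute difference
-// |__p0[i] - __p1[i]|, truncated to the element width; the trailing integer
-// again encodes the operand type for the polymorphic builtin. Illustrative
-// use:
-//   uint8x16_t a = vdupq_n_u8(3);
-//   uint8x16_t b = vdupq_n_u8(200);
-//   uint8x16_t d = vabdq_u8(a, b); // every lane = 197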
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
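-// Note that vabdq_f32 carries no __noswap_ helper: those helpers appear only
-// for intrinsics that other big-endian code paths in this header call
-// through, and nothing here composes the float variant.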
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
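-// vabs is the lane-wise absolute value. Like the underlying VABS/ABS
-// instructions it wraps rather than saturates, so the most negative value
-// maps to itself (e.g. vabs_s32 of INT32_MIN stays INT32_MIN); saturating
-// behavior lives in the separate vqabs family.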
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
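-// The vadd family is implemented directly with the vector-extension +
-// operator rather than a builtin, giving modular (wraparound) lane-wise
-// addition. The big-endian shuffles are semantically redundant for a pure
-// lane-wise operator; the generator simply emits them uniformly.
-// Illustrative use:
-//   uint8x8_t x = vdup_n_u8(250);
-//   uint8x8_t y = vdup_n_u8(10);
-//   uint8x8_t z = vadd_u8(x, y); // every lane = 4 (260 mod 256)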
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
-  poly8x8_t __s0_0 = __p0_0; \
-  poly8x8_t __ret_0; \
-  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
-  __ret_0; \
-})
-#else
-#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
-  poly8x8_t __s0_1 = __p0_1; \
-  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_1; \
-  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
-  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_1; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
-  poly16x4_t __s0_2 = __p0_2; \
-  poly16x4_t __ret_2; \
-  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
-  __ret_2; \
-})
-#else
-#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
-  poly16x4_t __s0_3 = __p0_3; \
-  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
-  poly16x4_t __ret_3; \
-  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
-  __ret_3; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
-  poly8x8_t __s0_4 = __p0_4; \
-  poly8x16_t __ret_4; \
-  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
-  __ret_4; \
-})
-#else
-#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
-  poly8x8_t __s0_5 = __p0_5; \
-  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_5; \
-  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_5; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
-  poly16x4_t __s0_6 = __p0_6; \
-  poly16x8_t __ret_6; \
-  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
-  __ret_6; \
-})
-#else
-#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
-  poly16x4_t __s0_7 = __p0_7; \
-  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
-  poly16x8_t __ret_7; \
-  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
-  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_7; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
-  uint8x8_t __s0_8 = __p0_8; \
-  uint8x16_t __ret_8; \
-  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
-  __ret_8; \
-})
-#else
-#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
-  uint8x8_t __s0_9 = __p0_9; \
-  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_9; \
-  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
-  uint32x2_t __s0_10 = __p0_10; \
-  uint32x4_t __ret_10; \
-  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
-  __ret_10; \
-})
-#else
-#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
-  uint32x2_t __s0_11 = __p0_11; \
-  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
-  uint32x4_t __ret_11; \
-  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
-  uint64x1_t __s0_12 = __p0_12; \
-  uint64x2_t __ret_12; \
-  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
-  __ret_12; \
-})
-#else
-#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
-  uint64x1_t __s0_13 = __p0_13; \
-  uint64x2_t __ret_13; \
-  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
-  __ret_13; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
-  uint16x4_t __s0_14 = __p0_14; \
-  uint16x8_t __ret_14; \
-  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
-  __ret_14; \
-})
-#else
-#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
-  uint16x4_t __s0_15 = __p0_15; \
-  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
-  uint16x8_t __ret_15; \
-  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
-  int8x8_t __s0_16 = __p0_16; \
-  int8x16_t __ret_16; \
-  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
-  __ret_16; \
-})
-#else
-#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
-  int8x8_t __s0_17 = __p0_17; \
-  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_17; \
-  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
-  float32x2_t __s0_18 = __p0_18; \
-  float32x4_t __ret_18; \
-  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
-  __ret_18; \
-})
-#else
-#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
-  float32x2_t __s0_19 = __p0_19; \
-  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
-  float32x4_t __ret_19; \
-  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
-  int32x2_t __s0_20 = __p0_20; \
-  int32x4_t __ret_20; \
-  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
-  __ret_20; \
-})
-#else
-#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
-  int32x2_t __s0_21 = __p0_21; \
-  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  int32x4_t __ret_21; \
-  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
-  __ret_21; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
-  int64x1_t __s0_22 = __p0_22; \
-  int64x2_t __ret_22; \
-  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
-  __ret_22; \
-})
-#else
-#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
-  int64x1_t __s0_23 = __p0_23; \
-  int64x2_t __ret_23; \
-  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
-  __ret_23; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
-  int16x4_t __s0_24 = __p0_24; \
-  int16x8_t __ret_24; \
-  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
-  __ret_24; \
-})
-#else
-#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
-  int16x4_t __s0_25 = __p0_25; \
-  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
-  int16x8_t __ret_25; \
-  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
-  uint8x8_t __s0_26 = __p0_26; \
-  uint8x8_t __ret_26; \
-  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
-  __ret_26; \
-})
-#else
-#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
-  uint8x8_t __s0_27 = __p0_27; \
-  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_27; \
-  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_27; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
-  uint32x2_t __s0_28 = __p0_28; \
-  uint32x2_t __ret_28; \
-  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
-  __ret_28; \
-})
-#else
-#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
-  uint32x2_t __s0_29 = __p0_29; \
-  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
-  uint32x2_t __ret_29; \
-  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
-  __ret_29; \
-})
-#endif
-
-#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
-  uint64x1_t __s0_30 = __p0_30; \
-  uint64x1_t __ret_30; \
-  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
-  __ret_30; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
-  uint16x4_t __s0_31 = __p0_31; \
-  uint16x4_t __ret_31; \
-  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
-  __ret_31; \
-})
-#else
-#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
-  uint16x4_t __s0_32 = __p0_32; \
-  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
-  uint16x4_t __ret_32; \
-  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
-  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
-  __ret_32; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
-  int8x8_t __s0_33 = __p0_33; \
-  int8x8_t __ret_33; \
-  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
-  __ret_33; \
-})
-#else
-#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
-  int8x8_t __s0_34 = __p0_34; \
-  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_34; \
-  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
-  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_34; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
-  float32x2_t __s0_35 = __p0_35; \
-  float32x2_t __ret_35; \
-  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
-  __ret_35; \
-})
-#else
-#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
-  float32x2_t __s0_36 = __p0_36; \
-  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
-  float32x2_t __ret_36; \
-  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
-  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
-  __ret_36; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
-  int32x2_t __s0_37 = __p0_37; \
-  int32x2_t __ret_37; \
-  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
-  __ret_37; \
-})
-#else
-#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
-  int32x2_t __s0_38 = __p0_38; \
-  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
-  int32x2_t __ret_38; \
-  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
-  __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
-  __ret_38; \
-})
-#endif
-
-#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
-  int64x1_t __s0_39 = __p0_39; \
-  int64x1_t __ret_39; \
-  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
-  __ret_39; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
-  int16x4_t __s0_40 = __p0_40; \
-  int16x4_t __ret_40; \
-  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
-  __ret_40; \
-})
-#else
-#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
-  int16x4_t __s0_41 = __p0_41; \
-  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
-  int16x4_t __ret_41; \
-  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
-  __ret_41; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vdup_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
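-// vget_high_*: return the upper half of a 128-bit vector as a 64-bit vector.
-// The __noswap_* helpers skip the endian lane reversal; they are for internal
-// use by intrinsics that already operate on reversed lanes.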
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
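-// vget_lane/vgetq_lane: extract the scalar in lane __p1 of a 64-bit or
-// 128-bit vector; __p1 must be a compile-time constant lane index.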
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
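-// vget_low_*: return the lower half of a 128-bit vector as a 64-bit vector.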
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
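-// vhadd/vhaddq: halving add, ret[i] = (__p0[i] + __p1[i]) >> 1, with the
-// intermediate sum widened so it cannot overflow.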
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
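-// vhsub/vhsubq: halving subtract, ret[i] = (__p0[i] - __p1[i]) >> 1, with the
-// intermediate difference widened so it cannot overflow.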
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
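-// vld1/vld1q: load one 64-bit or 128-bit vector from the address __p0.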
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
-  __ret; \
-})
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x2(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x2(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x3(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x3(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x4(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x4(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
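-// vld2: de-interleaving structure loads. Alternating elements from memory are
-// split across the two result vectors: val[0] gets elements 0, 2, 4, ... and
-// val[1] gets elements 1, 3, 5, .... For illustration (hypothetical pointer
-// name): float32x2x2_t lr = vld2_f32(stereo); puts the left samples in
-// lr.val[0] and the right samples in lr.val[1].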
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
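-// vld2_dup: loads a single 2-element structure and replicates it across every
-// lane of both result vectors.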
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
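-// vld2_lane: loads one 2-element structure into lane __p2 of the vectors in
-// __p1; all other lanes are passed through unchanged. Big-endian variants
-// lane-reverse the inputs before the builtin call and the outputs after it.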
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
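-// vld3: de-interleaving loads of 3-element structures; every third element
-// from memory goes to val[0], val[1], and val[2] in turn.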
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
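-// vld3_dup: loads a single 3-element structure and replicates it across all
-// lanes of the three result vectors.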
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
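(Editor's note: the vld3_lane_* / vld3q_lane_* macros that follow load one 3-element structure into a single lane of three existing vectors, which is why the big-endian variants reverse the incoming vectors before the builtin as well as the results after it. A usage sketch with a hypothetical function name:

#include <arm_neon.h>

/* Overwrites lane 2 of each of v.val[0..2] with p[0..2]; all other
   lanes pass through. The lane index must be an integer constant,
   0..3 for uint16x4_t. */
uint16x4x3_t load_lane2_u16(const uint16_t *p, uint16x4x3_t v) {
    return vld3_lane_u16(p, v, 2);
}
)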
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
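(Editor's note: the vld4 family below de-interleaves 4-element structures; vld4q_u8, for instance, reads 64 bytes and splits them into four 16-lane vectors. The classic use is separating RGBA pixels into planes, as in this sketch with a hypothetical function name:

#include <arm_neon.h>

/* De-interleave 16 RGBA pixels (64 bytes) into R, G, B, A planes. */
void split_rgba(const uint8_t *px, uint8x16_t *r, uint8x16_t *g,
                uint8x16_t *b, uint8x16_t *a) {
    uint8x16x4_t v = vld4q_u8(px);  /* val[0]=R ... val[3]=A */
    *r = v.val[0];
    *g = v.val[1];
    *b = v.val[2];
    *a = v.val[3];
}
)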
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
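(Editor's note: vld4_dup_* loads one 4-element structure and replicates each element across all lanes of one of four vectors; as with vld4_u64 and vld4_s64 above, the 64x1 dup variants need no endian guard because a single-lane vector cannot be reordered. Sketch, hypothetical name:

#include <arm_neon.h>

/* Reads p[0..3] and broadcasts p[i] across both lanes of result.val[i]. */
float32x2x4_t broadcast4_f32(const float *p) {
    return vld4_dup_f32(p);
}
)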
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
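(Editor's note: vld4_lane_* inserts one interleaved 4-element structure into a chosen lane of four existing vectors, e.g. patching a single RGBA pixel into byte planes. Sketch, hypothetical name:

#include <arm_neon.h>

/* Inserts the 4 bytes at px into lane 5 of planes.val[0..3]; the lane
   index must be an integer constant, 0..7 for uint8x8_t. */
uint8x8x4_t insert_pixel(const uint8_t *px, uint8x8x4_t planes) {
    return vld4_lane_u8(px, planes, 5);
}
)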
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
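The vld4*_lane_* macros above load four consecutive elements from __p0 and insert them into lane __p2 of the four vectors carried in __p1; the big-endian bodies reverse lanes before and after the builtin so __builtin_neon_vld4q_lane_v always sees little-endian lane order. A minimal usage sketch — hypothetical function and variable names, assuming an AArch64 target compiled with NEON support:

#include <arm_neon.h>
#include <stdio.h>

static void demo_vld4q_lane_s32(void) {
  int32_t quad[4] = {10, 20, 30, 40};  /* one new value per vector */
  int32x4x4_t acc = { {
    vdupq_n_s32(0), vdupq_n_s32(1), vdupq_n_s32(2), vdupq_n_s32(3)
  } };
  /* Replace lane 2 of each of the four vectors with quad[0..3]. */
  acc = vld4q_lane_s32(quad, acc, 2);
  printf("%d\n", (int)vgetq_lane_s32(acc.val[3], 2));  /* prints 40 */
}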
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
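Each vmaxq_*/vmax_* definition above reduces to a single __builtin_neon_vmax*_v call, with lane reversal around it on big-endian targets. A minimal sketch of lanewise max in practice — illustrative helper name, assuming an ARM target with NEON enabled:

#include <arm_neon.h>

/* Clamp each byte of v from below: lanewise unsigned max against a
 * splatted lower bound. */
static inline uint8x16_t clamp_floor_u8(uint8x16_t v, uint8_t lo) {
  return vmaxq_u8(v, vdupq_n_u8(lo));
}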
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
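The vminq_*/vmin_* family mirrors the max family; composing the two gives a lanewise clamp. A minimal sketch — illustrative helper name, assuming NEON:

#include <arm_neon.h>

/* Clamp every float lane of v into [lo, hi]. */
static inline float32x4_t clamp_f32x4(float32x4_t v, float lo, float hi) {
  return vminq_f32(vmaxq_f32(v, vdupq_n_f32(lo)), vdupq_n_f32(hi));
}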
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
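Note that the vmlaq_*/vmla_* bodies above are written directly as __p0 + __p1 * __p2 rather than as a builtin call, so the multiply-accumulate lowers to separate multiply and add unless the backend fuses them. A minimal sketch — illustrative name, assuming NEON:

#include <arm_neon.h>

/* acc + x*y per lane, matching the __p0 + __p1 * __p2 expansion above. */
static inline float32x4_t mla_f32x4(float32x4_t acc, float32x4_t x,
                                    float32x4_t y) {
  return vmlaq_f32(acc, x, y);
}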
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint32x4_t __s0_42 = __p0_42; \
-  uint32x4_t __s1_42 = __p1_42; \
-  uint32x2_t __s2_42 = __p2_42; \
-  uint32x4_t __ret_42; \
-  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
-  __ret_42; \
-})
-#else
-#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint32x4_t __s0_43 = __p0_43; \
-  uint32x4_t __s1_43 = __p1_43; \
-  uint32x2_t __s2_43 = __p2_43; \
-  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
-  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
-  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
-  uint32x4_t __ret_43; \
-  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
-  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
-  __ret_43; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x8_t __s0_44 = __p0_44; \
-  uint16x8_t __s1_44 = __p1_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x8_t __ret_44; \
-  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
-  __ret_44; \
-})
-#else
-#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  uint16x8_t __s0_45 = __p0_45; \
-  uint16x8_t __s1_45 = __p1_45; \
-  uint16x4_t __s2_45 = __p2_45; \
-  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
-  uint16x8_t __ret_45; \
-  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
-  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_45; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  float32x4_t __s0_46 = __p0_46; \
-  float32x4_t __s1_46 = __p1_46; \
-  float32x2_t __s2_46 = __p2_46; \
-  float32x4_t __ret_46; \
-  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
-  __ret_46; \
-})
-#else
-#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x4_t __s0_47 = __p0_47; \
-  float32x4_t __s1_47 = __p1_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
-  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
-  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
-  float32x4_t __ret_47; \
-  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
-  __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
-  __ret_47; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  int32x4_t __s0_48 = __p0_48; \
-  int32x4_t __s1_48 = __p1_48; \
-  int32x2_t __s2_48 = __p2_48; \
-  int32x4_t __ret_48; \
-  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
-  __ret_48; \
-})
-#else
-#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x4_t __s0_49 = __p0_49; \
-  int32x4_t __s1_49 = __p1_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
-  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
-  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
-  int32x4_t __ret_49; \
-  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
-  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
-  __ret_49; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int16x8_t __s0_50 = __p0_50; \
-  int16x8_t __s1_50 = __p1_50; \
-  int16x4_t __s2_50 = __p2_50; \
-  int16x8_t __ret_50; \
-  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
-  __ret_50; \
-})
-#else
-#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int16x8_t __s0_51 = __p0_51; \
-  int16x8_t __s1_51 = __p1_51; \
-  int16x4_t __s2_51 = __p2_51; \
-  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
-  int16x8_t __ret_51; \
-  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
-  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_51; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  uint32x2_t __s0_52 = __p0_52; \
-  uint32x2_t __s1_52 = __p1_52; \
-  uint32x2_t __s2_52 = __p2_52; \
-  uint32x2_t __ret_52; \
-  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
-  __ret_52; \
-})
-#else
-#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  uint32x2_t __s0_53 = __p0_53; \
-  uint32x2_t __s1_53 = __p1_53; \
-  uint32x2_t __s2_53 = __p2_53; \
-  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
-  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
-  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
-  uint32x2_t __ret_53; \
-  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
-  __ret_53; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  uint16x4_t __s0_54 = __p0_54; \
-  uint16x4_t __s1_54 = __p1_54; \
-  uint16x4_t __s2_54 = __p2_54; \
-  uint16x4_t __ret_54; \
-  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
-  __ret_54; \
-})
-#else
-#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  uint16x4_t __s0_55 = __p0_55; \
-  uint16x4_t __s1_55 = __p1_55; \
-  uint16x4_t __s2_55 = __p2_55; \
-  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
-  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
-  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
-  uint16x4_t __ret_55; \
-  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
-  __ret_55; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  float32x2_t __s0_56 = __p0_56; \
-  float32x2_t __s1_56 = __p1_56; \
-  float32x2_t __s2_56 = __p2_56; \
-  float32x2_t __ret_56; \
-  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
-  __ret_56; \
-})
-#else
-#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  float32x2_t __s0_57 = __p0_57; \
-  float32x2_t __s1_57 = __p1_57; \
-  float32x2_t __s2_57 = __p2_57; \
-  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
-  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
-  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
-  float32x2_t __ret_57; \
-  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
-  __ret_57; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  int32x2_t __s0_58 = __p0_58; \
-  int32x2_t __s1_58 = __p1_58; \
-  int32x2_t __s2_58 = __p2_58; \
-  int32x2_t __ret_58; \
-  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
-  __ret_58; \
-})
-#else
-#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  int32x2_t __s0_59 = __p0_59; \
-  int32x2_t __s1_59 = __p1_59; \
-  int32x2_t __s2_59 = __p2_59; \
-  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
-  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
-  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
-  int32x2_t __ret_59; \
-  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
-  __ret_59; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  int16x4_t __s0_60 = __p0_60; \
-  int16x4_t __s1_60 = __p1_60; \
-  int16x4_t __s2_60 = __p2_60; \
-  int16x4_t __ret_60; \
-  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
-  __ret_60; \
-})
-#else
-#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  int16x4_t __s0_61 = __p0_61; \
-  int16x4_t __s1_61 = __p1_61; \
-  int16x4_t __s2_61 = __p2_61; \
-  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
-  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  int16x4_t __ret_61; \
-  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
-})
-#endif
-
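The _lane_ variants above first broadcast one lane of the final vector operand via splatq_lane_*/splat_lane_* (or their __noswap_ forms on big-endian), then multiply-accumulate. A minimal sketch — hypothetical helper name, assuming NEON; the lane index must be a constant:

#include <arm_neon.h>

/* acc + x * c[1] in every lane: multiply by one broadcast coefficient. */
static inline float32x4_t mla_by_lane1(float32x4_t acc, float32x4_t x,
                                       float32x2_t c) {
  return vmlaq_lane_f32(acc, x, c, 1);
}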
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
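The _n_ variants splat a scalar through a compound literal such as (float32x4_t) {__p2, __p2, __p2, __p2} before the multiply-accumulate, so no separate dup intrinsic is needed at the call site. A minimal sketch — illustrative name, assuming NEON:

#include <arm_neon.h>

/* acc + x * s per lane; the header splats s internally as shown above. */
static inline float32x4_t mla_scalar(float32x4_t acc, float32x4_t x, float s) {
  return vmlaq_n_f32(acc, x, s);
}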
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
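-// vmls*_lane: the third operand is a single lane of a vector, broadcast via
-// the splat(q)_lane_* helpers defined elsewhere in this header, so
-// ret = a - b * vdup(c[lane]). These are macros rather than functions so the
-// lane index stays an integer constant expression; the big-endian copies use
-// the __noswap_ splat helpers on already-reversed operands.
-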
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint32x4_t __s0_62 = __p0_62; \
-  uint32x4_t __s1_62 = __p1_62; \
-  uint32x2_t __s2_62 = __p2_62; \
-  uint32x4_t __ret_62; \
-  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
-  __ret_62; \
-})
-#else
-#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint32x4_t __s0_63 = __p0_63; \
-  uint32x4_t __s1_63 = __p1_63; \
-  uint32x2_t __s2_63 = __p2_63; \
-  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
-  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
-  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint32x4_t __ret_63; \
-  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s1_64 = __p1_64; \
-  uint16x4_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
-  __ret_64; \
-})
-#else
-#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s1_65 = __p1_65; \
-  uint16x4_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  float32x4_t __s0_66 = __p0_66; \
-  float32x4_t __s1_66 = __p1_66; \
-  float32x2_t __s2_66 = __p2_66; \
-  float32x4_t __ret_66; \
-  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
-  __ret_66; \
-})
-#else
-#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  float32x4_t __s0_67 = __p0_67; \
-  float32x4_t __s1_67 = __p1_67; \
-  float32x2_t __s2_67 = __p2_67; \
-  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
-  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
-  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
-  float32x4_t __ret_67; \
-  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
-  __ret_67; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  int32x4_t __s0_68 = __p0_68; \
-  int32x4_t __s1_68 = __p1_68; \
-  int32x2_t __s2_68 = __p2_68; \
-  int32x4_t __ret_68; \
-  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
-  __ret_68; \
-})
-#else
-#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  int32x4_t __s0_69 = __p0_69; \
-  int32x4_t __s1_69 = __p1_69; \
-  int32x2_t __s2_69 = __p2_69; \
-  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
-  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
-  int32x4_t __ret_69; \
-  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int16x8_t __s0_70 = __p0_70; \
-  int16x8_t __s1_70 = __p1_70; \
-  int16x4_t __s2_70 = __p2_70; \
-  int16x8_t __ret_70; \
-  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
-  __ret_70; \
-})
-#else
-#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int16x8_t __s0_71 = __p0_71; \
-  int16x8_t __s1_71 = __p1_71; \
-  int16x4_t __s2_71 = __p2_71; \
-  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int16x8_t __ret_71; \
-  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_71; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  uint32x2_t __s0_72 = __p0_72; \
-  uint32x2_t __s1_72 = __p1_72; \
-  uint32x2_t __s2_72 = __p2_72; \
-  uint32x2_t __ret_72; \
-  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
-  __ret_72; \
-})
-#else
-#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  uint32x2_t __s0_73 = __p0_73; \
-  uint32x2_t __s1_73 = __p1_73; \
-  uint32x2_t __s2_73 = __p2_73; \
-  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
-  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  uint32x2_t __ret_73; \
-  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  uint16x4_t __s0_74 = __p0_74; \
-  uint16x4_t __s1_74 = __p1_74; \
-  uint16x4_t __s2_74 = __p2_74; \
-  uint16x4_t __ret_74; \
-  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
-  __ret_74; \
-})
-#else
-#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  uint16x4_t __s0_75 = __p0_75; \
-  uint16x4_t __s1_75 = __p1_75; \
-  uint16x4_t __s2_75 = __p2_75; \
-  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
-  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
-  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
-  uint16x4_t __ret_75; \
-  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
-  __ret_75; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  float32x2_t __s0_76 = __p0_76; \
-  float32x2_t __s1_76 = __p1_76; \
-  float32x2_t __s2_76 = __p2_76; \
-  float32x2_t __ret_76; \
-  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
-  __ret_76; \
-})
-#else
-#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  float32x2_t __s0_77 = __p0_77; \
-  float32x2_t __s1_77 = __p1_77; \
-  float32x2_t __s2_77 = __p2_77; \
-  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
-  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
-  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
-  float32x2_t __ret_77; \
-  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
-  __ret_77; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  int32x2_t __s0_78 = __p0_78; \
-  int32x2_t __s1_78 = __p1_78; \
-  int32x2_t __s2_78 = __p2_78; \
-  int32x2_t __ret_78; \
-  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
-  __ret_78; \
-})
-#else
-#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  int32x2_t __s0_79 = __p0_79; \
-  int32x2_t __s1_79 = __p1_79; \
-  int32x2_t __s2_79 = __p2_79; \
-  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
-  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
-  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
-  int32x2_t __ret_79; \
-  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
-  __ret_79; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  int16x4_t __s0_80 = __p0_80; \
-  int16x4_t __s1_80 = __p1_80; \
-  int16x4_t __s2_80 = __p2_80; \
-  int16x4_t __ret_80; \
-  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
-  __ret_80; \
-})
-#else
-#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  int16x4_t __s0_81 = __p0_81; \
-  int16x4_t __s1_81 = __p1_81; \
-  int16x4_t __s2_81 = __p2_81; \
-  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
-  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
-  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
-  int16x4_t __ret_81; \
-  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
-  __ret_81; \
-})
-#endif
-
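-// vmls*_n: the multiplier is a scalar, broadcast to every lane with a vector
-// compound literal, e.g. (uint32x4_t) {__p2, __p2, __p2, __p2}.
-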
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
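-// vmov(q)_n: replicate a scalar into every lane of the result. Note the
-// big-endian copies still reverse the result; for a splat that shuffle is
-// semantically a no-op, but it keeps the generated code shape uniform.
-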
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vmov_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
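-// vmovl: lengthen each element to twice its width (zero-extend for u-types,
-// sign-extend for s-types). The trailing integer passed to
-// __builtin_neon_vmovl_v is an internal type code selecting the result
-// element type. The extra __noswap_ definitions let other big-endian
-// wrappers call these on operands that are already lane-reversed.
-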
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#endif
-
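-// vmovn: narrow each element to half its width, keeping the
-// least-significant half (e.g. vmovn_u16 maps 0x1234 to 0x34).
-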
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
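-// vmul: plain lane-wise multiplication using the C * operator on vector
-// types; the same little-endian/big-endian pairing as above applies.
-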
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
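-// vmul(q)_p8: polynomial multiplication of the 8-bit lanes, i.e. carry-less
-// multiply over GF(2) truncated to 8 bits, routed through a builtin since C
-// has no * operator for poly types. E.g. 0x03 * 0x03 is 0x05 here
-// ((x+1)^2 = x^2+1 over GF(2)), not 0x09.
-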
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
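-// vmul*_lane: multiply by one broadcast lane of the second vector, using the
-// same splat(q)_lane helpers and constant-lane macro pattern as vmls*_lane.
-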
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
-  uint32x4_t __s0_82 = __p0_82; \
-  uint32x2_t __s1_82 = __p1_82; \
-  uint32x4_t __ret_82; \
-  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
-  __ret_82; \
-})
-#else
-#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
-  uint32x4_t __s0_83 = __p0_83; \
-  uint32x2_t __s1_83 = __p1_83; \
-  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
-  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
-  uint32x4_t __ret_83; \
-  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
-  uint16x8_t __s0_84 = __p0_84; \
-  uint16x4_t __s1_84 = __p1_84; \
-  uint16x8_t __ret_84; \
-  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
-  __ret_84; \
-})
-#else
-#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
-  uint16x8_t __s0_85 = __p0_85; \
-  uint16x4_t __s1_85 = __p1_85; \
-  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
-  uint16x8_t __ret_85; \
-  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
-  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
-  float32x4_t __s0_86 = __p0_86; \
-  float32x2_t __s1_86 = __p1_86; \
-  float32x4_t __ret_86; \
-  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
-  __ret_86; \
-})
-#else
-#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
-  float32x4_t __s0_87 = __p0_87; \
-  float32x2_t __s1_87 = __p1_87; \
-  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
-  float32x4_t __ret_87; \
-  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
-  int32x4_t __s0_88 = __p0_88; \
-  int32x2_t __s1_88 = __p1_88; \
-  int32x4_t __ret_88; \
-  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
-  __ret_88; \
-})
-#else
-#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
-  int32x4_t __s0_89 = __p0_89; \
-  int32x2_t __s1_89 = __p1_89; \
-  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
-  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
-  int32x4_t __ret_89; \
-  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
-  __ret_89; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
-  int16x8_t __s0_90 = __p0_90; \
-  int16x4_t __s1_90 = __p1_90; \
-  int16x8_t __ret_90; \
-  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
-  __ret_90; \
-})
-#else
-#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
-  int16x8_t __s0_91 = __p0_91; \
-  int16x4_t __s1_91 = __p1_91; \
-  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
-  int16x8_t __ret_91; \
-  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_91; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
-  uint32x2_t __s0_92 = __p0_92; \
-  uint32x2_t __s1_92 = __p1_92; \
-  uint32x2_t __ret_92; \
-  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
-  __ret_92; \
-})
-#else
-#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
-  uint32x2_t __s0_93 = __p0_93; \
-  uint32x2_t __s1_93 = __p1_93; \
-  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
-  uint32x2_t __ret_93; \
-  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
-  uint16x4_t __s0_94 = __p0_94; \
-  uint16x4_t __s1_94 = __p1_94; \
-  uint16x4_t __ret_94; \
-  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
-  __ret_94; \
-})
-#else
-#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
-  uint16x4_t __s0_95 = __p0_95; \
-  uint16x4_t __s1_95 = __p1_95; \
-  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
-  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
-  uint16x4_t __ret_95; \
-  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
-  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
-  __ret_95; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
-  float32x2_t __s0_96 = __p0_96; \
-  float32x2_t __s1_96 = __p1_96; \
-  float32x2_t __ret_96; \
-  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
-  __ret_96; \
-})
-#else
-#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
-  float32x2_t __s0_97 = __p0_97; \
-  float32x2_t __s1_97 = __p1_97; \
-  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
-  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
-  float32x2_t __ret_97; \
-  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
-  __ret_97; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
-  int32x2_t __s0_98 = __p0_98; \
-  int32x2_t __s1_98 = __p1_98; \
-  int32x2_t __ret_98; \
-  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
-  __ret_98; \
-})
-#else
-#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
-  int32x2_t __s0_99 = __p0_99; \
-  int32x2_t __s1_99 = __p1_99; \
-  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
-  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
-  int32x2_t __ret_99; \
-  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
-  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
-  __ret_99; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
-  int16x4_t __s0_100 = __p0_100; \
-  int16x4_t __s1_100 = __p1_100; \
-  int16x4_t __ret_100; \
-  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
-  __ret_100; \
-})
-#else
-#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
-  int16x4_t __s0_101 = __p0_101; \
-  int16x4_t __s1_101 = __p1_101; \
-  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
-  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
-  int16x4_t __ret_101; \
-  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
-  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
-  __ret_101; \
-})
-#endif
-
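-// vmul*_n: multiply by a scalar broadcast through a vector compound literal,
-// mirroring the vmls*_n forms above.
-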
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * (uint32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * (float32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * (float32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * (int32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * (int32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
-  uint32x2_t __s0_102 = __p0_102; \
-  uint32x2_t __s1_102 = __p1_102; \
-  uint64x2_t __ret_102; \
-  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
-  __ret_102; \
-})
-#else
-#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
-  uint32x2_t __s0_103 = __p0_103; \
-  uint32x2_t __s1_103 = __p1_103; \
-  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
-  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
-  uint64x2_t __ret_103; \
-  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
-  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
-  __ret_103; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
-  uint16x4_t __s0_104 = __p0_104; \
-  uint16x4_t __s1_104 = __p1_104; \
-  uint32x4_t __ret_104; \
-  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
-  __ret_104; \
-})
-#else
-#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
-  uint16x4_t __s0_105 = __p0_105; \
-  uint16x4_t __s1_105 = __p1_105; \
-  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
-  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
-  uint32x4_t __ret_105; \
-  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
-  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
-  __ret_105; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
-  int32x2_t __s0_106 = __p0_106; \
-  int32x2_t __s1_106 = __p1_106; \
-  int64x2_t __ret_106; \
-  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
-  __ret_106; \
-})
-#else
-#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
-  int32x2_t __s0_107 = __p0_107; \
-  int32x2_t __s1_107 = __p1_107; \
-  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  int64x2_t __ret_107; \
-  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
-  int16x4_t __s0_108 = __p0_108; \
-  int16x4_t __s1_108 = __p1_108; \
-  int32x4_t __ret_108; \
-  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
-  __ret_108; \
-})
-#else
-#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
-  int16x4_t __s0_109 = __p0_109; \
-  int16x4_t __s1_109 = __p1_109; \
-  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
-  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
-  int32x4_t __ret_109; \
-  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
-  __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
-  __ret_109; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
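-// vpaddl ("pairwise add long") sums adjacent lane pairs into lanes of twice
-// the width, halving the lane count. A rough sketch with illustrative values:
-//   int16x4_t a = {1, 2, 3, 4};
-//   int32x2_t r = vpaddl_s16(a);   // r = {1+2, 3+4} = {3, 7}
-// Because the result lanes are wider, the pairwise sums cannot overflow.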
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
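-// vpmax/vpmin reduce adjacent pairs: the low half of the result holds the
-// pairwise max (or min) of __p0's lanes and the high half that of __p1's.
-// Sketch with illustrative values:
-//   int16x4_t a = {1, 9, 3, 4}, b = {5, 6, 8, 7};
-//   int16x4_t m = vpmax_s16(a, b);   // m = {9, 4, 6, 8}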
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
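-// vqabs is a saturating absolute value: the one input with no in-range
-// absolute value, the most negative lane, clamps to the maximum instead of
-// wrapping, e.g. vqabs_s8 maps -128 to 127 rather than back to -128.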
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
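-// vqadd saturates instead of wrapping: vqadd_u8 on 200 and 100 yields 255,
-// and vqaddq_s8 on 127 and 1 stays at 127. The __noswap_* helpers repeat a
-// body without the lane reversal so that other big-endian implementations
-// can call them on vectors that are already in reversed order.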
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  int64x2_t __s0_110 = __p0_110; \
-  int32x2_t __s1_110 = __p1_110; \
-  int32x2_t __s2_110 = __p2_110; \
-  int64x2_t __ret_110; \
-  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
-  __ret_110; \
-})
-#else
-#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  int64x2_t __s0_111 = __p0_111; \
-  int32x2_t __s1_111 = __p1_111; \
-  int32x2_t __s2_111 = __p2_111; \
-  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
-  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
-  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
-  int64x2_t __ret_111; \
-  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
-  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
-  __ret_111; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  int32x4_t __s0_112 = __p0_112; \
-  int16x4_t __s1_112 = __p1_112; \
-  int16x4_t __s2_112 = __p2_112; \
-  int32x4_t __ret_112; \
-  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
-  __ret_112; \
-})
-#else
-#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  int32x4_t __s0_113 = __p0_113; \
-  int16x4_t __s1_113 = __p1_113; \
-  int16x4_t __s2_113 = __p2_113; \
-  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
-  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
-  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
-  int32x4_t __ret_113; \
-  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
-  __ret_113; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  int64x2_t __s0_114 = __p0_114; \
-  int32x2_t __s1_114 = __p1_114; \
-  int32x2_t __s2_114 = __p2_114; \
-  int64x2_t __ret_114; \
-  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
-  __ret_114; \
-})
-#else
-#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  int64x2_t __s0_115 = __p0_115; \
-  int32x2_t __s1_115 = __p1_115; \
-  int32x2_t __s2_115 = __p2_115; \
-  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
-  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
-  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
-  int64x2_t __ret_115; \
-  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
-  __ret_115; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  int32x4_t __s0_116 = __p0_116; \
-  int16x4_t __s1_116 = __p1_116; \
-  int16x4_t __s2_116 = __p2_116; \
-  int32x4_t __ret_116; \
-  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
-  __ret_116; \
-})
-#else
-#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  int32x4_t __s0_117 = __p0_117; \
-  int16x4_t __s1_117 = __p1_117; \
-  int16x4_t __s2_117 = __p2_117; \
-  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
-  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
-  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
-  int32x4_t __ret_117; \
-  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
-  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
-  __ret_117; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
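-// vqdmlal/vqdmlsl compute a saturating doubling multiply, widen it, and
-// accumulate with saturation: roughly sat(__p0 +/- sat(2 * __p1 * __p2))
-// per lane at twice the element width. The _lane_ macros feed one broadcast
-// lane of the last operand through splat_lane, and the _n_ forms build the
-// splat from a scalar, e.g. (int16x4_t) {__p2, __p2, __p2, __p2}; both
-// reuse the plain intrinsic (or its __noswap_ twin on big-endian) rather
-// than a separate builtin.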
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
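-// vqdmulh keeps the high half of a saturating doubling product; for s16
-// lanes the result is roughly (2 * __p0 * __p1) >> 16, a common fixed-point
-// Q15 multiply. Only the most negative value squared can saturate, e.g.
-// vqdmulh_s16 on -32768 and -32768 yields 32767.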
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
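-// vqdmull widens: each lane becomes sat(2 * __p0 * __p1) at twice the
-// element width, so for s16 inputs only -32768 * -32768 can saturate
-// (to 2147483647); every other product fits exactly in the wider lane.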
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
-  int32x2_t __s0_118 = __p0_118; \
-  int32x2_t __s1_118 = __p1_118; \
-  int64x2_t __ret_118; \
-  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
-  __ret_118; \
-})
-#else
-#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
-  int32x2_t __s0_119 = __p0_119; \
-  int32x2_t __s1_119 = __p1_119; \
-  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  int64x2_t __ret_119; \
-  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
-  int16x4_t __s0_120 = __p0_120; \
-  int16x4_t __s1_120 = __p1_120; \
-  int32x4_t __ret_120; \
-  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
-  __ret_120; \
-})
-#else
-#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
-  int16x4_t __s0_121 = __p0_121; \
-  int16x4_t __s1_121 = __p1_121; \
-  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
-  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
-  int32x4_t __ret_121; \
-  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
-  __ret_121; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
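-// vqmovn narrows with signed/unsigned saturation (e.g. vqmovn_s32 clamps
-// 100000 to 32767), while vqmovun narrows signed input to unsigned lanes,
-// clamping negatives to 0 and, e.g., mapping 300 to 255 for vqmovun_s16.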
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
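-// vqneg is a saturating negation, the counterpart of vqabs above: the most
-// negative lane clamps to the maximum, e.g. vqneg_s32 maps -2147483648 to
-// 2147483647 instead of wrapping.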
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
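-// The __noswap_ variants, emitted only in the big-endian branch, call the
-// builtin without any lane reversal; presumably they exist so other
-// big-endian definitions that have already normalized their operands can
-// reuse the operation without reversing twice.
-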
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
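-// Single-lane vectors such as uint64x1_t have no lane order to fix up, so
-// their definitions are emitted once, outside the endianness split.
-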
-__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
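-// vraddhn ("rounding add, narrow high half") adds two wide vectors, adds a
-// rounding constant of half an output-element ULP, and returns the high
-// half of each element sum at half the element width, e.g. two uint32x4_t
-// in, one uint16x4_t out below.
-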
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
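-// vrecpe gives a per-lane reciprocal estimate with roughly 8 bits of
-// precision, normally refined with vrecps below; the u32 forms operate on
-// a fixed-point interpretation of the operand (a useful estimate is only
-// produced when the top bit is set).
-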
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
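-// vrecps is the Newton-Raphson step for the estimate above: it computes
-// 2.0 - a*b per lane. A minimal usage sketch (not part of this header;
-// assumes only the intrinsics defined here plus the standard vmulq_f32):
-//
-//   float32x4_t e = vrecpeq_f32(d);         // seed estimate of 1/d
-//   e = vmulq_f32(e, vrecpsq_f32(d, e));    // e *= (2 - d*e), ~16 bits
-//   e = vmulq_f32(e, vrecpsq_f32(d, e));    // second step, ~23 bits
-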
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
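-// The vrev16/vrev32/vrev64 family reverses the elements inside each 16-,
-// 32-, or 64-bit group of the vector. No builtin is needed: the fixed
-// shuffle masks below express the permutation directly, and the big-endian
-// variants simply compose it with the usual lane-order fix-up.
-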
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
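-// vrhadd is the rounding halving add: each lane becomes (a + b + 1) >> 1,
-// computed in widened arithmetic so the intermediate sum cannot overflow.
-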
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
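-// vrshl shifts each lane of the first operand by the corresponding signed
-// lane of the second: positive counts shift left, while negative counts
-// shift right by n with the rounding constant 1 << (n-1) added before the
-// shift. Note the shift vector is always a signed type, even for the
-// unsigned forms.
-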
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
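-// vrshr_n (rounding shift right by immediate) is defined as a macro rather
-// than a function: the shift count must reach the __builtin_neon_vrshr*_n_v
-// builtins as an integer constant expression, which a function parameter
-// cannot be. The statement expression first copies the argument
-// (__s0 = __p0) so it is evaluated exactly once.
-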
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
-})
-#else
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
-})
-#else
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
-})
-#else
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
-})
-#else
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
-})
-#else
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
-})
-#else
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
-})
-#else
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
-})
-#else
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
-})
-#else
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
-})
-#else
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
-})
-#else
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
-})
-#else
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
-})
-#else
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
-})
-#else
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
-})
-#else
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
-})
-#endif
-
-#define vst1_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
-})
-#else
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
-})
-#else
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
-})
-#else
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
-})
-#else
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
-})
-#endif
-
-#define vst1_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
-})
-#else
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-})
-#else
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-})
-#else
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-})
-#else
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-})
-#else
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-})
-#else
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-})
-#else
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-})
-#else
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-})
-#else
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-})
-#else
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-})
-#else
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-})
-#else
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-})
-#else
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-})
-#else
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-})
-#else
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-})
-#else
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-})
-#endif
-
-#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-})
-#else
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-})
-#else
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-})
-#else
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-})
-#else
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-})
-#endif
-
-#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-})
-#else
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
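// Editorial annotation (not part of the original diff): the vst1_*_x3 macros
// above store three vectors to consecutive, non-interleaved memory. On
// big-endian targets each vector is first lane-reversed with
// __builtin_shufflevector, because the __builtin_neon_* builtins assume
// little-endian lane order; the single-lane 64-bit variants (vst1_u64_x3,
// vst1_s64_x3) need no reversal and are therefore defined once, outside the
// __LITTLE_ENDIAN__ split. A minimal usage sketch, assuming <arm_neon.h> and
// a NEON-capable target; store12_u16 is a hypothetical helper name:

#include <arm_neon.h>

static inline void store12_u16(uint16_t *dst, uint16x4x3_t v) {
  vst1_u16_x3(dst, v); /* writes 3 x 4 = 12 consecutive uint16_t values */
}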
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
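// Editorial annotation (not part of the original diff): the vst1_*_x4 macros
// above extend the same pattern to four vectors, still stored back-to-back
// with no interleaving; the big-endian branch reverses all four val[]
// members before calling __builtin_neon_vst1_x4_v / vst1q_x4_v. A minimal
// sketch, assuming <arm_neon.h> and a NEON target; store16_f32 is a
// hypothetical helper name:

#include <arm_neon.h>

static inline void store16_f32(float32_t *dst, float32x4x4_t v) {
  vst1q_f32_x4(dst, v); /* writes 4 x 4 = 16 consecutive floats */
}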
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst2_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst2_s64(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
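// Editorial annotation (not part of the original diff): unlike the
// vst1_x2/x3/x4 family, the vst2_* macros above perform 2-way interleaved
// stores: lane i of val[0] is written immediately before lane i of val[1],
// the usual way to repack two separate channels. A minimal sketch, assuming
// <arm_neon.h> and a NEON target; interleave2_u8 is a hypothetical helper:

#include <arm_neon.h>

static inline void interleave2_u8(uint8_t *dst, uint8x8_t a, uint8x8_t b) {
  uint8x8x2_t pair = { { a, b } };
  vst2_u8(dst, pair); /* writes a0,b0,a1,b1,... : 16 bytes total */
}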
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-})
-#else
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-})
-#else
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-})
-#else
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-})
-#else
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-})
-#else
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-})
-#else
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-})
-#else
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-})
-#else
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-})
-#else
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-})
-#else
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-})
-#else
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-})
-#else
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-})
-#else
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-})
-#else
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-})
-#else
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
-})
-#endif
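// Editorial annotation (not part of the original diff): the vst2_lane_* /
// vst2q_lane_* macros above store only a single lane from each of the two
// vectors; __p2 is the lane index and must be a compile-time constant in
// range for the vector type. A minimal sketch, assuming <arm_neon.h> and a
// NEON target; store_pair_lane0 is a hypothetical helper:

#include <arm_neon.h>

static inline void store_pair_lane0(uint32_t *dst, uint32x2x2_t v) {
  vst2_lane_u32(dst, v, 0); /* writes v.val[0] lane 0, then v.val[1] lane 0 */
}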
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst3_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst3_s64(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
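// Editorial annotation (not part of the original diff): the vst3_* macros
// above perform 3-way interleaved stores, e.g. turning three planar channels
// back into packed triples such as RGB. A minimal sketch, assuming
// <arm_neon.h> and a NEON target; store_rgb8 is a hypothetical helper:

#include <arm_neon.h>

static inline void store_rgb8(uint8_t *dst, uint8x8_t r, uint8x8_t g, uint8x8_t b) {
  uint8x8x3_t rgb = { { r, g, b } };
  vst3_u8(dst, rgb); /* writes r0,g0,b0,r1,g1,b1,... : 24 bytes total */
}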
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-})
-#else
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-})
-#else
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-})
-#else
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-})
-#else
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-})
-#else
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-})
-#else
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-})
-#else
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-})
-#else
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-})
-#else
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-})
-#else
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-})
-#else
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-})
-#else
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-})
-#else
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-})
-#else
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-})
-#else
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
-})
-#endif
-
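The vst3_lane macros above store a single lane (__p2) from each of the three vectors in an x3 structure to consecutive elements at __p0; the trailing integer literal is the element-type code that the generic __builtin_neon_vst3_lane_v builtin dispatches on, and the big-endian branch reverses each vector's lanes first so the builtin always sees little-endian lane numbering. A minimal usage sketch (function name and lane index are illustrative, assuming an ARM target with NEON and <arm_neon.h>):

#include <arm_neon.h>

/* Store lane 2 of each of three int16x4 vectors to dst[0..2]. */
void store_one_triple(int16_t *dst, int16x4x3_t v) {
  /* dst[0] = v.val[0][2], dst[1] = v.val[1][2], dst[2] = v.val[2][2] */
  vst3_lane_s16(dst, v, 2);
}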
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst4_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst4_s64(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
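The vst4 family above performs an interleaving store: the four vectors of the x4 structure are written element by element, so memory receives val[0][0], val[1][0], val[2][0], val[3][0], val[0][1], and so on. A common use is packing planar channels into pixel order; a sketch with illustrative names, assuming an ARM NEON target:

#include <arm_neon.h>

/* Interleave four 8-lane planes (e.g. R, G, B, A) into 32 bytes of RGBA. */
void pack_rgba8(uint8_t *dst, uint8x8_t r, uint8x8_t g, uint8x8_t b, uint8x8_t a) {
  uint8x8x4_t px = { { r, g, b, a } };
  vst4_u8(dst, px);  /* dst = R0 G0 B0 A0 R1 G1 B1 A1 ... */
}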
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-})
-#else
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-})
-#else
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-})
-#else
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-})
-#else
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-})
-#else
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-})
-#else
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-})
-#else
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-})
-#else
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-})
-#else
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-})
-#else
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-})
-#else
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-})
-#else
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-})
-#else
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-})
-#else
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-})
-#else
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
-})
-#endif
-
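vst4_lane is the single-structure counterpart: it stores lane __p2 of each of the four vectors to four consecutive elements, e.g. one RGBA pixel out of the eight held in planar registers. A sketch (illustrative name and lane, assuming NEON):

#include <arm_neon.h>

/* Write one pixel: lane 5 of each plane, as 4 consecutive bytes. */
void store_pixel5(uint8_t *dst, uint8x8x4_t planes) {
  vst4_lane_u8(dst, planes, 5);
}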
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
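The vsub/vsubq functions above are plain lane-wise subtraction; because subtraction is independent per lane, the big-endian reverse-subtract-reverse sequence yields an identical result and exists only so the generated code keeps a single lane-numbering convention around the compiler's vector operators. Integer lanes wrap modulo 2^n exactly as scalar C does. A sketch with an illustrative name:

#include <arm_neon.h>

/* c[i] = a[i] - b[i] for four int32 lanes (wraps on overflow). */
int32x4_t diff4(int32x4_t a, int32x4_t b) {
  return vsubq_s32(a, b);
}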
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
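vsubhn (subtract and narrow, returning the high half) computes the full-width difference and keeps only its most significant half in the narrowed lane; for 32-bit lanes that is (a[i] - b[i]) >> 16 truncated to int16_t. The __noswap_ variants are endian-neutral helpers that other intrinsics in this header call after doing their own lane reversal. A scalar model plus usage (illustrative names, assuming NEON):

#include <arm_neon.h>
#include <stdint.h>

/* Scalar model of one vsubhn_s32 lane: top 16 bits of the 32-bit difference. */
static int16_t subhn_lane(int32_t a, int32_t b) {
  return (int16_t)((a - b) >> 16);
}

/* Each output lane equals subhn_lane(a[i], b[i]). */
int16x4_t narrow_diff(int32x4_t a, int32x4_t b) {
  return vsubhn_s32(a, b);
}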
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
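vsubl is the widening ("long") subtract: as the little-endian definitions above show, it is literally a vmovl sign/zero extension of both operands followed by a full-precision subtraction at the doubled element width, so differences that would wrap in the narrow type stay exact. Sketch, illustrative name:

#include <arm_neon.h>

/* 8-bit lanes are sign-extended to 16 bits before subtracting,
   so e.g. (-128) - 127 = -255 is representable. */
int16x8_t wide_diff(int8x8_t a, int8x8_t b) {
  return vsubl_s8(a, b);
}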
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
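/* [Editorial note, not part of the original arm_neon.h being deleted here.]
 * Every vuzp/vuzpq intrinsic above is emitted twice: the __LITTLE_ENDIAN__
 * variant passes its operands straight to the NEON builtin, while the
 * big-endian variant first reverses the lane order of each input with
 * __builtin_shufflevector, invokes the builtin, then reverses both result
 * vectors back, so user-visible lane numbering is identical on either byte
 * order. As a sketch of the de-interleave itself (per the Arm VUZP
 * definition, for 8-lane inputs a and b):
 *
 *   vuzp_u8(a, b).val[0] == {a0, a2, a4, a6, b0, b2, b4, b6}   // even lanes
 *   vuzp_u8(a, b).val[1] == {a1, a3, a5, a7, b1, b3, b5, b7}   // odd lanes
 */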
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
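/* [Editorial note.] vzip/vzipq is the inverse companion of vuzp: it
 * interleaves two vectors lane by lane. Per the Arm VZIP definition, for
 * 8-lane inputs a and b:
 *
 *   vzip_u8(a, b).val[0] == {a0, b0, a1, b1, a2, b2, a3, b3}   // low halves
 *   vzip_u8(a, b).val[1] == {a4, b4, a5, b5, a6, b6, a7, b7}   // high halves
 *
 * so applying vuzp to the two vzip results recovers {a, b}.
 */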
-
-#if !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
-  float16x4_t __s0_122 = __p0_122; \
-  float16x8_t __ret_122; \
-  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
-  __ret_122; \
-})
-#else
-#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
-  float16x4_t __s0_123 = __p0_123; \
-  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
-  float16x8_t __ret_123; \
-  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
-  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_123; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
-  float16x4_t __s0_124 = __p0_124; \
-  float16x4_t __ret_124; \
-  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
-  __ret_124; \
-})
-#else
-#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
-  float16x4_t __s0_125 = __p0_125; \
-  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
-  float16x4_t __ret_125; \
-  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
-  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
-  __ret_125; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
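/* [Editorial note.] vdup_n, vdupq_n, vmov_n and vmovq_n above all perform
 * the same operation, broadcasting one scalar into every lane; the mov
 * spellings exist for parity with the Arm instruction mnemonics. Note that
 * the big-endian variants still append a lane reversal even though reversing
 * a splat is value-preserving: the generator emits the swap mechanically for
 * every vector result. Usage sketch:
 *
 *   float16x4_t quarter = vdup_n_f16((float16_t)0.25);  // {0.25, 0.25, 0.25, 0.25}
 */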
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
-  int32x4_t __s0_126 = __p0_126; \
-  int32x2_t __s1_126 = __p1_126; \
-  int32x4_t __ret_126; \
-  __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
-  __ret_126; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
-  int32x4_t __s0_127 = __p0_127; \
-  int32x2_t __s1_127 = __p1_127; \
-  int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
-  int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
-  int32x4_t __ret_127; \
-  __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
-  __ret_127; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
-  int16x8_t __s0_128 = __p0_128; \
-  int16x4_t __s1_128 = __p1_128; \
-  int16x8_t __ret_128; \
-  __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
-  __ret_128; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
-  int16x8_t __s0_129 = __p0_129; \
-  int16x4_t __s1_129 = __p1_129; \
-  int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
-  int16x8_t __ret_129; \
-  __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
-  __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_129; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
-  int32x2_t __s0_130 = __p0_130; \
-  int32x2_t __s1_130 = __p1_130; \
-  int32x2_t __ret_130; \
-  __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
-  __ret_130; \
-})
-#else
-#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
-  int32x2_t __s0_131 = __p0_131; \
-  int32x2_t __s1_131 = __p1_131; \
-  int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
-  int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
-  int32x2_t __ret_131; \
-  __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
-  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
-  __ret_131; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
-  int16x4_t __s0_132 = __p0_132; \
-  int16x4_t __s1_132 = __p1_132; \
-  int16x4_t __ret_132; \
-  __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
-  __ret_132; \
-})
-#else
-#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
-  int16x4_t __s0_133 = __p0_133; \
-  int16x4_t __s1_133 = __p1_133; \
-  int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
-  int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
-  int16x4_t __ret_133; \
-  __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
-  __ret_133; \
-})
-#endif
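/* [Editorial note.] vqdmulh is "saturating doubling multiply, returning the
 * high half": each lane computes sat((2 * a * b) >> n) for n-bit elements,
 * and saturation can only trigger in the single corner case where both
 * operands are the element type's INT_MIN. A scalar reference model (a
 * sketch for illustration, not the header's own code):
 *
 *   static inline int16_t qdmulh_s16(int16_t a, int16_t b) {
 *     int64_t d = (int64_t)a * b * 2;   // doubling product, widened so 0x8000*0x8000*2 cannot overflow
 *     int64_t r = d >> 16;              // keep the high 16 bits
 *     return r > INT16_MAX ? INT16_MAX : (int16_t)r;  // saturate the INT16_MIN * INT16_MIN case
 *   }
 *
 * The vqrdmulh family that follows is identical except that the shift
 * rounds: r = (d + (1 << 15)) >> 16 for 16-bit lanes.
 */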
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
-  int32x4_t __s0_134 = __p0_134; \
-  int32x2_t __s1_134 = __p1_134; \
-  int32x4_t __ret_134; \
-  __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
-  __ret_134; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
-  int32x4_t __s0_135 = __p0_135; \
-  int32x2_t __s1_135 = __p1_135; \
-  int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
-  int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
-  int32x4_t __ret_135; \
-  __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
-  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
-  __ret_135; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
-  int16x8_t __s0_136 = __p0_136; \
-  int16x4_t __s1_136 = __p1_136; \
-  int16x8_t __ret_136; \
-  __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
-  __ret_136; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
-  int16x8_t __s0_137 = __p0_137; \
-  int16x4_t __s1_137 = __p1_137; \
-  int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
-  int16x8_t __ret_137; \
-  __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
-  __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_137; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  int32x2_t __s0_138 = __p0_138; \
-  int32x2_t __s1_138 = __p1_138; \
-  int32x2_t __ret_138; \
-  __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
-  __ret_138; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  int32x2_t __s0_139 = __p0_139; \
-  int32x2_t __s1_139 = __p1_139; \
-  int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
-  int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
-  int32x2_t __ret_139; \
-  __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
-  __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
-  __ret_139; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  int16x4_t __s0_140 = __p0_140; \
-  int16x4_t __s1_140 = __p1_140; \
-  int16x4_t __ret_140; \
-  __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
-  __ret_140; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  int16x4_t __s0_141 = __p0_141; \
-  int16x4_t __s1_141 = __p1_141; \
-  int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
-  int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
-  int16x4_t __ret_141; \
-  __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
-  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
-  __ret_141; \
-})
-#endif
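/* [Editorial note.] The long vreinterpret/vreinterpretq run that follows is
 * a family of pure bitcasts: each function is a C-level cast that relabels
 * the same 64 or 128 bits with a new element type, reordering no lanes,
 * which is why, unlike everything above, none of them are wrapped in
 * endian-specific #ifdef variants. Usage sketch:
 *
 *   float32x2_t f = vdup_n_f32(1.0f);
 *   uint32x2_t bits = vreinterpret_u32_f32(f);  // each lane now reads as 0x3F800000
 */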
-
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if (__ARM_FP & 2)
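-// Half-precision (__fp16) storage-format intrinsics, available when the FP
-// extension supports half precision (__ARM_FP bit 1). Throughout this block,
-// the __LITTLE_ENDIAN__ path calls the builtin directly, while the big-endian
-// path reverses the lane order with __builtin_shufflevector before and after
-// the call, because the NEON builtins assume little-endian lane numbering.
-// The trailing integer constant in each call is an internal type code.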
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#endif
-
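-// float16 loads: vld1 reads contiguous elements, vld1_dup broadcasts a single
-// element to all lanes, vld1_lane replaces one lane, the _x2/_x3/_x4 forms
-// read 2-4 consecutive registers, and vld2/vld3/vld4 de-interleave 2-, 3- and
-// 4-element structures.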
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
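-// float16 stores: the vst1/vst2/vst3/vst4 macros mirror the loads above,
-// writing contiguous elements, single lanes, or interleaved structures.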
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
-})
-#else
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
-})
-#else
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-})
-#else
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-})
-#else
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-})
-#else
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-})
-#else
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-})
-#else
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-})
-#else
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-})
-#else
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-})
-#else
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8
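-// ARMv8 float-to-integer conversions with an explicit rounding direction:
-// vcvta* rounds to nearest with ties away from zero, vcvtm* toward minus
-// infinity, vcvtn* to nearest with ties to even, and vcvtp* toward plus
-// infinity.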
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES)
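-// ARMv8 Crypto Extension AES helpers: vaeseq_u8/vaesdq_u8 perform a single
-// AES encryption/decryption round (AESE/AESD), and vaesmcq_u8/vaesimcq_u8
-// apply the (inverse) MixColumns step (AESMC/AESIMC).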
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
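-// Directed floating-point rounding (FRINT* instructions): vrnd rounds toward
-// zero, vrnda to nearest with ties away from zero, vrndi using the current
-// rounding mode, vrndm toward minus infinity, and vrndn to nearest with ties
-// to even.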
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint32_t vsha1h_u32(uint32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
-  float32x4_t __s0_142 = __p0_142; \
-  bfloat16x8_t __s1_142 = __p1_142; \
-  bfloat16x4_t __s2_142 = __p2_142; \
-  float32x4_t __ret_142; \
-bfloat16x4_t __reint_142 = __s2_142; \
-float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
-  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
-  __ret_142; \
-})
-#else
-#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
-  float32x4_t __s0_143 = __p0_143; \
-  bfloat16x8_t __s1_143 = __p1_143; \
-  bfloat16x4_t __s2_143 = __p2_143; \
-  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
-  float32x4_t __ret_143; \
-bfloat16x4_t __reint_143 = __rev2_143; \
-float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
-  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
-  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
-  __ret_143; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
-  float32x2_t __s0_144 = __p0_144; \
-  bfloat16x4_t __s1_144 = __p1_144; \
-  bfloat16x4_t __s2_144 = __p2_144; \
-  float32x2_t __ret_144; \
-bfloat16x4_t __reint_144 = __s2_144; \
-float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
-  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
-  __ret_144; \
-})
-#else
-#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  float32x2_t __s0_145 = __p0_145; \
-  bfloat16x4_t __s1_145 = __p1_145; \
-  bfloat16x4_t __s2_145 = __p2_145; \
-  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
-  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
-  float32x2_t __ret_145; \
-bfloat16x4_t __reint_145 = __rev2_145; \
-float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
-  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
-  __ret_145; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  float32x4_t __s0_146 = __p0_146; \
-  bfloat16x8_t __s1_146 = __p1_146; \
-  bfloat16x8_t __s2_146 = __p2_146; \
-  float32x4_t __ret_146; \
-bfloat16x8_t __reint_146 = __s2_146; \
-float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
-  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
-  __ret_146; \
-})
-#else
-#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  float32x4_t __s0_147 = __p0_147; \
-  bfloat16x8_t __s1_147 = __p1_147; \
-  bfloat16x8_t __s2_147 = __p2_147; \
-  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_147; \
-bfloat16x8_t __reint_147 = __rev2_147; \
-float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
-  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
-  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  float32x2_t __s0_148 = __p0_148; \
-  bfloat16x4_t __s1_148 = __p1_148; \
-  bfloat16x8_t __s2_148 = __p2_148; \
-  float32x2_t __ret_148; \
-bfloat16x8_t __reint_148 = __s2_148; \
-float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
-  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
-  __ret_148; \
-})
-#else
-#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  float32x2_t __s0_149 = __p0_149; \
-  bfloat16x4_t __s1_149 = __p1_149; \
-  bfloat16x8_t __s2_149 = __p2_149; \
-  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
-  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_149; \
-bfloat16x8_t __reint_149 = __rev2_149; \
-float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
-  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
-  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (bfloat16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
-  float32x4_t __ret_150;
-bfloat16x4_t __reint_150 = __p0_150;
-int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
-  __ret_150 = *(float32x4_t *) &__reint1_150;
-  return __ret_150;
-}
-#else
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
-  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
-  float32x4_t __ret_151;
-bfloat16x4_t __reint_151 = __rev0_151;
-int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
-  __ret_151 = *(float32x4_t *) &__reint1_151;
-  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
-  return __ret_151;
-}
-__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
-  float32x4_t __ret_152;
-bfloat16x4_t __reint_152 = __p0_152;
-int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
-  __ret_152 = *(float32x4_t *) &__reint1_152;
-  return __ret_152;
-}
-#endif
-
-__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
-  float32_t __ret;
-bfloat16_t __reint = __p0;
-int32_t __reint1 = *(int32_t *) &__reint << 16;
-  __ret = *(float32_t *) &__reint1;
-  return __ret;
-}
-__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
-  bfloat16_t __ret;
-  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
-  bfloat16x4_t __s0_153 = __p0_153; \
-  bfloat16x8_t __ret_153; \
-  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
-  __ret_153; \
-})
-#else
-#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
-  bfloat16x4_t __s0_154 = __p0_154; \
-  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_154; \
-  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
-  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_154; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
-  bfloat16x4_t __s0_155 = __p0_155; \
-  bfloat16x4_t __ret_155; \
-  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
-  __ret_155; \
-})
-#else
-#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
-  bfloat16x4_t __s0_156 = __p0_156; \
-  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_156; \
-  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
-  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
-  __ret_156; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
-  bfloat16x8_t __s0_157 = __p0_157; \
-  bfloat16x8_t __ret_157; \
-  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
-  __ret_157; \
-})
-#else
-#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
-  bfloat16x8_t __s0_158 = __p0_158; \
-  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_158; \
-  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
-  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_158; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
-  bfloat16x8_t __s0_159 = __p0_159; \
-  bfloat16x4_t __ret_159; \
-  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
-  __ret_159; \
-})
-#else
-#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
-  bfloat16x8_t __s0_160 = __p0_160; \
-  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_160; \
-  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
-  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
-  __ret_160; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-  __ret; \
-})
-#else
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-  __ret; \
-})
-#else
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
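-// vld4(q)/vld4(q)_dup/vld4(q)_lane_bf16: four-vector versions of the loads
-// above; the _dup forms load a single four-element structure and replicate
-// it across every lane.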
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
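-// vset(q)_lane_bf16: insert scalar __p0 into lane __p2 of vector __p1. The
-// __noswap_ variants are internal helpers for other big-endian wrappers that
-// have already reversed their operands, so the lane index is not remapped a
-// second time.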
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
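-// bf16 stores: vst1 writes one vector, the _x2/_x3/_x4 forms write several
-// registers back-to-back, and vst2/vst3/vst4 interleave their vectors
-// element-wise. The trailing integer passed to each builtin is an internal
-// NeonTypeFlags code (11 = BFloat16 element type; +32 flags a 128-bit "q"
-// vector, giving 43).
-// Usage sketch (hypothetical src/dst pointers, bf16-enabled target assumed):
-//   bfloat16x4_t v = vld1_bf16(src);   // contiguous 4-element load
-//   vst1_bf16(dst, v);                 // contiguous store
-//   vst1_lane_bf16(dst, v, 0);         // store lane 0 only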
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
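-// AArch32-only bf16 conversions: __a32_vcvt_bf16_f32 narrows four f32 values
-// to bfloat16 through the vcvt builtin; vcvt_bf16_f32, vcvtq_low_bf16_f32 and
-// vcvtq_high_bf16_f32 are composed from it with vcombine_bf16 (the _low form
-// zeroes the unused half, the _high form preserves the low half of its first
-// argument).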
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __a32_vcvt_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(vget_low_bf16(__p0), __a32_vcvt_bf16_f32(__p1));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap_vget_low_bf16(__rev0), __noswap___a32_vcvt_bf16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p0), (bfloat16x4_t)(0ULL));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev0), (bfloat16x4_t)(0ULL));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
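-// AArch64 bf16 section: here the low/high conversions map directly onto the
-// BFCVTN/BFCVTN2-style builtins instead of being composed with vcombine, and
-// the vcopy(q)_lane(q)_bf16 macros splice one lane of vector __p2 into
-// vector __p0 via a get-lane/set-lane pair.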
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
-  bfloat16x8_t __s0_161 = __p0_161; \
-  bfloat16x4_t __s2_161 = __p2_161; \
-  bfloat16x8_t __ret_161; \
-  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
-  __ret_161; \
-})
-#else
-#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
-  bfloat16x8_t __s0_162 = __p0_162; \
-  bfloat16x4_t __s2_162 = __p2_162; \
-  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_162; \
-  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
-  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_162; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
-  bfloat16x4_t __s0_163 = __p0_163; \
-  bfloat16x4_t __s2_163 = __p2_163; \
-  bfloat16x4_t __ret_163; \
-  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
-  __ret_163; \
-})
-#else
-#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
-  bfloat16x4_t __s0_164 = __p0_164; \
-  bfloat16x4_t __s2_164 = __p2_164; \
-  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_164; \
-  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
-  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
-  __ret_164; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
-  bfloat16x8_t __s0_165 = __p0_165; \
-  bfloat16x8_t __s2_165 = __p2_165; \
-  bfloat16x8_t __ret_165; \
-  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
-  __ret_165; \
-})
-#else
-#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
-  bfloat16x8_t __s0_166 = __p0_166; \
-  bfloat16x8_t __s2_166 = __p2_166; \
-  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_166; \
-  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
-  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_166; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
-  bfloat16x4_t __s0_167 = __p0_167; \
-  bfloat16x8_t __s2_167 = __p2_167; \
-  bfloat16x4_t __ret_167; \
-  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
-  __ret_167; \
-})
-#else
-#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
-  bfloat16x4_t __s0_168 = __p0_168; \
-  bfloat16x8_t __s2_168 = __p2_168; \
-  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_168; \
-  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
-  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
-  __ret_168; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = __a64_vcvtq_low_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
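-// Complex-arithmetic intrinsics (__ARM_FEATURE_COMPLEX): vectors carry
-// interleaved (real, imaginary) element pairs. vcadd_rot90/rot270 add with
-// the second operand rotated by 90/270 degrees in the complex plane, and
-// vcmla[_rotN] is a complex multiply-accumulate; the _lane/_laneq forms
-// broadcast one complex pair by reinterpreting it as a 64-bit lane.
-// Usage sketch (hypothetical acc/a/b operands): a full complex
-// multiply-accumulate takes the rot0 and rot90 forms together:
-//   acc = vcmla_f32(acc, a, b);        // acc += (a.re*b.re, a.re*b.im)
-//   acc = vcmla_rot90_f32(acc, a, b);  // acc += (-a.im*b.im, a.im*b.re)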
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
-  float32x2_t __s0_169 = __p0_169; \
-  float32x2_t __s1_169 = __p1_169; \
-  float32x2_t __s2_169 = __p2_169; \
-  float32x2_t __ret_169; \
-float32x2_t __reint_169 = __s2_169; \
-uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
-  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
-  __ret_169; \
-})
-#else
-#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
-  float32x2_t __s0_170 = __p0_170; \
-  float32x2_t __s1_170 = __p1_170; \
-  float32x2_t __s2_170 = __p2_170; \
-  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
-  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
-  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
-  float32x2_t __ret_170; \
-float32x2_t __reint_170 = __rev2_170; \
-uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
-  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
-  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
-  __ret_170; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
-  float32x4_t __s0_171 = __p0_171; \
-  float32x4_t __s1_171 = __p1_171; \
-  float32x2_t __s2_171 = __p2_171; \
-  float32x4_t __ret_171; \
-float32x2_t __reint_171 = __s2_171; \
-uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
-  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
-  __ret_171; \
-})
-#else
-#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
-  float32x4_t __s0_172 = __p0_172; \
-  float32x4_t __s1_172 = __p1_172; \
-  float32x2_t __s2_172 = __p2_172; \
-  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
-  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
-  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
-  float32x4_t __ret_172; \
-float32x2_t __reint_172 = __rev2_172; \
-uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
-  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
-  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
-  __ret_172; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
-  float32x2_t __s0_173 = __p0_173; \
-  float32x2_t __s1_173 = __p1_173; \
-  float32x4_t __s2_173 = __p2_173; \
-  float32x2_t __ret_173; \
-float32x4_t __reint_173 = __s2_173; \
-uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
-  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
-  __ret_173; \
-})
-#else
-#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
-  float32x2_t __s0_174 = __p0_174; \
-  float32x2_t __s1_174 = __p1_174; \
-  float32x4_t __s2_174 = __p2_174; \
-  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
-  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
-  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
-  float32x2_t __ret_174; \
-float32x4_t __reint_174 = __rev2_174; \
-uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
-  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
-  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
-  __ret_174; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
-  float32x4_t __s0_175 = __p0_175; \
-  float32x4_t __s1_175 = __p1_175; \
-  float32x4_t __s2_175 = __p2_175; \
-  float32x4_t __ret_175; \
-float32x4_t __reint_175 = __s2_175; \
-uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
-  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
-  __ret_175; \
-})
-#else
-#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
-  float32x4_t __s0_176 = __p0_176; \
-  float32x4_t __s1_176 = __p1_176; \
-  float32x4_t __s2_176 = __p2_176; \
-  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
-  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
-  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
-  float32x4_t __ret_176; \
-float32x4_t __reint_176 = __rev2_176; \
-uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
-  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
-  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
-  __ret_176; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
-  float32x2_t __s0_177 = __p0_177; \
-  float32x2_t __s1_177 = __p1_177; \
-  float32x2_t __s2_177 = __p2_177; \
-  float32x2_t __ret_177; \
-float32x2_t __reint_177 = __s2_177; \
-uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
-  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
-  __ret_177; \
-})
-#else
-#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
-  float32x2_t __s0_178 = __p0_178; \
-  float32x2_t __s1_178 = __p1_178; \
-  float32x2_t __s2_178 = __p2_178; \
-  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
-  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
-  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
-  float32x2_t __ret_178; \
-float32x2_t __reint_178 = __rev2_178; \
-uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
-  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
-  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
-  __ret_178; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
-  float32x4_t __s0_179 = __p0_179; \
-  float32x4_t __s1_179 = __p1_179; \
-  float32x2_t __s2_179 = __p2_179; \
-  float32x4_t __ret_179; \
-float32x2_t __reint_179 = __s2_179; \
-uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
-  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
-  __ret_179; \
-})
-#else
-#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
-  float32x4_t __s0_180 = __p0_180; \
-  float32x4_t __s1_180 = __p1_180; \
-  float32x2_t __s2_180 = __p2_180; \
-  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
-  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
-  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
-  float32x4_t __ret_180; \
-float32x2_t __reint_180 = __rev2_180; \
-uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
-  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
-  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
-  __ret_180; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
-  float32x2_t __s0_181 = __p0_181; \
-  float32x2_t __s1_181 = __p1_181; \
-  float32x4_t __s2_181 = __p2_181; \
-  float32x2_t __ret_181; \
-float32x4_t __reint_181 = __s2_181; \
-uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
-  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
-  __ret_181; \
-})
-#else
-#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
-  float32x2_t __s0_182 = __p0_182; \
-  float32x2_t __s1_182 = __p1_182; \
-  float32x4_t __s2_182 = __p2_182; \
-  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
-  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
-  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
-  float32x2_t __ret_182; \
-float32x4_t __reint_182 = __rev2_182; \
-uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
-  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
-  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
-  __ret_182; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
-  float32x4_t __s0_183 = __p0_183; \
-  float32x4_t __s1_183 = __p1_183; \
-  float32x4_t __s2_183 = __p2_183; \
-  float32x4_t __ret_183; \
-float32x4_t __reint_183 = __s2_183; \
-uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
-  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
-  __ret_183; \
-})
-#else
-#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
-  float32x4_t __s0_184 = __p0_184; \
-  float32x4_t __s1_184 = __p1_184; \
-  float32x4_t __s2_184 = __p2_184; \
-  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
-  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
-  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
-  float32x4_t __ret_184; \
-float32x4_t __reint_184 = __rev2_184; \
-uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
-  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
-  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
-  __ret_184; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
-  float32x2_t __s0_185 = __p0_185; \
-  float32x2_t __s1_185 = __p1_185; \
-  float32x2_t __s2_185 = __p2_185; \
-  float32x2_t __ret_185; \
-float32x2_t __reint_185 = __s2_185; \
-uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
-  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
-  __ret_185; \
-})
-#else
-#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
-  float32x2_t __s0_186 = __p0_186; \
-  float32x2_t __s1_186 = __p1_186; \
-  float32x2_t __s2_186 = __p2_186; \
-  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
-  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
-  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
-  float32x2_t __ret_186; \
-float32x2_t __reint_186 = __rev2_186; \
-uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
-  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
-  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
-  __ret_186; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
-  float32x4_t __s0_187 = __p0_187; \
-  float32x4_t __s1_187 = __p1_187; \
-  float32x2_t __s2_187 = __p2_187; \
-  float32x4_t __ret_187; \
-float32x2_t __reint_187 = __s2_187; \
-uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
-  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
-  __ret_187; \
-})
-#else
-#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
-  float32x4_t __s0_188 = __p0_188; \
-  float32x4_t __s1_188 = __p1_188; \
-  float32x2_t __s2_188 = __p2_188; \
-  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
-  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
-  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
-  float32x4_t __ret_188; \
-float32x2_t __reint_188 = __rev2_188; \
-uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
-  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
-  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
-  __ret_188; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
-  float32x2_t __s0_189 = __p0_189; \
-  float32x2_t __s1_189 = __p1_189; \
-  float32x4_t __s2_189 = __p2_189; \
-  float32x2_t __ret_189; \
-float32x4_t __reint_189 = __s2_189; \
-uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
-  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
-  __ret_189; \
-})
-#else
-#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
-  float32x2_t __s0_190 = __p0_190; \
-  float32x2_t __s1_190 = __p1_190; \
-  float32x4_t __s2_190 = __p2_190; \
-  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
-  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
-  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
-  float32x2_t __ret_190; \
-float32x4_t __reint_190 = __rev2_190; \
-uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
-  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
-  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
-  __ret_190; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
-  float32x4_t __s0_191 = __p0_191; \
-  float32x4_t __s1_191 = __p1_191; \
-  float32x4_t __s2_191 = __p2_191; \
-  float32x4_t __ret_191; \
-float32x4_t __reint_191 = __s2_191; \
-uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
-  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
-  __ret_191; \
-})
-#else
-#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
-  float32x4_t __s0_192 = __p0_192; \
-  float32x4_t __s1_192 = __p1_192; \
-  float32x4_t __s2_192 = __p2_192; \
-  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
-  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
-  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
-  float32x4_t __ret_192; \
-float32x4_t __reint_192 = __rev2_192; \
-uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
-  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
-  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
-  __ret_192; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
-  float32x2_t __s0_193 = __p0_193; \
-  float32x2_t __s1_193 = __p1_193; \
-  float32x2_t __s2_193 = __p2_193; \
-  float32x2_t __ret_193; \
-float32x2_t __reint_193 = __s2_193; \
-uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
-  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
-  __ret_193; \
-})
-#else
-#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
-  float32x2_t __s0_194 = __p0_194; \
-  float32x2_t __s1_194 = __p1_194; \
-  float32x2_t __s2_194 = __p2_194; \
-  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
-  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
-  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
-  float32x2_t __ret_194; \
-float32x2_t __reint_194 = __rev2_194; \
-uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
-  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
-  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
-  __ret_194; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
-  float32x4_t __s0_195 = __p0_195; \
-  float32x4_t __s1_195 = __p1_195; \
-  float32x2_t __s2_195 = __p2_195; \
-  float32x4_t __ret_195; \
-float32x2_t __reint_195 = __s2_195; \
-uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
-  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
-  __ret_195; \
-})
-#else
-#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
-  float32x4_t __s0_196 = __p0_196; \
-  float32x4_t __s1_196 = __p1_196; \
-  float32x2_t __s2_196 = __p2_196; \
-  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
-  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
-  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
-  float32x4_t __ret_196; \
-float32x2_t __reint_196 = __rev2_196; \
-uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
-  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
-  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
-  __ret_196; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
-  float32x2_t __s0_197 = __p0_197; \
-  float32x2_t __s1_197 = __p1_197; \
-  float32x4_t __s2_197 = __p2_197; \
-  float32x2_t __ret_197; \
-float32x4_t __reint_197 = __s2_197; \
-uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
-  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
-  __ret_197; \
-})
-#else
-#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
-  float32x2_t __s0_198 = __p0_198; \
-  float32x2_t __s1_198 = __p1_198; \
-  float32x4_t __s2_198 = __p2_198; \
-  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
-  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
-  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
-  float32x2_t __ret_198; \
-float32x4_t __reint_198 = __rev2_198; \
-uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
-  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
-  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
-  __ret_198; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
-  float32x4_t __s0_199 = __p0_199; \
-  float32x4_t __s1_199 = __p1_199; \
-  float32x4_t __s2_199 = __p2_199; \
-  float32x4_t __ret_199; \
-float32x4_t __reint_199 = __s2_199; \
-uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
-  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
-  __ret_199; \
-})
-#else
-#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
-  float32x4_t __s0_200 = __p0_200; \
-  float32x4_t __s1_200 = __p1_200; \
-  float32x4_t __s2_200 = __p2_200; \
-  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
-  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
-  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
-  float32x4_t __ret_200; \
-float32x4_t __reint_200 = __rev2_200; \
-uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
-  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
-  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
-  __ret_200; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
-  float16x4_t __s0_201 = __p0_201; \
-  float16x4_t __s1_201 = __p1_201; \
-  float16x4_t __s2_201 = __p2_201; \
-  float16x4_t __ret_201; \
-float16x4_t __reint_201 = __s2_201; \
-uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
-  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
-  __ret_201; \
-})
-#else
-#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
-  float16x4_t __s0_202 = __p0_202; \
-  float16x4_t __s1_202 = __p1_202; \
-  float16x4_t __s2_202 = __p2_202; \
-  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
-  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
-  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
-  float16x4_t __ret_202; \
-float16x4_t __reint_202 = __rev2_202; \
-uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
-  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
-  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
-  __ret_202; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
-  float16x8_t __s0_203 = __p0_203; \
-  float16x8_t __s1_203 = __p1_203; \
-  float16x4_t __s2_203 = __p2_203; \
-  float16x8_t __ret_203; \
-float16x4_t __reint_203 = __s2_203; \
-uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
-  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
-  __ret_203; \
-})
-#else
-#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
-  float16x8_t __s0_204 = __p0_204; \
-  float16x8_t __s1_204 = __p1_204; \
-  float16x4_t __s2_204 = __p2_204; \
-  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
-  float16x8_t __ret_204; \
-float16x4_t __reint_204 = __rev2_204; \
-uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
-  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
-  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_204; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
-  float16x4_t __s0_205 = __p0_205; \
-  float16x4_t __s1_205 = __p1_205; \
-  float16x8_t __s2_205 = __p2_205; \
-  float16x4_t __ret_205; \
-float16x8_t __reint_205 = __s2_205; \
-uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
-  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
-  __ret_205; \
-})
-#else
-#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
-  float16x4_t __s0_206 = __p0_206; \
-  float16x4_t __s1_206 = __p1_206; \
-  float16x8_t __s2_206 = __p2_206; \
-  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
-  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
-  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_206; \
-float16x8_t __reint_206 = __rev2_206; \
-uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
-  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
-  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
-  __ret_206; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
-  float16x8_t __s0_207 = __p0_207; \
-  float16x8_t __s1_207 = __p1_207; \
-  float16x8_t __s2_207 = __p2_207; \
-  float16x8_t __ret_207; \
-float16x8_t __reint_207 = __s2_207; \
-uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
-  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
-  __ret_207; \
-})
-#else
-#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
-  float16x8_t __s0_208 = __p0_208; \
-  float16x8_t __s1_208 = __p1_208; \
-  float16x8_t __s2_208 = __p2_208; \
-  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_208; \
-float16x8_t __reint_208 = __rev2_208; \
-uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
-  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
-  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_208; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
-  float16x4_t __s0_209 = __p0_209; \
-  float16x4_t __s1_209 = __p1_209; \
-  float16x4_t __s2_209 = __p2_209; \
-  float16x4_t __ret_209; \
-float16x4_t __reint_209 = __s2_209; \
-uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
-  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
-  __ret_209; \
-})
-#else
-#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
-  float16x4_t __s0_210 = __p0_210; \
-  float16x4_t __s1_210 = __p1_210; \
-  float16x4_t __s2_210 = __p2_210; \
-  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
-  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
-  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
-  float16x4_t __ret_210; \
-float16x4_t __reint_210 = __rev2_210; \
-uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
-  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
-  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
-  __ret_210; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
-  float16x8_t __s0_211 = __p0_211; \
-  float16x8_t __s1_211 = __p1_211; \
-  float16x4_t __s2_211 = __p2_211; \
-  float16x8_t __ret_211; \
-float16x4_t __reint_211 = __s2_211; \
-uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
-  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
-  __ret_211; \
-})
-#else
-#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
-  float16x8_t __s0_212 = __p0_212; \
-  float16x8_t __s1_212 = __p1_212; \
-  float16x4_t __s2_212 = __p2_212; \
-  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
-  float16x8_t __ret_212; \
-float16x4_t __reint_212 = __rev2_212; \
-uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
-  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
-  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_212; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
-  float16x4_t __s0_213 = __p0_213; \
-  float16x4_t __s1_213 = __p1_213; \
-  float16x8_t __s2_213 = __p2_213; \
-  float16x4_t __ret_213; \
-float16x8_t __reint_213 = __s2_213; \
-uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
-  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
-  __ret_213; \
-})
-#else
-#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
-  float16x4_t __s0_214 = __p0_214; \
-  float16x4_t __s1_214 = __p1_214; \
-  float16x8_t __s2_214 = __p2_214; \
-  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
-  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
-  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_214; \
-float16x8_t __reint_214 = __rev2_214; \
-uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
-  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
-  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
-  __ret_214; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
-  float16x8_t __s0_215 = __p0_215; \
-  float16x8_t __s1_215 = __p1_215; \
-  float16x8_t __s2_215 = __p2_215; \
-  float16x8_t __ret_215; \
-float16x8_t __reint_215 = __s2_215; \
-uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
-  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
-  __ret_215; \
-})
-#else
-#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
-  float16x8_t __s0_216 = __p0_216; \
-  float16x8_t __s1_216 = __p1_216; \
-  float16x8_t __s2_216 = __p2_216; \
-  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_216; \
-float16x8_t __reint_216 = __rev2_216; \
-uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
-  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
-  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_216; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
-  float16x4_t __s0_217 = __p0_217; \
-  float16x4_t __s1_217 = __p1_217; \
-  float16x4_t __s2_217 = __p2_217; \
-  float16x4_t __ret_217; \
-float16x4_t __reint_217 = __s2_217; \
-uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
-  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
-  __ret_217; \
-})
-#else
-#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
-  float16x4_t __s0_218 = __p0_218; \
-  float16x4_t __s1_218 = __p1_218; \
-  float16x4_t __s2_218 = __p2_218; \
-  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
-  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
-  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
-  float16x4_t __ret_218; \
-float16x4_t __reint_218 = __rev2_218; \
-uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
-  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
-  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
-  __ret_218; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
-  float16x8_t __s0_219 = __p0_219; \
-  float16x8_t __s1_219 = __p1_219; \
-  float16x4_t __s2_219 = __p2_219; \
-  float16x8_t __ret_219; \
-float16x4_t __reint_219 = __s2_219; \
-uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
-  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
-  __ret_219; \
-})
-#else
-#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
-  float16x8_t __s0_220 = __p0_220; \
-  float16x8_t __s1_220 = __p1_220; \
-  float16x4_t __s2_220 = __p2_220; \
-  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
-  float16x8_t __ret_220; \
-float16x4_t __reint_220 = __rev2_220; \
-uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
-  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
-  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_220; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
-  float16x4_t __s0_221 = __p0_221; \
-  float16x4_t __s1_221 = __p1_221; \
-  float16x8_t __s2_221 = __p2_221; \
-  float16x4_t __ret_221; \
-float16x8_t __reint_221 = __s2_221; \
-uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
-  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
-  __ret_221; \
-})
-#else
-#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
-  float16x4_t __s0_222 = __p0_222; \
-  float16x4_t __s1_222 = __p1_222; \
-  float16x8_t __s2_222 = __p2_222; \
-  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
-  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
-  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_222; \
-float16x8_t __reint_222 = __rev2_222; \
-uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
-  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
-  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
-  __ret_222; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
-  float16x8_t __s0_223 = __p0_223; \
-  float16x8_t __s1_223 = __p1_223; \
-  float16x8_t __s2_223 = __p2_223; \
-  float16x8_t __ret_223; \
-float16x8_t __reint_223 = __s2_223; \
-uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
-  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
-  __ret_223; \
-})
-#else
-#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
-  float16x8_t __s0_224 = __p0_224; \
-  float16x8_t __s1_224 = __p1_224; \
-  float16x8_t __s2_224 = __p2_224; \
-  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_224; \
-float16x8_t __reint_224 = __rev2_224; \
-uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
-  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
-  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_224; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
-  float16x4_t __s0_225 = __p0_225; \
-  float16x4_t __s1_225 = __p1_225; \
-  float16x4_t __s2_225 = __p2_225; \
-  float16x4_t __ret_225; \
-float16x4_t __reint_225 = __s2_225; \
-uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
-  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
-  __ret_225; \
-})
-#else
-#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
-  float16x4_t __s0_226 = __p0_226; \
-  float16x4_t __s1_226 = __p1_226; \
-  float16x4_t __s2_226 = __p2_226; \
-  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
-  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
-  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
-  float16x4_t __ret_226; \
-float16x4_t __reint_226 = __rev2_226; \
-uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
-  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
-  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
-  __ret_226; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
-  float16x8_t __s0_227 = __p0_227; \
-  float16x8_t __s1_227 = __p1_227; \
-  float16x4_t __s2_227 = __p2_227; \
-  float16x8_t __ret_227; \
-float16x4_t __reint_227 = __s2_227; \
-uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
-  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
-  __ret_227; \
-})
-#else
-#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
-  float16x8_t __s0_228 = __p0_228; \
-  float16x8_t __s1_228 = __p1_228; \
-  float16x4_t __s2_228 = __p2_228; \
-  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
-  float16x8_t __ret_228; \
-float16x4_t __reint_228 = __rev2_228; \
-uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
-  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
-  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_228; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
-  float16x4_t __s0_229 = __p0_229; \
-  float16x4_t __s1_229 = __p1_229; \
-  float16x8_t __s2_229 = __p2_229; \
-  float16x4_t __ret_229; \
-float16x8_t __reint_229 = __s2_229; \
-uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
-  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
-  __ret_229; \
-})
-#else
-#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
-  float16x4_t __s0_230 = __p0_230; \
-  float16x4_t __s1_230 = __p1_230; \
-  float16x8_t __s2_230 = __p2_230; \
-  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
-  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
-  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_230; \
-float16x8_t __reint_230 = __rev2_230; \
-uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
-  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
-  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
-  __ret_230; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
-  float16x8_t __s0_231 = __p0_231; \
-  float16x8_t __s1_231 = __p1_231; \
-  float16x8_t __s2_231 = __p2_231; \
-  float16x8_t __ret_231; \
-float16x8_t __reint_231 = __s2_231; \
-uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
-  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
-  __ret_231; \
-})
-#else
-#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
-  float16x8_t __s0_232 = __p0_232; \
-  float16x8_t __s1_232 = __p1_232; \
-  float16x8_t __s2_232 = __p2_232; \
-  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_232; \
-float16x8_t __reint_232 = __rev2_232; \
-uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
-  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
-  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_232; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
-  float64x1_t __s0_233 = __p0_233; \
-  float64x1_t __s1_233 = __p1_233; \
-  float64x1_t __s2_233 = __p2_233; \
-  float64x1_t __ret_233; \
-float64x1_t __reint_233 = __s2_233; \
-uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
-  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
-  __ret_233; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
-  float64x2_t __s0_234 = __p0_234; \
-  float64x2_t __s1_234 = __p1_234; \
-  float64x1_t __s2_234 = __p2_234; \
-  float64x2_t __ret_234; \
-float64x1_t __reint_234 = __s2_234; \
-uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
-  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
-  __ret_234; \
-})
-#else
-#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
-  float64x2_t __s0_235 = __p0_235; \
-  float64x2_t __s1_235 = __p1_235; \
-  float64x1_t __s2_235 = __p2_235; \
-  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
-  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
-  float64x2_t __ret_235; \
-float64x1_t __reint_235 = __s2_235; \
-uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
-  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
-  __ret_235; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
-  float64x1_t __s0_236 = __p0_236; \
-  float64x1_t __s1_236 = __p1_236; \
-  float64x2_t __s2_236 = __p2_236; \
-  float64x1_t __ret_236; \
-float64x2_t __reint_236 = __s2_236; \
-uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
-  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
-  __ret_236; \
-})
-#else
-#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
-  float64x1_t __s0_237 = __p0_237; \
-  float64x1_t __s1_237 = __p1_237; \
-  float64x2_t __s2_237 = __p2_237; \
-  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
-  float64x1_t __ret_237; \
-float64x2_t __reint_237 = __rev2_237; \
-uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
-  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
-  float64x2_t __s0_238 = __p0_238; \
-  float64x2_t __s1_238 = __p1_238; \
-  float64x2_t __s2_238 = __p2_238; \
-  float64x2_t __ret_238; \
-float64x2_t __reint_238 = __s2_238; \
-uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
-  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
-  __ret_238; \
-})
-#else
-#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
-  float64x2_t __s0_239 = __p0_239; \
-  float64x2_t __s1_239 = __p1_239; \
-  float64x2_t __s2_239 = __p2_239; \
-  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
-  float64x2_t __ret_239; \
-float64x2_t __reint_239 = __rev2_239; \
-uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
-  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
-  float64x1_t __s0_240 = __p0_240; \
-  float64x1_t __s1_240 = __p1_240; \
-  float64x1_t __s2_240 = __p2_240; \
-  float64x1_t __ret_240; \
-float64x1_t __reint_240 = __s2_240; \
-uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
-  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
-  __ret_240; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
-  float64x2_t __s0_241 = __p0_241; \
-  float64x2_t __s1_241 = __p1_241; \
-  float64x1_t __s2_241 = __p2_241; \
-  float64x2_t __ret_241; \
-float64x1_t __reint_241 = __s2_241; \
-uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
-  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
-  __ret_241; \
-})
-#else
-#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
-  float64x2_t __s0_242 = __p0_242; \
-  float64x2_t __s1_242 = __p1_242; \
-  float64x1_t __s2_242 = __p2_242; \
-  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
-  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
-  float64x2_t __ret_242; \
-float64x1_t __reint_242 = __s2_242; \
-uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
-  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
-  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
-  __ret_242; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
-  float64x1_t __s0_243 = __p0_243; \
-  float64x1_t __s1_243 = __p1_243; \
-  float64x2_t __s2_243 = __p2_243; \
-  float64x1_t __ret_243; \
-float64x2_t __reint_243 = __s2_243; \
-uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
-  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
-  __ret_243; \
-})
-#else
-#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
-  float64x1_t __s0_244 = __p0_244; \
-  float64x1_t __s1_244 = __p1_244; \
-  float64x2_t __s2_244 = __p2_244; \
-  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
-  float64x1_t __ret_244; \
-float64x2_t __reint_244 = __rev2_244; \
-uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
-  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
-  __ret_244; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
-  float64x2_t __s0_245 = __p0_245; \
-  float64x2_t __s1_245 = __p1_245; \
-  float64x2_t __s2_245 = __p2_245; \
-  float64x2_t __ret_245; \
-float64x2_t __reint_245 = __s2_245; \
-uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
-  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
-  __ret_245; \
-})
-#else
-#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
-  float64x2_t __s0_246 = __p0_246; \
-  float64x2_t __s1_246 = __p1_246; \
-  float64x2_t __s2_246 = __p2_246; \
-  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
-  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
-  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
-  float64x2_t __ret_246; \
-float64x2_t __reint_246 = __rev2_246; \
-uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
-  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
-  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
-  __ret_246; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
-  float64x1_t __s0_247 = __p0_247; \
-  float64x1_t __s1_247 = __p1_247; \
-  float64x1_t __s2_247 = __p2_247; \
-  float64x1_t __ret_247; \
-float64x1_t __reint_247 = __s2_247; \
-uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
-  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
-  __ret_247; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
-  float64x2_t __s0_248 = __p0_248; \
-  float64x2_t __s1_248 = __p1_248; \
-  float64x1_t __s2_248 = __p2_248; \
-  float64x2_t __ret_248; \
-float64x1_t __reint_248 = __s2_248; \
-uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
-  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
-  __ret_248; \
-})
-#else
-#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
-  float64x2_t __s0_249 = __p0_249; \
-  float64x2_t __s1_249 = __p1_249; \
-  float64x1_t __s2_249 = __p2_249; \
-  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
-  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
-  float64x2_t __ret_249; \
-float64x1_t __reint_249 = __s2_249; \
-uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
-  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
-  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
-  __ret_249; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
-  float64x1_t __s0_250 = __p0_250; \
-  float64x1_t __s1_250 = __p1_250; \
-  float64x2_t __s2_250 = __p2_250; \
-  float64x1_t __ret_250; \
-float64x2_t __reint_250 = __s2_250; \
-uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
-  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
-  __ret_250; \
-})
-#else
-#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
-  float64x1_t __s0_251 = __p0_251; \
-  float64x1_t __s1_251 = __p1_251; \
-  float64x2_t __s2_251 = __p2_251; \
-  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
-  float64x1_t __ret_251; \
-float64x2_t __reint_251 = __rev2_251; \
-uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
-  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
-  __ret_251; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
-  float64x2_t __s0_252 = __p0_252; \
-  float64x2_t __s1_252 = __p1_252; \
-  float64x2_t __s2_252 = __p2_252; \
-  float64x2_t __ret_252; \
-float64x2_t __reint_252 = __s2_252; \
-uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
-  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
-  __ret_252; \
-})
-#else
-#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
-  float64x2_t __s0_253 = __p0_253; \
-  float64x2_t __s1_253 = __p1_253; \
-  float64x2_t __s2_253 = __p2_253; \
-  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
-  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
-  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
-  float64x2_t __ret_253; \
-float64x2_t __reint_253 = __rev2_253; \
-uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
-  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
-  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
-  __ret_253; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
-  float64x1_t __s0_254 = __p0_254; \
-  float64x1_t __s1_254 = __p1_254; \
-  float64x1_t __s2_254 = __p2_254; \
-  float64x1_t __ret_254; \
-float64x1_t __reint_254 = __s2_254; \
-uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
-  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
-  __ret_254; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
-  float64x2_t __s0_255 = __p0_255; \
-  float64x2_t __s1_255 = __p1_255; \
-  float64x1_t __s2_255 = __p2_255; \
-  float64x2_t __ret_255; \
-float64x1_t __reint_255 = __s2_255; \
-uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
-  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
-  __ret_255; \
-})
-#else
-#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
-  float64x2_t __s0_256 = __p0_256; \
-  float64x2_t __s1_256 = __p1_256; \
-  float64x1_t __s2_256 = __p2_256; \
-  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
-  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
-  float64x2_t __ret_256; \
-float64x1_t __reint_256 = __s2_256; \
-uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
-  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
-  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
-  __ret_256; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
-  float64x1_t __s0_257 = __p0_257; \
-  float64x1_t __s1_257 = __p1_257; \
-  float64x2_t __s2_257 = __p2_257; \
-  float64x1_t __ret_257; \
-float64x2_t __reint_257 = __s2_257; \
-uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
-  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
-  __ret_257; \
-})
-#else
-#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float64x1_t __s0_258 = __p0_258; \
-  float64x1_t __s1_258 = __p1_258; \
-  float64x2_t __s2_258 = __p2_258; \
-  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
-  float64x1_t __ret_258; \
-float64x2_t __reint_258 = __rev2_258; \
-uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
-  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
-  __ret_258; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float64x2_t __s0_259 = __p0_259; \
-  float64x2_t __s1_259 = __p1_259; \
-  float64x2_t __s2_259 = __p2_259; \
-  float64x2_t __ret_259; \
-float64x2_t __reint_259 = __s2_259; \
-uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
-  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
-  __ret_259; \
-})
-#else
-#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float64x2_t __s0_260 = __p0_260; \
-  float64x2_t __s1_260 = __p1_260; \
-  float64x2_t __s2_260 = __p2_260; \
-  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
-  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
-  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
-  float64x2_t __ret_260; \
-float64x2_t __reint_260 = __rev2_260; \
-uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
-  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
-  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
-  __ret_260; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  uint32x4_t __s0_261 = __p0_261; \
-  uint8x16_t __s1_261 = __p1_261; \
-  uint8x8_t __s2_261 = __p2_261; \
-  uint32x4_t __ret_261; \
-uint8x8_t __reint_261 = __s2_261; \
-uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
-  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
-  __ret_261; \
-})
-#else
-#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  uint32x4_t __s0_262 = __p0_262; \
-  uint8x16_t __s1_262 = __p1_262; \
-  uint8x8_t __s2_262 = __p2_262; \
-  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
-  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_262; \
-uint8x8_t __reint_262 = __rev2_262; \
-uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
-  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
-  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
-  __ret_262; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  int32x4_t __s0_263 = __p0_263; \
-  int8x16_t __s1_263 = __p1_263; \
-  int8x8_t __s2_263 = __p2_263; \
-  int32x4_t __ret_263; \
-int8x8_t __reint_263 = __s2_263; \
-int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
-  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
-  __ret_263; \
-})
-#else
-#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  int32x4_t __s0_264 = __p0_264; \
-  int8x16_t __s1_264 = __p1_264; \
-  int8x8_t __s2_264 = __p2_264; \
-  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
-  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_264; \
-int8x8_t __reint_264 = __rev2_264; \
-int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
-  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
-  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
-  __ret_264; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  uint32x2_t __s0_265 = __p0_265; \
-  uint8x8_t __s1_265 = __p1_265; \
-  uint8x8_t __s2_265 = __p2_265; \
-  uint32x2_t __ret_265; \
-uint8x8_t __reint_265 = __s2_265; \
-uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
-  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
-  __ret_265; \
-})
-#else
-#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  uint32x2_t __s0_266 = __p0_266; \
-  uint8x8_t __s1_266 = __p1_266; \
-  uint8x8_t __s2_266 = __p2_266; \
-  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
-  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_266; \
-uint8x8_t __reint_266 = __rev2_266; \
-uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
-  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
-  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
-  __ret_266; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  int32x2_t __s0_267 = __p0_267; \
-  int8x8_t __s1_267 = __p1_267; \
-  int8x8_t __s2_267 = __p2_267; \
-  int32x2_t __ret_267; \
-int8x8_t __reint_267 = __s2_267; \
-int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
-  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
-  __ret_267; \
-})
-#else
-#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  int32x2_t __s0_268 = __p0_268; \
-  int8x8_t __s1_268 = __p1_268; \
-  int8x8_t __s2_268 = __p2_268; \
-  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
-  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_268; \
-int8x8_t __reint_268 = __rev2_268; \
-int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
-  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
-  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
-  __ret_268; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  uint32x4_t __s0_269 = __p0_269; \
-  uint8x16_t __s1_269 = __p1_269; \
-  uint8x16_t __s2_269 = __p2_269; \
-  uint32x4_t __ret_269; \
-uint8x16_t __reint_269 = __s2_269; \
-uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
-  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
-  __ret_269; \
-})
-#else
-#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  uint32x4_t __s0_270 = __p0_270; \
-  uint8x16_t __s1_270 = __p1_270; \
-  uint8x16_t __s2_270 = __p2_270; \
-  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
-  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_270; \
-uint8x16_t __reint_270 = __rev2_270; \
-uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
-  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
-  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
-  __ret_270; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  int32x4_t __s0_271 = __p0_271; \
-  int8x16_t __s1_271 = __p1_271; \
-  int8x16_t __s2_271 = __p2_271; \
-  int32x4_t __ret_271; \
-int8x16_t __reint_271 = __s2_271; \
-int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
-  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
-  __ret_271; \
-})
-#else
-#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  int32x4_t __s0_272 = __p0_272; \
-  int8x16_t __s1_272 = __p1_272; \
-  int8x16_t __s2_272 = __p2_272; \
-  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
-  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_272; \
-int8x16_t __reint_272 = __rev2_272; \
-int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
-  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
-  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
-  __ret_272; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  uint32x2_t __s0_273 = __p0_273; \
-  uint8x8_t __s1_273 = __p1_273; \
-  uint8x16_t __s2_273 = __p2_273; \
-  uint32x2_t __ret_273; \
-uint8x16_t __reint_273 = __s2_273; \
-uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
-  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
-  __ret_273; \
-})
-#else
-#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  uint32x2_t __s0_274 = __p0_274; \
-  uint8x8_t __s1_274 = __p1_274; \
-  uint8x16_t __s2_274 = __p2_274; \
-  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
-  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_274; \
-uint8x16_t __reint_274 = __rev2_274; \
-uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
-  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
-  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
-  __ret_274; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  int32x2_t __s0_275 = __p0_275; \
-  int8x8_t __s1_275 = __p1_275; \
-  int8x16_t __s2_275 = __p2_275; \
-  int32x2_t __ret_275; \
-int8x16_t __reint_275 = __s2_275; \
-int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
-  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
-  __ret_275; \
-})
-#else
-#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  int32x2_t __s0_276 = __p0_276; \
-  int8x8_t __s1_276 = __p1_276; \
-  int8x16_t __s2_276 = __p2_276; \
-  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
-  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_276; \
-int8x16_t __reint_276 = __rev2_276; \
-int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
-  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
-  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
-  __ret_276; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
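-/*
- * Sketch of the _n_ fixed-point conversions above (semantics per the ARM
- * VCVT fixed-point instructions; illustrative only): the immediate __p1 is
- * the number of fractional bits, so per lane, up to rounding and saturation,
- *
- *   vcvt_n_f16_s16(v, b)  ~  (float16_t)v / (1 << b)
- *   vcvt_n_s16_f16(v, b)  ~  (int16_t)(v * (1 << b))   // rounds toward zero
- *
- * e.g. with b == 4, an int16 lane holding 24 converts to 1.5, and a float16
- * lane holding 1.5 converts back to 24.
- */
-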
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
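-/*
- * The vcvt{a,m,n,p} groups above differ only in rounding mode (per the
- * usual ARM suffix naming): 'a' rounds to nearest, ties away from zero;
- * 'm' toward minus infinity; 'n' to nearest, ties to even; 'p' toward plus
- * infinity.  For a lane holding 1.5: vcvta -> 2, vcvtm -> 1, vcvtn -> 2,
- * vcvtp -> 2; for 2.5: vcvta -> 3, vcvtm -> 2, vcvtn -> 2, vcvtp -> 3.
- * The plain vcvt_s16_f16 / vcvt_u16_f16 above round toward zero.
- */
-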
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
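-/*
- * As the definitions above show, vfms is expressed as a fused multiply-add
- * with a negated multiplicand: vfms(a, b, c) = vfma(a, -b, c) = a - b*c per
- * lane, computed with a single rounding.  For lanes a = 10, b = 2, c = 3
- * the result lane is 10 - 2*3 = 4.
- */
-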
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
-  float16x8_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x8_t __ret_277; \
-  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
-  __ret_277; \
-})
-#else
-#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
-  float16x8_t __s0_278 = __p0_278; \
-  float16x4_t __s1_278 = __p1_278; \
-  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
-  float16x8_t __ret_278; \
-  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
-  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_278; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
-  float16x4_t __s0_279 = __p0_279; \
-  float16x4_t __s1_279 = __p1_279; \
-  float16x4_t __ret_279; \
-  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
-  __ret_279; \
-})
-#else
-#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
-  float16x4_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
-  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
-  float16x4_t __ret_280; \
-  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
-  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
-  __ret_280; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
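-/*
- * Usage sketch for the estimate/step pairs above, built only from intrinsics
- * defined in this section (illustrative, not generated code): vrecpe gives a
- * low-precision reciprocal estimate and vrecps(a, b) computes 2 - a*b, one
- * Newton-Raphson step, so each refinement roughly doubles the correct bits:
- *
- *   float16x4_t e = vrecpe_f16(x);        // e ~ 1/x
- *   e = vmul_f16(vrecps_f16(x, e), e);    // e *= (2 - x*e)
- *
- * Likewise vrsqrts(a, b) computes (3 - a*b) / 2, the matching step for the
- * vrsqrte reciprocal-square-root estimate:
- *
- *   float16x4_t r = vrsqrte_f16(x);                    // r ~ 1/sqrt(x)
- *   r = vmul_f16(vrsqrts_f16(vmul_f16(x, r), r), r);   // one NR step
- */
-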
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
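-/*
- * Sketch of the permute group that closes this section: these builtins fill
- * a two-vector struct through the &__ret out-parameter instead of returning
- * a value.  For 4-lane inputs a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}:
- *
- *   vtrn_f16(a, b).val = { {a0,b0,a2,b2}, {a1,b1,a3,b3} }  // transpose pairs
- *   vuzp_f16(a, b).val = { {a0,a2,b0,b2}, {a1,a3,b1,b3} }  // de-interleave
- *   vzip_f16(a, b).val = { {a0,b0,a1,b1}, {a2,b2,a3,b3} }  // interleave
- */
-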
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float16_t __s0_281 = __p0_281; \
-  float16_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float16_t __ret_281; \
-  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
-  __ret_281; \
-})
-#else
-#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float16_t __s0_282 = __p0_282; \
-  float16_t __s1_282 = __p1_282; \
-  float16x4_t __s2_282 = __p2_282; \
-  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
-  float16_t __ret_282; \
-  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
-  __ret_282; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float16x8_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x4_t __s2_283 = __p2_283; \
-  float16x8_t __ret_283; \
-  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
-  __ret_283; \
-})
-#else
-#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float16x8_t __s0_284 = __p0_284; \
-  float16x8_t __s1_284 = __p1_284; \
-  float16x4_t __s2_284 = __p2_284; \
-  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
-  float16x8_t __ret_284; \
-  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
-  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_284; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float16x4_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x4_t __s2_285 = __p2_285; \
-  float16x4_t __ret_285; \
-  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
-  __ret_285; \
-})
-#else
-#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float16x4_t __s0_286 = __p0_286; \
-  float16x4_t __s1_286 = __p1_286; \
-  float16x4_t __s2_286 = __p2_286; \
-  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
-  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
-  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
-  float16x4_t __ret_286; \
-  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
-  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
-  __ret_286; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float16_t __s0_287 = __p0_287; \
-  float16_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float16_t __ret_287; \
-  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
-  __ret_287; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float16_t __s0_288 = __p0_288; \
-  float16_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_288; \
-  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
-  __ret_288; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float16x8_t __s0_289 = __p0_289; \
-  float16x8_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float16x8_t __ret_289; \
-  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
-  __ret_289; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
-  float16x8_t __s0_290 = __p0_290; \
-  float16x8_t __s1_290 = __p1_290; \
-  float16x8_t __s2_290 = __p2_290; \
-  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_290; \
-  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
-  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_290; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
-  float16x4_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x8_t __s2_291 = __p2_291; \
-  float16x4_t __ret_291; \
-  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
-  __ret_291; \
-})
-#else
-#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
-  float16x4_t __s0_292 = __p0_292; \
-  float16x4_t __s1_292 = __p1_292; \
-  float16x8_t __s2_292 = __p2_292; \
-  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
-  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
-  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_292; \
-  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
-  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
-  __ret_292; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16x8_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __ret_293; \
-  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
-  __ret_293; \
-})
-#else
-#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
-  float16x8_t __s0_294 = __p0_294; \
-  float16x8_t __s1_294 = __p1_294; \
-  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_294; \
-  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
-  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_294; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
-  float16x4_t __s0_295 = __p0_295; \
-  float16x8_t __s1_295 = __p1_295; \
-  float16x4_t __ret_295; \
-  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
-  __ret_295; \
-})
-#else
-#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
-  float16x4_t __s0_296 = __p0_296; \
-  float16x8_t __s1_296 = __p1_296; \
-  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
-  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_296; \
-  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
-  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
-  __ret_296; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
-  float16x8_t __s0_297 = __p0_297; \
-  float16x4_t __s1_297 = __p1_297; \
-  float16x8_t __ret_297; \
-  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
-  __ret_297; \
-})
-#else
-#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
-  float16x8_t __s0_298 = __p0_298; \
-  float16x4_t __s1_298 = __p1_298; \
-  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
-  float16x8_t __ret_298; \
-  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
-  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_298; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
-  float16x4_t __s0_299 = __p0_299; \
-  float16x4_t __s1_299 = __p1_299; \
-  float16x4_t __ret_299; \
-  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
-  __ret_299; \
-})
-#else
-#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
-  float16x4_t __s0_300 = __p0_300; \
-  float16x4_t __s1_300 = __p1_300; \
-  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
-  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
-  float16x4_t __ret_300; \
-  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
-  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
-  __ret_300; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
-  float16x8_t __s0_301 = __p0_301; \
-  float16x8_t __s1_301 = __p1_301; \
-  float16x8_t __ret_301; \
-  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
-  __ret_301; \
-})
-#else
-#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
-  float16x8_t __s0_302 = __p0_302; \
-  float16x8_t __s1_302 = __p1_302; \
-  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_302; \
-  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
-  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_302; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
-  float16x4_t __s0_303 = __p0_303; \
-  float16x8_t __s1_303 = __p1_303; \
-  float16x4_t __ret_303; \
-  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
-  __ret_303; \
-})
-#else
-#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
-  float16x4_t __s0_304 = __p0_304; \
-  float16x8_t __s1_304 = __p1_304; \
-  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
-  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_304; \
-  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
-  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
-  __ret_304; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int32x4_t __s0_305 = __p0_305; \
-  uint8x16_t __s1_305 = __p1_305; \
-  int8x8_t __s2_305 = __p2_305; \
-  int32x4_t __ret_305; \
-int8x8_t __reint_305 = __s2_305; \
-  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
-  __ret_305; \
-})
-#else
-#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32x4_t __s0_306 = __p0_306; \
-  uint8x16_t __s1_306 = __p1_306; \
-  int8x8_t __s2_306 = __p2_306; \
-  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
-  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_306; \
-int8x8_t __reint_306 = __rev2_306; \
-  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
-  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
-  __ret_306; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32x2_t __s0_307 = __p0_307; \
-  uint8x8_t __s1_307 = __p1_307; \
-  int8x8_t __s2_307 = __p2_307; \
-  int32x2_t __ret_307; \
-int8x8_t __reint_307 = __s2_307; \
-  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
-  __ret_307; \
-})
-#else
-#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int32x2_t __s0_308 = __p0_308; \
-  uint8x8_t __s1_308 = __p1_308; \
-  int8x8_t __s2_308 = __p2_308; \
-  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
-  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_308; \
-int8x8_t __reint_308 = __rev2_308; \
-  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
-  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
-  __ret_308; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int32x4_t __s0_309 = __p0_309; \
-  int32x4_t __s1_309 = __p1_309; \
-  int32x2_t __s2_309 = __p2_309; \
-  int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
-  __ret_309; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  int32x4_t __s0_310 = __p0_310; \
-  int32x4_t __s1_310 = __p1_310; \
-  int32x2_t __s2_310 = __p2_310; \
-  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
-  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
-  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
-  int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
-  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
-  __ret_310; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  int16x8_t __s0_311 = __p0_311; \
-  int16x8_t __s1_311 = __p1_311; \
-  int16x4_t __s2_311 = __p2_311; \
-  int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
-  __ret_311; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  int16x8_t __s0_312 = __p0_312; \
-  int16x8_t __s1_312 = __p1_312; \
-  int16x4_t __s2_312 = __p2_312; \
-  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
-  int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
-  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_312; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  int32x2_t __s0_313 = __p0_313; \
-  int32x2_t __s1_313 = __p1_313; \
-  int32x2_t __s2_313 = __p2_313; \
-  int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
-  __ret_313; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  int32x2_t __s0_314 = __p0_314; \
-  int32x2_t __s1_314 = __p1_314; \
-  int32x2_t __s2_314 = __p2_314; \
-  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
-  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
-  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
-  int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
-  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
-  __ret_314; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  int16x4_t __s0_315 = __p0_315; \
-  int16x4_t __s1_315 = __p1_315; \
-  int16x4_t __s2_315 = __p2_315; \
-  int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
-  __ret_315; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  int16x4_t __s0_316 = __p0_316; \
-  int16x4_t __s1_316 = __p1_316; \
-  int16x4_t __s2_316 = __p2_316; \
-  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
-  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
-  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
-  int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
-  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
-  __ret_316; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  int32x4_t __s0_317 = __p0_317; \
-  int32x4_t __s1_317 = __p1_317; \
-  int32x2_t __s2_317 = __p2_317; \
-  int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
-  __ret_317; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  int32x4_t __s0_318 = __p0_318; \
-  int32x4_t __s1_318 = __p1_318; \
-  int32x2_t __s2_318 = __p2_318; \
-  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
-  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
-  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
-  int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
-  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
-  __ret_318; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  int16x8_t __s0_319 = __p0_319; \
-  int16x8_t __s1_319 = __p1_319; \
-  int16x4_t __s2_319 = __p2_319; \
-  int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
-  __ret_319; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  int16x8_t __s0_320 = __p0_320; \
-  int16x8_t __s1_320 = __p1_320; \
-  int16x4_t __s2_320 = __p2_320; \
-  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
-  int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
-  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_320; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  int32x2_t __s0_321 = __p0_321; \
-  int32x2_t __s1_321 = __p1_321; \
-  int32x2_t __s2_321 = __p2_321; \
-  int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
-  __ret_321; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  int32x2_t __s0_322 = __p0_322; \
-  int32x2_t __s1_322 = __p1_322; \
-  int32x2_t __s2_322 = __p2_322; \
-  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
-  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
-  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
-  int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
-  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
-  __ret_322; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  int16x4_t __s0_323 = __p0_323; \
-  int16x4_t __s1_323 = __p1_323; \
-  int16x4_t __s2_323 = __p2_323; \
-  int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
-  __ret_323; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
-  int16x4_t __s0_324 = __p0_324; \
-  int16x4_t __s1_324 = __p1_324; \
-  int16x4_t __s2_324 = __p2_324; \
-  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
-  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
-  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
-  int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
-  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
-  __ret_324; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
-  __ret_325; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
-  __ret_326; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
-  __ret_327; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_328; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
-  int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
-  __ret_329; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
-  int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
-  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
-  __ret_330; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
-  int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
-  __ret_331; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
-  int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
-  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
-  __ret_332; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
-  int32x4_t __s0_333 = __p0_333; \
-  int32x4_t __s1_333 = __p1_333; \
-  int32x4_t __s2_333 = __p2_333; \
-  int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
-  __ret_333; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
-  int32x4_t __s0_334 = __p0_334; \
-  int32x4_t __s1_334 = __p1_334; \
-  int32x4_t __s2_334 = __p2_334; \
-  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
-  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
-  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
-  int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
-  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
-  __ret_334; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
-  int16x8_t __s0_335 = __p0_335; \
-  int16x8_t __s1_335 = __p1_335; \
-  int16x8_t __s2_335 = __p2_335; \
-  int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
-  __ret_335; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
-  int16x8_t __s0_336 = __p0_336; \
-  int16x8_t __s1_336 = __p1_336; \
-  int16x8_t __s2_336 = __p2_336; \
-  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
-  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_336; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
-  int32x2_t __s0_337 = __p0_337; \
-  int32x2_t __s1_337 = __p1_337; \
-  int32x4_t __s2_337 = __p2_337; \
-  int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
-  __ret_337; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
-  int32x2_t __s0_338 = __p0_338; \
-  int32x2_t __s1_338 = __p1_338; \
-  int32x4_t __s2_338 = __p2_338; \
-  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
-  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
-  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
-  int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
-  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
-  __ret_338; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
-  int16x4_t __s0_339 = __p0_339; \
-  int16x4_t __s1_339 = __p1_339; \
-  int16x8_t __s2_339 = __p2_339; \
-  int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
-  __ret_339; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
-  int16x4_t __s0_340 = __p0_340; \
-  int16x4_t __s1_340 = __p1_340; \
-  int16x8_t __s2_340 = __p2_340; \
-  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
-  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
-  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
-  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
-  __ret_340; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
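-
-// vcge*: lane-wise "greater than or equal". Results are masks in the unsigned
-// type of matching width, e.g. uint64x2_t m = vcgeq_f64(a, b) sets
-// m[i] = (a[i] >= b[i]) ? ~0ULL : 0. Single-lane (x1) and scalar forms need
-// no lane reversal, so they are defined once, outside the endianness guards.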
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
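-
-// vcgez*: lane-wise "greater than or equal to zero" for signed and floating
-// types. The trailing integer passed to the generic builtin encodes the NEON
-// vector type; in this block 48-51 select uint8x16/uint16x8/uint32x4/uint64x2
-// and 16-19 the corresponding 64-bit unsigned vectors.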
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
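-
-// vcgt*: lane-wise "greater than" between two vectors; the scalar vcgtd_*
-// ('d' = 64-bit) and vcgts_f32 ('s' = 32-bit) forms return 0 or an all-ones
-// integer rather than a vector mask.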
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
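-
-// vcgtz*: lane-wise "greater than zero" (signed and floating inputs only;
-// an unsigned value is never below zero, and u > 0 is just u != 0).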
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgtzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
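-
-// vcle*: lane-wise "less than or equal" between two vectors, mirroring the
-// vcge* definitions above with the operands' roles swapped.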
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
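-
-// vclez*: lane-wise "less than or equal to zero" (signed and floating types).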
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vclezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
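-
-// vclt*: lane-wise "less than" between two vectors.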
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
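-
-// vcltz*: lane-wise "less than zero" (signed and floating types).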
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcltzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
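-
-// vcombine_*: concatenate two 64-bit halves into one 128-bit vector, with the
-// first argument in the low lanes. A minimal usage sketch:
-//   float64x1_t lo = vdup_n_f64(1.0), hi = vdup_n_f64(2.0);
-//   float64x2_t v  = vcombine_f64(lo, hi);   /* v = {1.0, 2.0} */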
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
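-// vcopyq_lane_*: copy one lane of a 64-bit vector into a lane of a 128-bit
-// vector, built on vsetq_lane/vget_lane. These are macros rather than inline
-// functions because the lane indices must be integer constant expressions;
-// the numeric suffixes on the locals (__s0_341, ...) keep temporaries unique
-// when the macros nest. Sketch: dst = vcopyq_lane_s32(dst, 3, src, 0) copies
-// src lane 0 into dst lane 3.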
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
-  __ret_357; \
-})
-#else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
-  __ret_358; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
-  __ret_359; \
-})
-#else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
-  __ret_360; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
-  __ret_361; \
-})
-#else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_362; \
-})
-#endif
-
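-// vcopy_lane_*: the 64-bit destination variants of the lane-copy macros above.
-// The big-endian bodies use the __noswap_ set/get helpers, which operate on
-// already-reversed vectors without performing another lane reversal.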
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
-  __ret_363; \
-})
-#else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_364; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
-  __ret_365; \
-})
-#else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
-  __ret_366; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
-  __ret_367; \
-})
-#else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
-  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_368; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
-  __ret_369; \
-})
-#else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
-  __ret_370; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
-  __ret_371; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
-  __ret_372; \
-})
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
-  __ret_373; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
-  __ret_374; \
-})
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_375; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
-  __ret_376; \
-})
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
-  __ret_377; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
-  __ret_378; \
-})
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
-  __ret_379; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
-  __ret_381; \
-})
-#else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
-  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
-  __ret_382; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
-  __ret_383; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_384; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
-  __ret_385; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_386; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
-  __ret_387; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_388; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
-  __ret_389; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
-  __ret_390; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
-  __ret_391; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
-  __ret_392; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
-  __ret_393; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_394; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
-  __ret_395; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
-  __ret_397; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
-  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
-  __ret_398; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
-  __ret_399; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
-  __ret_400; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
-  __ret_401; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
-  __ret_402; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
-  __ret_403; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_404; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
-  __ret_405; \
-})
-#else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_406; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
-  __ret_407; \
-})
-#else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
-  __ret_408; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
-  __ret_409; \
-})
-#else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
-  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_410; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
-  __ret_411; \
-})
-#else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
-  __ret_412; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
-  __ret_413; \
-})
-#else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
-  __ret_414; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
-  __ret_415; \
-})
-#else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
-  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
-  __ret_416; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
-  __ret_417; \
-})
-#else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_418; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
-  __ret_419; \
-})
-#else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
-  __ret_420; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
-  __ret_421; \
-})
-#else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
-  __ret_422; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
-  __ret_423; \
-})
-#else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
-  __ret_424; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
-  __ret_425; \
-})
-#else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
-  __ret_426; \
-})
-#endif
-
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
-})
-#else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
-})
-#else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
-})
-#else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
-})
-#endif
-
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
-})
-#else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
-  __ret_453; \
-})
-#else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
-  __ret_454; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
-  __ret_455; \
-})
-#else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_456; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
-  __ret_457; \
-})
-#else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_458; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
-  __ret_459; \
-})
-#else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
-  __ret_460; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
-  __ret_461; \
-})
-#else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
-  __ret_462; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
-  __ret_463; \
-})
-#else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
-  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_464; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
-  __ret_465; \
-})
-#else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
-  __ret_466; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
-  __ret_467; \
-})
-#else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
-  __ret_468; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
-  __ret_469; \
-})
-#else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_470; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
-  __ret_471; \
-})
-#else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
-  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
-  __ret_473; \
-})
-#else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
-  __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
-  __ret_475; \
-})
-#else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
-  __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
-  __ret_477; \
-})
-#else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
-  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
-  __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
-  __ret_479; \
-})
-#else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
-  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
-  __ret_481; \
-})
-#else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
-  __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
-  __ret_483; \
-})
-#else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
-  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
-  __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
-  __ret_485; \
-})
-#else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
-  __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
-  __ret_487; \
-})
-#else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
-  __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
-  __ret_489; \
-})
-#else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
-  __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
-  __ret_491; \
-})
-#else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
-  __ret_492; \
-})
-#endif
-
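-// A rough note on the paired definitions above: every lane-indexed
-// intrinsic in this header is emitted twice.  The __LITTLE_ENDIAN__
-// branch forwards straight to the splat helper, while the big-endian
-// branch reverses the input's lanes with __builtin_shufflevector, calls
-// the __noswap_ variant (which assumes lanes are already in architectural
-// order), and reverses the result back, keeping user-visible lane
-// numbering identical on both endiannesses.  As an illustrative sketch
-// (not part of the generated code), on big-endian vdup_laneq_s32(v, 1)
-// behaves like:
-//
-//   int32x4_t rev = __builtin_shufflevector(v, v, 3, 2, 1, 0); // flip lanes
-//   int32x2_t r = __noswap_splat_laneq_s32(rev, 1);            // broadcast
-//   return __builtin_shufflevector(r, r, 1, 0);                // flip back
-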
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
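-// The vdup_n_* definitions above broadcast one scalar into every lane via
-// a compound literal; the big-endian versions reverse the freshly built
-// vector once so its lane order matches the convention used throughout
-// this header.  Rough usage sketch:
-//
-//   float64x2_t twos = vdupq_n_f64(2.0);    /* {2.0, 2.0} */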
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
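-// vext above concatenates two vectors and extracts a window starting at
-// the immediate lane: vextq_f64(a, b, 1) yields {a[1], b[0]}.  The
-// trailing constant passed to the generic __builtin_neon_v* builtins
-// (6, 10, 38, 42, ...) is an internal type code, not a lane index.
-// Rough sketch:
-//
-//   float64x2_t a = {1.0, 2.0}, b = {3.0, 4.0};
-//   float64x2_t w = vextq_f64(a, b, 1);      /* {2.0, 3.0} */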
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
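-// vfma(a, b, c) above is the fused multiply-add a + b * c per lane,
-// rounded once.  The extra __noswap_vfmaq_f64 in the big-endian branch
-// lets later definitions (vfmaq_n_f64, vfmsq_f64, ...) compose on values
-// whose lanes are already reversed without a redundant double swap.
-// Rough sketch:
-//
-//   float64x2_t acc = vfmaq_f64(a, b, c);    /* acc[i] = a[i] + b[i]*c[i] */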
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
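-// The _lane forms above multiply by one selected element of the final
-// vector argument: vfmas_lane_f32(s0, s1, v, l) is s0 + s1 * v[l], and
-// vfmaq_lane_f32(a, b, v, l) broadcasts v[l] across the whole product.
-// Note that on big-endian only multi-lane vectors get reversed; scalars
-// and single-lane float64x1_t operands pass through unchanged.  Rough
-// sketch:
-//
-//   float32x2_t v = {10.0f, 20.0f};
-//   float32_t r = vfmas_lane_f32(1.0f, 2.0f, v, 1);  /* 1 + 2*20 = 41 */
-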
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
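-// The _laneq forms above mirror the _lane forms, except the lane index
-// selects from a full 128-bit vector, e.g. vfma_laneq_f32 can pick any of
-// four lanes instead of two.  Rough sketch (acc and x are float32x2_t):
-//
-//   float32x4_t v = {1.0f, 2.0f, 3.0f, 4.0f};
-//   float32x2_t r = vfma_laneq_f32(acc, x, v, 3);    /* acc + x * 4.0f */
-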
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
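-// The _n forms above splat a scalar into the multiplier and defer to the
-// plain vfma: vfmaq_n_f64(a, b, s) is vfmaq_f64(a, b, (float64x2_t){s, s}).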
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
-  __ret_509; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
-  __ret_510; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
-  __ret_511; \
-})
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
-  __ret_512; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
-})
-#else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
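-// vfms above is vfma with the second operand negated, so vfmsq_f64(a, b,
-// c) computes a - b * c per lane, still with a single rounding; the
-// _lane/_laneq/_n subtract variants reuse the matching vfma definitions
-// the same way.  Rough sketch:
-//
-//   float64x2_t r = vfmsq_f64(a, b, c);      /* r[i] = a[i] - b[i]*c[i] */
-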
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
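-// vget_high/vget_low above split a 128-bit vector into its upper and
-// lower 64-bit halves, and vget_lane/vgetq_lane extract one scalar; on
-// big-endian the input is reversed first so "high" still means the
-// higher-numbered user-visible lanes.  Rough sketch:
-//
-//   float64x2_t v = {1.0, 2.0};
-//   float64x1_t hi = vget_high_f64(v);       /* {2.0} */
-//   float64_t s = vgetq_lane_f64(v, 0);      /* 1.0 */
-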
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
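-// In the group above, vld1 loads consecutive elements from memory,
-// vld1_dup loads one element and broadcasts it to every lane, and
-// vld1_lane loads a single element into a chosen lane of an existing
-// vector; the big-endian variants reverse the loaded value so memory
-// order and user lane order agree.  Rough sketch:
-//
-//   double buf[2] = {1.0, 2.0};
-//   float64x2_t v = vld1q_f64(buf);          /* {1.0, 2.0} */
-//   float64x2_t d = vld1q_dup_f64(buf);      /* {1.0, 1.0} */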
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
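-// The _x2/_x3/_x4 forms above load two, three, or four back-to-back
-// vectors in one call, filling the .val[] members of the returned struct
-// through the pointer handed to the builtin; big-endian reverses each
-// member individually.  Rough sketch:
-//
-//   double buf[4] = {1.0, 2.0, 3.0, 4.0};
-//   float64x2x2_t p = vld1q_f64_x2(buf);   /* val[0]={1,2}, val[1]={3,4} */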
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
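(Editorial aside.) The _dup loaders above read one structure's worth of elements and broadcast each element to every lane of its vector. A hedged sketch under the same AArch64 assumption (hypothetical helper name):

#include <arm_neon.h>

/* vld2q_dup_f64 yields val[0] = {p[0], p[0]} and val[1] = {p[1], p[1]}. */
static float64x2x2_t broadcast_pair(const double *p) {
  return vld2q_dup_f64(p);
}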
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
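(Editorial aside.) The _lane loaders defined above overwrite a single lane of each vector in the passed-in tuple and keep the remaining lanes from it; the lane index must be a compile-time constant. A small sketch, AArch64 assumed and helper name hypothetical:

#include <arm_neon.h>

/* Writes p[0] into init.val[0] lane 1 and p[1] into init.val[1] lane 1;
 * all other lanes are carried over from init. */
static uint64x2x2_t load_lane1(const uint64_t *p, uint64x2x2_t init) {
  return vld2q_lane_u64(p, init, 1);
}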
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
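(Editorial aside.) The vmaxv*/vminv* intrinsics in this stretch are across-lanes reductions: each folds a whole vector into one scalar, which is why the big-endian variants only reverse the input. A minimal sketch (hypothetical helper, AArch64 assumed):

#include <arm_neon.h>

/* Horizontal maximum of four packed floats. */
static float max4(const float *p) {
  return vmaxvq_f32(vld1q_f32(p));
}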
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
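(Editorial aside.) As the definitions above show, vmlaq_f64/vmla_f64 expand to a plain `__p0 + __p1 * __p2`, i.e. a separate multiply and add; when a fused multiply-add is required, vfmaq_f64/vfma_f64 are the usual alternatives. A sketch (hypothetical helper, AArch64 assumed):

#include <arm_neon.h>

/* acc + a * x computed per lane, as an unfused multiply then add. */
static float64x2_t axpy(float64x2_t acc, float64x2_t a, float64x2_t x) {
  return vmlaq_f64(acc, a, x);
}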
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
-  __ret_531; \
-})
-#else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
-  __ret_532; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
-  __ret_533; \
-})
-#else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
-  __ret_534; \
-})
-#endif
-
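-// vmlal_*lane* forms: widening multiply-accumulate. For example,
-// vmlal_high_lane_u32(a, b, v, k) expands (little-endian) to
-// a + vmull_u32(vget_high_u32(b), splat_lane_u32(v, k)), widening the
-// 32-bit products to 64 bits before accumulating.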
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
-  __ret_535; \
-})
-#else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
-  __ret_536; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
-  __ret_537; \
-})
-#else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
-  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
-  __ret_538; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
-  __ret_539; \
-})
-#else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
-  __ret_540; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
-  __ret_541; \
-})
-#else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
-  __ret_542; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
-  __ret_543; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
-  __ret_544; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
-  __ret_545; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
-  __ret_546; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
-  int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
-  __ret_547; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
-  int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
-  int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
-  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
-  __ret_548; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
-  int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
-  __ret_549; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
-  int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
-  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
-  __ret_550; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
-  uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
-  uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
-  __ret_551; \
-})
-#else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
-  uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
-  uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
-  uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
-  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
-  __ret_552; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
-  uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
-  uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
-  __ret_553; \
-})
-#else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
-  uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
-  uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
-  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
-  __ret_554; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
-  int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
-  int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
-  __ret_555; \
-})
-#else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
-  int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
-  int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
-  int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
-  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
-  __ret_556; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
-  int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
-  int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
-  __ret_557; \
-})
-#else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
-  int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
-  int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
-  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
-  __ret_558; \
-})
-#endif
-
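-// vmls*: multiply-subtract counterparts of the vmla* forms above, with
-// identical structure but the accumulate replaced by __p0 - __p1 * __p2.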
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
-  __ret_575; \
-})
-#else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
-  __ret_576; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
-  __ret_577; \
-})
-#else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
-  __ret_578; \
-})
-#endif
-
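-// vmlsl_*lane* forms: widening multiply-subtract, mirroring the vmlal_*
-// expansions with the vmull result subtracted from the wide accumulator
-// instead of added.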
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
-  __ret_579; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
-  __ret_580; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
-  __ret_581; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
-  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
-  __ret_582; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
-  __ret_583; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
-  __ret_584; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
-  __ret_585; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
-  __ret_586; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
-  __ret_587; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
-  __ret_588; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
-  __ret_589; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
-  __ret_590; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
-  int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
-  __ret_591; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
-  int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
-  int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
-  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
-  __ret_592; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
-  int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
-  __ret_593; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
-  int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
-  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
-  __ret_594; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
-  uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
-  uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
-  __ret_595; \
-})
-#else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
-  uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
-  uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
-  uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
-  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
-  __ret_596; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
-  uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
-  uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
-  __ret_597; \
-})
-#else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
-  uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
-  uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
-  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
-  __ret_598; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
-  int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
-  int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
-  __ret_599; \
-})
-#else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
-  int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
-  int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
-  int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
-  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
-  __ret_600; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
-  int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
-  int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
-  __ret_601; \
-})
-#else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
-  int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
-  int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
-  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
-  __ret_602; \
-})
-#endif
-
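-// vmov_n* / vmovq_n*: broadcast a scalar into every lane via a compound
-// literal. The big-endian vmovq_n variants still emit a lane reversal,
-// a no-op on a splat, presumably kept for generator uniformity.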
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
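-// vmovl_high_*: widen the high half of a vector by reinterpreting a
-// shift-left-long by zero, e.g. vmovl_high_u8(v) is
-// (uint16x8_t)vshll_n_u8(vget_high_u8(v), 0). The big-endian branch also
-// defines a __noswap_ helper for use inside other reversed expansions.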
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
-  return __ret_619;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
-  return __ret_620;
-}
-#endif
-
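-// vmovn_high_*: narrow __p1 to half width and append it to the low half
-// __p0, e.g. vcombine_u16(__p0, vmovn_u32(__p1)) for the u32 case.
-// Illustrative usage for narrowing two full vectors:
-//   uint16x8_t narrow_pair(uint32x4_t lo, uint32x4_t hi) {
-//     return vmovn_high_u32(vmovn_u32(lo), hi);
-//   }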
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
-})
-#else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
-})
-#else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
-})
-#else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
-})
-#else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
-  __ret_646; \
-})
-#else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
-  __ret_647; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
-  __ret_648; \
-})
-#else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
-  __ret_649; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
-  __ret_650; \
-})
-#else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
-  __ret_651; \
-})
-#endif
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
-})
-#else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
-})
-#else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
-})
-#else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
-})
-#else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
-})
-#else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
-})
-#else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
-})
-#else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
-})
-#else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
-})
-#else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
-})
-#else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
-})
-#else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
-})
-#else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
-  __ret_692; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
-  __ret_693; \
-})
-#else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
-  __ret_694; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
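-// Scalar-by-lane forms: multiply a scalar operand by one extracted vector lane.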
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
-})
-#endif
-
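-// vqdmull_laneq_*: doubling multiply long of a 64-bit vector by a single lane
-// of a 128-bit vector, splatted across all lanes.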
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
-})
-#else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
-})
-#else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
-})
-#endif
-
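-// Scalar saturating narrow (SQXTN/UQXTN, scalar forms): halve the element
-// width, clamping values that do not fit the narrower type.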
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
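-// vqmovn_high_*: narrow the second operand into the high half of the result;
-// the first operand supplies the already-narrowed low half.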
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
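-// Scalar signed-to-unsigned saturating narrow (SQXTUN, scalar forms): negative
-// inputs clamp to 0, large inputs to the unsigned maximum of the narrower type.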
-__ai uint16_t vqmovuns_s32(int32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovund_s64(int64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovunh_s16(int16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
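-// Saturating negate: the most negative value negates to the most positive
-// instead of wrapping.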
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
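-// Scalar saturating rounding doubling multiply high (SQRDMULH):
-// saturate((2 * __p0 * __p1 + (1 << (bits - 1))) >> bits).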
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
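-// Scalar saturating rounding shift left by a signed, register-supplied count;
-// negative counts shift right with rounding.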
-__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
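-// vqrshrn_high_n_*: rounding shift right by an immediate, narrow with
-// saturation, and pack next to the given low half.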
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
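-// vqrshrun_high_n_*: same pattern with signed input and unsigned saturated
-// output.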
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
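-// Scalar saturating shift left by a register-supplied count (SQSHL/UQSHL,
-// scalar forms); the non-rounding counterpart of vqrshl above.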
-__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
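-// Immediate-count saturating shifts; the vqshlu*_n forms take signed input
-// and produce an unsigned saturated result (SQSHLU).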
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
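-// vqshrn_high_n_* / vqshrun_high_n_*: truncating (non-rounding) variants of
-// the shift-right-narrow-into-high-half pattern above.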
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
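-// Scalar saturating subtract (SQSUB/UQSUB, scalar forms).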
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
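-// vqtbl1*: table lookup into a single 128-bit table register; out-of-range
-// indices produce 0 (TBL semantics).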
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
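-// vqtbl2*: lookup into a pair of table registers treated as one contiguous
-// 32-byte table.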
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
-})
-#else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
-})
-#else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
-})
-#else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
-})
-#else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
-})
-#else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
-})
-#else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrsqrted_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrsqrtes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
-  return __ret;
-}
-#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
-  __ret; \
-})
-__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
-})
-#else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
-})
-#else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
-})
-#else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
-})
-#else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
-})
-#else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
-})
-#else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
-  uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
-  __ret_815; \
-})
-#else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
-  uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
-  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_816; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
-  __ret_817; \
-})
-#else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
-  __ret_818; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
-  __ret_819; \
-})
-#else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_820; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
-  int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
-  __ret_821; \
-})
-#else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
-  int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
-  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_822; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
-  __ret_823; \
-})
-#else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
-  __ret_824; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
-  __ret_825; \
-})
-#else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_826; \
-})
-#endif
-
-#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vst1_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
-})
-#else
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
-})
-#else
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
-})
-#endif
-
-#define vst1_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
-})
-#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-})
-#else
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-})
-#else
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-})
-#endif
-
-#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-})
-#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst2_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#define vst2_f64(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-})
-#else
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-})
-#else
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-})
-#else
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-})
-#else
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-})
-#else
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-})
-#else
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-})
-#else
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
-})
-#endif
-
-#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-})
-#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-})
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-})
-#define vst3_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#define vst3_f64(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-})
-#else
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-})
-#else
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-})
-#else
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-})
-#else
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-})
-#else
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-})
-#else
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-})
-#else
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
-})
-#endif
-
-#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-})
-#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-})
-#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-})
-#define vst4_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#define vst4_f64(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-})
-#else
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-})
-#else
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-})
-#else
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-})
-#else
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-})
-#else
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-})
-#else
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-})
-#else
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
-})
-#endif
-
-#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-})
-#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-})
-#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-})
-#define vstrq_p128(__p0, __p1) __extension__ ({ \
-  poly128_t __s1 = __p1; \
-  __builtin_neon_vstrq_p128(__p0, __s1); \
-})
-__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
-})
-#else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
-})
-#else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
-})
-#else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
-})
-#else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
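-// vzip2 family: interleave the high halves of the two sources (for 8-lane
-// vectors, lanes 4..7 of each input), complementing vzip1 above, which
-// interleaves the low halves.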
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
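-// vaba/vabaq: absolute difference and accumulate. Each result lane is
-// __p0 + |__p1 - __p2|, built on the corresponding vabd/vabdq intrinsic.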
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + vabdq_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdq_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdq_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + vabdq_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdq_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdq_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + vabd_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + vabd_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + vabd_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + vabd_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + vabd_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + vabd_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
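-// vabdl: widening absolute difference. vabd is computed on the narrow
-// inputs and the result is zero-extended with vmovl; the signed variants
-// also widen through vmovl_u* because |a - b| is always non-negative.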
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
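-// vaddl: long add. Both narrow operands are widened with vmovl before the
-// add, so the sum cannot wrap in the narrow element type.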
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
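-// vaddw: wide add. Only the second, narrow operand is widened; it is then
-// added to the already double-width first operand.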
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
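-// There is no dedicated float16 lane-extract builtin, so vget_lane_f16 and
-// vgetq_lane_f16 reinterpret the vector as int16 lanes, extract with
-// vget_lane_s16 / vgetq_lane_s16, and bit-cast the result back to float16_t.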
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
-})
-#else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
-})
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
-})
-#else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
-})
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
-})
-#endif
-
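-// vmlal: widening multiply-accumulate; adds the double-width product
-// vmull(__p1, __p2) to the wide accumulator __p0.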
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
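-// vmlal_lane: multiply-accumulate against a single lane of __p2, which is
-// broadcast across a vector with splat_lane before the widening multiply.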
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
-})
-#else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
-})
-#else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
-})
-#else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
-})
-#else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
-})
-#endif
-
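-// vmlal_n: multiply-accumulate against a scalar __p2, broadcast into every
-// lane via a compound literal before the widening multiply.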
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
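-// vmlsl: widening multiply-subtract; the double-width product
-// vmull(__p1, __p2) is subtracted from the wide accumulator __p0.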
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
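-// vmlsl_lane: like vmlal_lane, but the lane-broadcast product is subtracted
-// from the accumulator instead of added.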
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
-})
-#else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
-})
-#else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
-})
-#else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
-})
-#else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
-})
-#else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
-})
-#else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
-})
-#endif
-
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
-})
-#else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
-})
-#else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
-})
-#else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
-})
-#else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_high_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
-  float32x4_t __s0_881 = __p0_881; \
-  float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
-  __ret_881; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
-  float32x4_t __s0_882 = __p0_882; \
-  float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
-  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
-  float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
-  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
-  __ret_882; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
-  float32x2_t __s0_883 = __p0_883; \
-  float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
-  __ret_883; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
-  float32x2_t __s0_884 = __p0_884; \
-  float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
-  float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
-  float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
-  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
-  __ret_884; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
-  float32x4_t __s0_885 = __p0_885; \
-  float16x8_t __s1_885 = __p1_885; \
-  float16x4_t __s2_885 = __p2_885; \
-  float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
-  __ret_885; \
-})
-#else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
-  float32x4_t __s0_886 = __p0_886; \
-  float16x8_t __s1_886 = __p1_886; \
-  float16x4_t __s2_886 = __p2_886; \
-  float32x4_t __rev0_886;  __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \
-  float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
-  float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
-  __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
-  __ret_886; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
-  float32x2_t __s0_887 = __p0_887; \
-  float16x4_t __s1_887 = __p1_887; \
-  float16x4_t __s2_887 = __p2_887; \
-  float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
-  __ret_887; \
-})
-#else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
-  float32x2_t __s0_888 = __p0_888; \
-  float16x4_t __s1_888 = __p1_888; \
-  float16x4_t __s2_888 = __p2_888; \
-  float32x2_t __rev0_888;  __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \
-  float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
-  float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
-  float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
-  __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
-  __ret_888; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
-  float32x4_t __s0_889 = __p0_889; \
-  float16x8_t __s1_889 = __p1_889; \
-  float16x4_t __s2_889 = __p2_889; \
-  float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
-  __ret_889; \
-})
-#else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
-  float32x4_t __s0_890 = __p0_890; \
-  float16x8_t __s1_890 = __p1_890; \
-  float16x4_t __s2_890 = __p2_890; \
-  float32x4_t __rev0_890;  __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \
-  float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
-  float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
-  __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
-  __ret_890; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
-  float32x2_t __s0_891 = __p0_891; \
-  float16x4_t __s1_891 = __p1_891; \
-  float16x4_t __s2_891 = __p2_891; \
-  float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
-  __ret_891; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
-  float32x2_t __s0_892 = __p0_892; \
-  float16x4_t __s1_892 = __p1_892; \
-  float16x4_t __s2_892 = __p2_892; \
-  float32x2_t __rev0_892;  __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \
-  float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
-  float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
-  float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
-  __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
-  __ret_892; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
-  float32x4_t __s0_893 = __p0_893; \
-  float16x8_t __s1_893 = __p1_893; \
-  float16x8_t __s2_893 = __p2_893; \
-  float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
-  __ret_893; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
-  float32x4_t __s0_894 = __p0_894; \
-  float16x8_t __s1_894 = __p1_894; \
-  float16x8_t __s2_894 = __p2_894; \
-  float32x4_t __rev0_894;  __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \
-  float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
-  __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
-  __ret_894; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
-  float32x2_t __s0_895 = __p0_895; \
-  float16x4_t __s1_895 = __p1_895; \
-  float16x8_t __s2_895 = __p2_895; \
-  float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
-  __ret_895; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
-  float32x2_t __s0_896 = __p0_896; \
-  float16x4_t __s1_896 = __p1_896; \
-  float16x8_t __s2_896 = __p2_896; \
-  float32x2_t __rev0_896;  __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \
-  float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
-  float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
-  __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
-  __ret_896; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
-  float32x4_t __s0_897 = __p0_897; \
-  float16x8_t __s1_897 = __p1_897; \
-  float16x8_t __s2_897 = __p2_897; \
-  float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
-  __ret_897; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
-  float32x4_t __s0_898 = __p0_898; \
-  float16x8_t __s1_898 = __p1_898; \
-  float16x8_t __s2_898 = __p2_898; \
-  float32x4_t __rev0_898;  __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \
-  float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
-  __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
-  __ret_898; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
-  float32x2_t __s0_899 = __p0_899; \
-  float16x4_t __s1_899 = __p1_899; \
-  float16x8_t __s2_899 = __p2_899; \
-  float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
-  __ret_899; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
-  float32x2_t __s0_900 = __p0_900; \
-  float16x4_t __s1_900 = __p1_900; \
-  float16x8_t __s2_900 = __p2_900; \
-  float32x2_t __rev0_900;  __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \
-  float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
-  float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
-  __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
-  __ret_900; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
-  __ret_901; \
-})
-#else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
-  __ret_902; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
-  __ret_903; \
-})
-#else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
-  __ret_904; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
-  __ret_905; \
-})
-#else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
-  __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
-  __ret_906; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
-  __ret_907; \
-})
-#else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
-  __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
-  __ret_908; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
-  __ret_909; \
-})
-#else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
-  __ret_910; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
-  __ret_911; \
-})
-#else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
-  __ret_912; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
-  __ret_913; \
-})
-#else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
-  __ret_914; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
-  __ret_915; \
-})
-#else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
-  __ret_916; \
-})
-#endif
-
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
-  __ret_917; \
-})
-#else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
-  __ret_918; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
-  __ret_919; \
-})
-#else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
-  __ret_920; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
-  __ret_921; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
-  __ret_922; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
-  __ret_923; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
-  __ret_924; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
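/* Illustrative sketch (assumes AArch64 + <arm_neon.h>; vaddlvq_u16 is the
 * standard AArch64 horizontal add, not shown in this diff): a 16-lane
 * sum-of-absolute-differences built from the vabdl/vabdl_high pair above. */
static inline uint32_t sad16(uint8x16_t a, uint8x16_t b) {
  uint16x8_t lo = vabdl_u8(vget_low_u8(a), vget_low_u8(b)); /* lanes 0..7  */
  uint16x8_t hi = vabdl_high_u8(a, b);                      /* lanes 8..15 */
  return vaddlvq_u16(vaddq_u16(lo, hi));                    /* horizontal sum */
}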
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
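/* Sketch (assumes <arm_neon.h>): vaddl_high_u8 above is literally
 * vmovl_high_u8(a) + vmovl_high_u8(b), i.e. zero-extend the high eight byte
 * lanes to 16 bits before adding, so the sum cannot wrap the way an 8-bit
 * vaddq_u8 would. Pairing it with vaddl_u8 widens a full 16-lane add: */
static inline void addl_all(uint8x16_t a, uint8x16_t b,
                            uint16x8_t *lo, uint16x8_t *hi) {
  *lo = vaddl_u8(vget_low_u8(a), vget_low_u8(b)); /* lanes 0..7  */
  *hi = vaddl_high_u8(a, b);                      /* lanes 8..15 */
}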
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
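/* Sketch: the vaddw_high forms add the zero/sign-extended high lanes of a
 * narrow vector directly into a wide accumulator, saving the explicit
 * vmovl step. A byte-accumulation helper (assumes <arm_neon.h>): */
static inline uint16x8_t accumulate_bytes(uint16x8_t acc, uint8x16_t v) {
  acc = vaddw_u8(acc, vget_low_u8(v)); /* lanes 0..7  */
  acc = vaddw_high_u8(acc, v);         /* lanes 8..15 */
  return acc;
}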
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
-  poly64x2_t __s0_925 = __p0_925; \
-  poly64x1_t __s2_925 = __p2_925; \
-  poly64x2_t __ret_925; \
-  __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
-  __ret_925; \
-})
-#else
-#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
-  poly64x2_t __s0_926 = __p0_926; \
-  poly64x1_t __s2_926 = __p2_926; \
-  poly64x2_t __rev0_926;  __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \
-  poly64x2_t __ret_926; \
-  __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \
-  __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \
-  __ret_926; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
-  float64x2_t __s0_927 = __p0_927; \
-  float64x1_t __s2_927 = __p2_927; \
-  float64x2_t __ret_927; \
-  __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \
-  __ret_927; \
-})
-#else
-#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
-  float64x2_t __s0_928 = __p0_928; \
-  float64x1_t __s2_928 = __p2_928; \
-  float64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
-  float64x2_t __ret_928; \
-  __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \
-  __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \
-  __ret_928; \
-})
-#endif
-
-#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
-  poly64x1_t __s0_929 = __p0_929; \
-  poly64x1_t __s2_929 = __p2_929; \
-  poly64x1_t __ret_929; \
-  __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \
-  __ret_929; \
-})
-#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
-  float64x1_t __s0_930 = __p0_930; \
-  float64x1_t __s2_930 = __p2_930; \
-  float64x1_t __ret_930; \
-  __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \
-  __ret_930; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
-  poly64x2_t __s0_931 = __p0_931; \
-  poly64x2_t __s2_931 = __p2_931; \
-  poly64x2_t __ret_931; \
-  __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
-  __ret_931; \
-})
-#else
-#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
-  poly64x2_t __s0_932 = __p0_932; \
-  poly64x2_t __s2_932 = __p2_932; \
-  poly64x2_t __rev0_932;  __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \
-  poly64x2_t __rev2_932;  __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \
-  poly64x2_t __ret_932; \
-  __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \
-  __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \
-  __ret_932; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
-  float64x2_t __s0_933 = __p0_933; \
-  float64x2_t __s2_933 = __p2_933; \
-  float64x2_t __ret_933; \
-  __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
-  __ret_933; \
-})
-#else
-#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
-  float64x2_t __s0_934 = __p0_934; \
-  float64x2_t __s2_934 = __p2_934; \
-  float64x2_t __rev0_934;  __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \
-  float64x2_t __rev2_934;  __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \
-  float64x2_t __ret_934; \
-  __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \
-  __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \
-  __ret_934; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \
-  poly64x1_t __s0_935 = __p0_935; \
-  poly64x2_t __s2_935 = __p2_935; \
-  poly64x1_t __ret_935; \
-  __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \
-  __ret_935; \
-})
-#else
-#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \
-  poly64x1_t __s0_936 = __p0_936; \
-  poly64x2_t __s2_936 = __p2_936; \
-  poly64x2_t __rev2_936;  __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \
-  poly64x1_t __ret_936; \
-  __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \
-  __ret_936; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \
-  float64x1_t __s0_937 = __p0_937; \
-  float64x2_t __s2_937 = __p2_937; \
-  float64x1_t __ret_937; \
-  __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \
-  __ret_937; \
-})
-#else
-#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \
-  float64x1_t __s0_938 = __p0_938; \
-  float64x2_t __s2_938 = __p2_938; \
-  float64x2_t __rev2_938;  __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \
-  float64x1_t __ret_938; \
-  __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \
-  __ret_938; \
-})
-#endif
-
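/* Sketch, using only the macros defined above: vcopyq_laneq_f64(dst, d,
 * src, s) expands to vsetq_lane_f64(vgetq_lane_f64(src, s), dst, d) --
 * read lane s of src and insert it at lane d of dst. All lane indices
 * must be integer constant expressions. */
static inline float64x2_t copy_hi_to_lo(float64x2_t dst, float64x2_t src) {
  return vcopyq_laneq_f64(dst, 0, src, 1); /* dst[0] = src[1], dst[1] kept */
}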
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
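/* Sketch (assumes <arm_neon.h>): vmlal_high_u8(acc, a, b) ==
 * vmlal_u8(acc, vget_high_u8(a), vget_high_u8(b)), a widening
 * multiply-accumulate on the top byte lanes. A full 16-lane accumulation
 * pairs it with vmlal_u8 on the low halves: */
static inline uint16x8_t mla_widen(uint16x8_t acc, uint8x16_t a, uint8x16_t b) {
  acc = vmlal_u8(acc, vget_low_u8(a), vget_low_u8(b)); /* lanes 0..7  */
  acc = vmlal_high_u8(acc, a, b);                      /* lanes 8..15 */
  return acc;
}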
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
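/* Sketch (assumes <arm_neon.h>): the _n variants broadcast a scalar
 * multiplier, so vmlsl_high_n_u32(acc, v, k) computes
 * acc[i] - (uint64_t)v[2 + i] * k. With plain vmlsl_n_u32 on the low half
 * it covers all four source lanes: */
static inline uint64x2_t mlsl_n_all(uint64x2_t acc, uint32x4_t v, uint32_t k) {
  acc = vmlsl_n_u32(acc, vget_low_u32(v), k); /* lanes 0..1 */
  acc = vmlsl_high_n_u32(acc, v, k);          /* lanes 2..3 */
  return acc;
}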
-#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \
-  float64x1_t __s0_939 = __p0_939; \
-  float64x1_t __s1_939 = __p1_939; \
-  float64x1_t __ret_939; \
-  float64_t __x_939 = vget_lane_f64(__s0_939, 0); \
-  float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \
-  float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \
-  __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \
-  __ret_939; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \
-  float64x1_t __s0_940 = __p0_940; \
-  float64x2_t __s1_940 = __p1_940; \
-  float64x1_t __ret_940; \
-  float64_t __x_940 = vget_lane_f64(__s0_940, 0); \
-  float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \
-  float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \
-  __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \
-  __ret_940; \
-})
-#else
-#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \
-  float64x1_t __s0_941 = __p0_941; \
-  float64x2_t __s1_941 = __p1_941; \
-  float64x2_t __rev1_941;  __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \
-  float64x1_t __ret_941; \
-  float64_t __x_941 = vget_lane_f64(__s0_941, 0); \
-  float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \
-  float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \
-  __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \
-  __ret_941; \
-})
-#endif
-
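/* Sketch: vmulx_laneq_f64 above routes through vmulxd_f64 (FMULX), which
 * behaves like ordinary multiplication except that 0 * +/-infinity yields
 * +/-2.0 instead of NaN -- the convention Newton-style reciprocal-step
 * algorithms rely on. */
static inline float64x1_t mulx_by_lane1(float64x1_t x, float64x2_t s) {
  return vmulx_laneq_f64(x, s, 1); /* x[0] * s[1] with FMULX semantics */
}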
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
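/* Sketch: vabal_u8 above accumulates the widened absolute difference
 * |p - q| into a 16-bit running sum. A short sum-of-absolute-differences
 * loop (assumes AArch64 for vaddlvq_u16; for long buffers, widen the
 * accumulator periodically so the 16-bit lanes cannot overflow): */
static inline uint32_t sad_u8(const uint8_t *p, const uint8_t *q, int blocks) {
  uint16x8_t acc = vdupq_n_u16(0);
  for (int i = 0; i < blocks; ++i)
    acc = vabal_u8(acc, vld1_u8(p + 8 * i), vld1_u8(q + 8 * i));
  return vaddlvq_u16(acc);
}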
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* if !defined(__ARM_NEON) */
-#endif /* ifndef __ARM_FP */
-#endif /* __ARM_NEON_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_sve.h b/linux-x86/lib64/clang/14.0.2/include/arm_sve.h
deleted file mode 100644
index 909634a..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/arm_sve.h
+++ /dev/null
@@ -1,24043 +0,0 @@
-/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_SVE_H
-#define __ARM_SVE_H
-
-#if !defined(__ARM_FEATURE_SVE)
-#error "SVE support not enabled"
-#else
-
-#if !defined(__LITTLE_ENDIAN__)
-#error "Big endian is currently not supported for arm_sve.h"
-#endif
-#include <stdint.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#else
-#include <stdbool.h>
-#endif
-
-typedef __fp16 float16_t;
-typedef float float32_t;
-typedef double float64_t;
-typedef __SVInt8_t svint8_t;
-typedef __SVInt16_t svint16_t;
-typedef __SVInt32_t svint32_t;
-typedef __SVInt64_t svint64_t;
-typedef __SVUint8_t svuint8_t;
-typedef __SVUint16_t svuint16_t;
-typedef __SVUint32_t svuint32_t;
-typedef __SVUint64_t svuint64_t;
-typedef __SVFloat16_t svfloat16_t;
-
-#if defined(__ARM_FEATURE_SVE_BF16) && !defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
-#error "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when __ARM_FEATURE_SVE_BF16 is defined"
-#endif
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-typedef __SVBFloat16_t svbfloat16_t;
-#endif
-
-#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
-#include <arm_bf16.h>
-typedef __bf16 bfloat16_t;
-#endif
-
-typedef __SVFloat32_t svfloat32_t;
-typedef __SVFloat64_t svfloat64_t;
-typedef __clang_svint8x2_t svint8x2_t;
-typedef __clang_svint16x2_t svint16x2_t;
-typedef __clang_svint32x2_t svint32x2_t;
-typedef __clang_svint64x2_t svint64x2_t;
-typedef __clang_svuint8x2_t svuint8x2_t;
-typedef __clang_svuint16x2_t svuint16x2_t;
-typedef __clang_svuint32x2_t svuint32x2_t;
-typedef __clang_svuint64x2_t svuint64x2_t;
-typedef __clang_svfloat16x2_t svfloat16x2_t;
-typedef __clang_svfloat32x2_t svfloat32x2_t;
-typedef __clang_svfloat64x2_t svfloat64x2_t;
-typedef __clang_svint8x3_t svint8x3_t;
-typedef __clang_svint16x3_t svint16x3_t;
-typedef __clang_svint32x3_t svint32x3_t;
-typedef __clang_svint64x3_t svint64x3_t;
-typedef __clang_svuint8x3_t svuint8x3_t;
-typedef __clang_svuint16x3_t svuint16x3_t;
-typedef __clang_svuint32x3_t svuint32x3_t;
-typedef __clang_svuint64x3_t svuint64x3_t;
-typedef __clang_svfloat16x3_t svfloat16x3_t;
-typedef __clang_svfloat32x3_t svfloat32x3_t;
-typedef __clang_svfloat64x3_t svfloat64x3_t;
-typedef __clang_svint8x4_t svint8x4_t;
-typedef __clang_svint16x4_t svint16x4_t;
-typedef __clang_svint32x4_t svint32x4_t;
-typedef __clang_svint64x4_t svint64x4_t;
-typedef __clang_svuint8x4_t svuint8x4_t;
-typedef __clang_svuint16x4_t svuint16x4_t;
-typedef __clang_svuint32x4_t svuint32x4_t;
-typedef __clang_svuint64x4_t svuint64x4_t;
-typedef __clang_svfloat16x4_t svfloat16x4_t;
-typedef __clang_svfloat32x4_t svfloat32x4_t;
-typedef __clang_svfloat64x4_t svfloat64x4_t;
-typedef __SVBool_t  svbool_t;
-
-#ifdef __ARM_FEATURE_SVE_BF16
-typedef __clang_svbfloat16x2_t svbfloat16x2_t;
-typedef __clang_svbfloat16x3_t svbfloat16x3_t;
-typedef __clang_svbfloat16x4_t svbfloat16x4_t;
-#endif
-enum svpattern
-{
-  SV_POW2 = 0,
-  SV_VL1 = 1,
-  SV_VL2 = 2,
-  SV_VL3 = 3,
-  SV_VL4 = 4,
-  SV_VL5 = 5,
-  SV_VL6 = 6,
-  SV_VL7 = 7,
-  SV_VL8 = 8,
-  SV_VL16 = 9,
-  SV_VL32 = 10,
-  SV_VL64 = 11,
-  SV_VL128 = 12,
-  SV_VL256 = 13,
-  SV_MUL4 = 29,
-  SV_MUL3 = 30,
-  SV_ALL = 31
-};
-
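/* Sketch: svpattern values parameterize the element-count and predicate
 * constructor intrinsics declared further down this header (elided from
 * the diff), e.g. the ACLE svcntb_pat. SV_VL4 means "exactly four
 * elements if the vector is long enough, otherwise zero". */
static inline uint64_t bytes_vl4(void) {
  return svcntb_pat(SV_VL4); /* 4 on any SVE target: VL is at least 16 bytes */
}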
-enum svprfop
-{
-  SV_PLDL1KEEP = 0,
-  SV_PLDL1STRM = 1,
-  SV_PLDL2KEEP = 2,
-  SV_PLDL2STRM = 3,
-  SV_PLDL3KEEP = 4,
-  SV_PLDL3STRM = 5,
-  SV_PSTL1KEEP = 8,
-  SV_PSTL1STRM = 9,
-  SV_PSTL2KEEP = 10,
-  SV_PSTL2STRM = 11,
-  SV_PSTL3KEEP = 12,
-  SV_PSTL3STRM = 13
-};
-
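/* Sketch: svprfop selects the hint for the SVE prefetch intrinsics (ACLE
 * svprfb and friends, declared later in this header and not shown in the
 * diff); the op must be a constant from the enum above. */
static inline void prefetch_for_read(svbool_t pg, const void *base) {
  svprfb(pg, base, SV_PLDL1KEEP); /* load hint, keep in L1, temporal */
}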
-/* Function attributes */
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__))
-
-#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__)
-#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__)
-#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__)
-#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__)
-#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__)
-#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__)
-#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__)
-#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__)
-#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__)
-#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__)
-#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__)
-#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__)
-#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__)
-#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__)
-#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__)
-#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__)
-#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__)
-#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__)
-#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__)
-#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__)
-#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__)
-#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__)
-#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__)
-#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__)
-#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__)
-#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__)
-#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__)
-#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__)
-#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__)
-#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__)
-#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__)
-#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__)
-#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__)
-#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__)
-#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__)
-#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__)
-#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__)
-#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__)
-#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__)
-#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__)
-#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__)
-#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__)
-#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__)
-#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__)
-#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__)
-#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__)
-#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__)
-#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__)
-#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__)
-#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__)
-#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__)
-#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__)
-#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__)
-#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__)
-#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__)
-#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__)
-#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__)
-#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__)
-#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__)
-#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__)
-#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__)
-#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__)
-#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__)
-#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__)
-#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__)
-#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__)
-#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__)
-#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__)
-#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__)
-#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__)
-#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__)
-#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__)
-#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__)
-#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__)
-#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__)
-#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__)
-#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__)
-#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__)
-#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__)
-#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__)
-#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__)
-#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__)
-#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__)
-#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__)
-#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__)
-#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__)
-#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__)
-#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__)
-#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__)
-#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__)
-#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__)
-#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__)
-#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__)
-#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__)
-#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__)
-#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__)
-#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__)
-#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__)
-#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__)
-#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__)
-#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__)
-#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__)
-#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__)
-#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__)
-#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__)
-#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__)
-#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__)
-#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__)
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__)
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__)
-#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__)
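-// The macros above route each fully-suffixed svreinterpret_<dst>_<src> call
-// to its Clang builtin. The __aio (always-inline, overloadable) functions
-// below layer the short svreinterpret_<dst>(op) spelling on top, selecting
-// the builtin from the argument type. Per the Arm C Language Extensions
-// (ACLE), svreinterpret is a pure bit-pattern cast between SVE vector
-// types; no element values are converted.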
-__aio svint8_t svreinterpret_s8(svint8_t op) {
-  return __builtin_sve_reinterpret_s8_s8(op);
-}
-
-__aio svint8_t svreinterpret_s8(svint16_t op) {
-  return __builtin_sve_reinterpret_s8_s16(op);
-}
-
-__aio svint8_t svreinterpret_s8(svint32_t op) {
-  return __builtin_sve_reinterpret_s8_s32(op);
-}
-
-__aio svint8_t svreinterpret_s8(svint64_t op) {
-  return __builtin_sve_reinterpret_s8_s64(op);
-}
-
-__aio svint8_t svreinterpret_s8(svuint8_t op) {
-  return __builtin_sve_reinterpret_s8_u8(op);
-}
-
-__aio svint8_t svreinterpret_s8(svuint16_t op) {
-  return __builtin_sve_reinterpret_s8_u16(op);
-}
-
-__aio svint8_t svreinterpret_s8(svuint32_t op) {
-  return __builtin_sve_reinterpret_s8_u32(op);
-}
-
-__aio svint8_t svreinterpret_s8(svuint64_t op) {
-  return __builtin_sve_reinterpret_s8_u64(op);
-}
-
-__aio svint8_t svreinterpret_s8(svfloat16_t op) {
-  return __builtin_sve_reinterpret_s8_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svint8_t svreinterpret_s8(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_s8_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svint8_t svreinterpret_s8(svfloat32_t op) {
-  return __builtin_sve_reinterpret_s8_f32(op);
-}
-
-__aio svint8_t svreinterpret_s8(svfloat64_t op) {
-  return __builtin_sve_reinterpret_s8_f64(op);
-}
-
-__aio svint16_t svreinterpret_s16(svint8_t op) {
-  return __builtin_sve_reinterpret_s16_s8(op);
-}
-
-__aio svint16_t svreinterpret_s16(svint16_t op) {
-  return __builtin_sve_reinterpret_s16_s16(op);
-}
-
-__aio svint16_t svreinterpret_s16(svint32_t op) {
-  return __builtin_sve_reinterpret_s16_s32(op);
-}
-
-__aio svint16_t svreinterpret_s16(svint64_t op) {
-  return __builtin_sve_reinterpret_s16_s64(op);
-}
-
-__aio svint16_t svreinterpret_s16(svuint8_t op) {
-  return __builtin_sve_reinterpret_s16_u8(op);
-}
-
-__aio svint16_t svreinterpret_s16(svuint16_t op) {
-  return __builtin_sve_reinterpret_s16_u16(op);
-}
-
-__aio svint16_t svreinterpret_s16(svuint32_t op) {
-  return __builtin_sve_reinterpret_s16_u32(op);
-}
-
-__aio svint16_t svreinterpret_s16(svuint64_t op) {
-  return __builtin_sve_reinterpret_s16_u64(op);
-}
-
-__aio svint16_t svreinterpret_s16(svfloat16_t op) {
-  return __builtin_sve_reinterpret_s16_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svint16_t svreinterpret_s16(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_s16_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svint16_t svreinterpret_s16(svfloat32_t op) {
-  return __builtin_sve_reinterpret_s16_f32(op);
-}
-
-__aio svint16_t svreinterpret_s16(svfloat64_t op) {
-  return __builtin_sve_reinterpret_s16_f64(op);
-}
-
-__aio svint32_t svreinterpret_s32(svint8_t op) {
-  return __builtin_sve_reinterpret_s32_s8(op);
-}
-
-__aio svint32_t svreinterpret_s32(svint16_t op) {
-  return __builtin_sve_reinterpret_s32_s16(op);
-}
-
-__aio svint32_t svreinterpret_s32(svint32_t op) {
-  return __builtin_sve_reinterpret_s32_s32(op);
-}
-
-__aio svint32_t svreinterpret_s32(svint64_t op) {
-  return __builtin_sve_reinterpret_s32_s64(op);
-}
-
-__aio svint32_t svreinterpret_s32(svuint8_t op) {
-  return __builtin_sve_reinterpret_s32_u8(op);
-}
-
-__aio svint32_t svreinterpret_s32(svuint16_t op) {
-  return __builtin_sve_reinterpret_s32_u16(op);
-}
-
-__aio svint32_t svreinterpret_s32(svuint32_t op) {
-  return __builtin_sve_reinterpret_s32_u32(op);
-}
-
-__aio svint32_t svreinterpret_s32(svuint64_t op) {
-  return __builtin_sve_reinterpret_s32_u64(op);
-}
-
-__aio svint32_t svreinterpret_s32(svfloat16_t op) {
-  return __builtin_sve_reinterpret_s32_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svint32_t svreinterpret_s32(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_s32_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svint32_t svreinterpret_s32(svfloat32_t op) {
-  return __builtin_sve_reinterpret_s32_f32(op);
-}
-
-__aio svint32_t svreinterpret_s32(svfloat64_t op) {
-  return __builtin_sve_reinterpret_s32_f64(op);
-}
-
-__aio svint64_t svreinterpret_s64(svint8_t op) {
-  return __builtin_sve_reinterpret_s64_s8(op);
-}
-
-__aio svint64_t svreinterpret_s64(svint16_t op) {
-  return __builtin_sve_reinterpret_s64_s16(op);
-}
-
-__aio svint64_t svreinterpret_s64(svint32_t op) {
-  return __builtin_sve_reinterpret_s64_s32(op);
-}
-
-__aio svint64_t svreinterpret_s64(svint64_t op) {
-  return __builtin_sve_reinterpret_s64_s64(op);
-}
-
-__aio svint64_t svreinterpret_s64(svuint8_t op) {
-  return __builtin_sve_reinterpret_s64_u8(op);
-}
-
-__aio svint64_t svreinterpret_s64(svuint16_t op) {
-  return __builtin_sve_reinterpret_s64_u16(op);
-}
-
-__aio svint64_t svreinterpret_s64(svuint32_t op) {
-  return __builtin_sve_reinterpret_s64_u32(op);
-}
-
-__aio svint64_t svreinterpret_s64(svuint64_t op) {
-  return __builtin_sve_reinterpret_s64_u64(op);
-}
-
-__aio svint64_t svreinterpret_s64(svfloat16_t op) {
-  return __builtin_sve_reinterpret_s64_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svint64_t svreinterpret_s64(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_s64_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svint64_t svreinterpret_s64(svfloat32_t op) {
-  return __builtin_sve_reinterpret_s64_f32(op);
-}
-
-__aio svint64_t svreinterpret_s64(svfloat64_t op) {
-  return __builtin_sve_reinterpret_s64_f64(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svint8_t op) {
-  return __builtin_sve_reinterpret_u8_s8(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svint16_t op) {
-  return __builtin_sve_reinterpret_u8_s16(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svint32_t op) {
-  return __builtin_sve_reinterpret_u8_s32(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svint64_t op) {
-  return __builtin_sve_reinterpret_u8_s64(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svuint8_t op) {
-  return __builtin_sve_reinterpret_u8_u8(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svuint16_t op) {
-  return __builtin_sve_reinterpret_u8_u16(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svuint32_t op) {
-  return __builtin_sve_reinterpret_u8_u32(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svuint64_t op) {
-  return __builtin_sve_reinterpret_u8_u64(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svfloat16_t op) {
-  return __builtin_sve_reinterpret_u8_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svuint8_t svreinterpret_u8(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_u8_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svuint8_t svreinterpret_u8(svfloat32_t op) {
-  return __builtin_sve_reinterpret_u8_f32(op);
-}
-
-__aio svuint8_t svreinterpret_u8(svfloat64_t op) {
-  return __builtin_sve_reinterpret_u8_f64(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svint8_t op) {
-  return __builtin_sve_reinterpret_u16_s8(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svint16_t op) {
-  return __builtin_sve_reinterpret_u16_s16(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svint32_t op) {
-  return __builtin_sve_reinterpret_u16_s32(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svint64_t op) {
-  return __builtin_sve_reinterpret_u16_s64(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svuint8_t op) {
-  return __builtin_sve_reinterpret_u16_u8(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svuint16_t op) {
-  return __builtin_sve_reinterpret_u16_u16(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svuint32_t op) {
-  return __builtin_sve_reinterpret_u16_u32(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svuint64_t op) {
-  return __builtin_sve_reinterpret_u16_u64(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svfloat16_t op) {
-  return __builtin_sve_reinterpret_u16_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svuint16_t svreinterpret_u16(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_u16_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svuint16_t svreinterpret_u16(svfloat32_t op) {
-  return __builtin_sve_reinterpret_u16_f32(op);
-}
-
-__aio svuint16_t svreinterpret_u16(svfloat64_t op) {
-  return __builtin_sve_reinterpret_u16_f64(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svint8_t op) {
-  return __builtin_sve_reinterpret_u32_s8(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svint16_t op) {
-  return __builtin_sve_reinterpret_u32_s16(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svint32_t op) {
-  return __builtin_sve_reinterpret_u32_s32(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svint64_t op) {
-  return __builtin_sve_reinterpret_u32_s64(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svuint8_t op) {
-  return __builtin_sve_reinterpret_u32_u8(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svuint16_t op) {
-  return __builtin_sve_reinterpret_u32_u16(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svuint32_t op) {
-  return __builtin_sve_reinterpret_u32_u32(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svuint64_t op) {
-  return __builtin_sve_reinterpret_u32_u64(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svfloat16_t op) {
-  return __builtin_sve_reinterpret_u32_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svuint32_t svreinterpret_u32(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_u32_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svuint32_t svreinterpret_u32(svfloat32_t op) {
-  return __builtin_sve_reinterpret_u32_f32(op);
-}
-
-__aio svuint32_t svreinterpret_u32(svfloat64_t op) {
-  return __builtin_sve_reinterpret_u32_f64(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svint8_t op) {
-  return __builtin_sve_reinterpret_u64_s8(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svint16_t op) {
-  return __builtin_sve_reinterpret_u64_s16(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svint32_t op) {
-  return __builtin_sve_reinterpret_u64_s32(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svint64_t op) {
-  return __builtin_sve_reinterpret_u64_s64(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svuint8_t op) {
-  return __builtin_sve_reinterpret_u64_u8(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svuint16_t op) {
-  return __builtin_sve_reinterpret_u64_u16(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svuint32_t op) {
-  return __builtin_sve_reinterpret_u64_u32(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svuint64_t op) {
-  return __builtin_sve_reinterpret_u64_u64(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svfloat16_t op) {
-  return __builtin_sve_reinterpret_u64_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svuint64_t svreinterpret_u64(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_u64_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svuint64_t svreinterpret_u64(svfloat32_t op) {
-  return __builtin_sve_reinterpret_u64_f32(op);
-}
-
-__aio svuint64_t svreinterpret_u64(svfloat64_t op) {
-  return __builtin_sve_reinterpret_u64_f64(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svint8_t op) {
-  return __builtin_sve_reinterpret_f16_s8(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svint16_t op) {
-  return __builtin_sve_reinterpret_f16_s16(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svint32_t op) {
-  return __builtin_sve_reinterpret_f16_s32(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svint64_t op) {
-  return __builtin_sve_reinterpret_f16_s64(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svuint8_t op) {
-  return __builtin_sve_reinterpret_f16_u8(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svuint16_t op) {
-  return __builtin_sve_reinterpret_f16_u16(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svuint32_t op) {
-  return __builtin_sve_reinterpret_f16_u32(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svuint64_t op) {
-  return __builtin_sve_reinterpret_f16_u64(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svfloat16_t op) {
-  return __builtin_sve_reinterpret_f16_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svfloat16_t svreinterpret_f16(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_f16_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svfloat16_t svreinterpret_f16(svfloat32_t op) {
-  return __builtin_sve_reinterpret_f16_f32(op);
-}
-
-__aio svfloat16_t svreinterpret_f16(svfloat64_t op) {
-  return __builtin_sve_reinterpret_f16_f64(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svint8_t op) {
-  return __builtin_sve_reinterpret_bf16_s8(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svint16_t op) {
-  return __builtin_sve_reinterpret_bf16_s16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svint32_t op) {
-  return __builtin_sve_reinterpret_bf16_s32(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svint64_t op) {
-  return __builtin_sve_reinterpret_bf16_s64(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svuint8_t op) {
-  return __builtin_sve_reinterpret_bf16_u8(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svuint16_t op) {
-  return __builtin_sve_reinterpret_bf16_u16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svuint32_t op) {
-  return __builtin_sve_reinterpret_bf16_u32(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svuint64_t op) {
-  return __builtin_sve_reinterpret_bf16_u64(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svfloat16_t op) {
-  return __builtin_sve_reinterpret_bf16_f16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_bf16_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svfloat32_t op) {
-  return __builtin_sve_reinterpret_bf16_f32(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svbfloat16_t svreinterpret_bf16(svfloat64_t op) {
-  return __builtin_sve_reinterpret_bf16_f64(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svfloat32_t svreinterpret_f32(svint8_t op) {
-  return __builtin_sve_reinterpret_f32_s8(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svint16_t op) {
-  return __builtin_sve_reinterpret_f32_s16(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svint32_t op) {
-  return __builtin_sve_reinterpret_f32_s32(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svint64_t op) {
-  return __builtin_sve_reinterpret_f32_s64(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svuint8_t op) {
-  return __builtin_sve_reinterpret_f32_u8(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svuint16_t op) {
-  return __builtin_sve_reinterpret_f32_u16(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svuint32_t op) {
-  return __builtin_sve_reinterpret_f32_u32(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svuint64_t op) {
-  return __builtin_sve_reinterpret_f32_u64(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svfloat16_t op) {
-  return __builtin_sve_reinterpret_f32_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svfloat32_t svreinterpret_f32(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_f32_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svfloat32_t svreinterpret_f32(svfloat32_t op) {
-  return __builtin_sve_reinterpret_f32_f32(op);
-}
-
-__aio svfloat32_t svreinterpret_f32(svfloat64_t op) {
-  return __builtin_sve_reinterpret_f32_f64(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svint8_t op) {
-  return __builtin_sve_reinterpret_f64_s8(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svint16_t op) {
-  return __builtin_sve_reinterpret_f64_s16(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svint32_t op) {
-  return __builtin_sve_reinterpret_f64_s32(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svint64_t op) {
-  return __builtin_sve_reinterpret_f64_s64(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svuint8_t op) {
-  return __builtin_sve_reinterpret_f64_u8(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svuint16_t op) {
-  return __builtin_sve_reinterpret_f64_u16(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svuint32_t op) {
-  return __builtin_sve_reinterpret_f64_u32(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svuint64_t op) {
-  return __builtin_sve_reinterpret_f64_u64(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svfloat16_t op) {
-  return __builtin_sve_reinterpret_f64_f16(op);
-}
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__aio svfloat64_t svreinterpret_f64(svbfloat16_t op) {
-  return __builtin_sve_reinterpret_f64_bf16(op);
-}
-
-#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
-__aio svfloat64_t svreinterpret_f64(svfloat32_t op) {
-  return __builtin_sve_reinterpret_f64_f32(op);
-}
-
-__aio svfloat64_t svreinterpret_f64(svfloat64_t op) {
-  return __builtin_sve_reinterpret_f64_f64(op);
-}
-
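-// From here on the header declares the non-overloaded intrinsics. Each
-// __ai declaration binds a public name directly to the matching Clang
-// builtin via __clang_arm_builtin_alias, so no wrapper body is needed.
-// Suffix conventions (ACLE): _m merges inactive lanes from the first
-// vector operand, _z zeroes them, _x leaves them unspecified; _n takes a
-// scalar final operand that is implicitly splat across the vector. svabd,
-// for example, is the predicated lane-wise absolute difference.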
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))
-svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))
-svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))
-svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))
-svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))
-svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))
-svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))
-svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))
-svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))
-svfloat16_t svabd_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))
-svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))
-svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))
-svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))
-svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))
-svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))
-svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))
-svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))
-svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))
-svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))
-svint32_t svabd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))
-svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))
-svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))
-svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))
-svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))
-svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))
-svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))
-svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))
-svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))
-svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))
-svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))
-svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))
-svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))
-svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))
-svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))
-svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))
-svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))
-svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))
-svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))
-svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))
-svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))
-svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))
-svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))
-svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))
-svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))
-svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))
-svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))
-svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))
-svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))
-svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))
-svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))
-svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))
-svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))
-svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))
-svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))
-svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))
-svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))
-svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))
-svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))
-svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))
-svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))
-svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))
-svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))
-svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))
-svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))
-svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))
-svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))
-svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))
-svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))
-svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))
-svfloat16_t svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))
-svfloat64_t svabs_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))
-svfloat32_t svabs_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))
-svfloat16_t svabs_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))
-svfloat64_t svabs_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))
-svfloat32_t svabs_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))
-svfloat16_t svabs_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))
-svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))
-svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))
-svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))
-svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))
-svint8_t svabs_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))
-svint32_t svabs_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))
-svint64_t svabs_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))
-svint16_t svabs_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))
-svint8_t svabs_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))
-svint32_t svabs_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))
-svint64_t svabs_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))
-svint16_t svabs_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))
-svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))
-svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))
-svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))
-svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))
-svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))
-svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))
-svbool_t svacgt_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))
-svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))
-svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))
-svbool_t svacgt_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))
-svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))
-svbool_t svacgt_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))
-svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))
-svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))
-svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))
-svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))
-svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))
-svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))
-svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))
-svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))
-svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))
-svbool_t svaclt_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))
-svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))
-svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t);
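-// svacge/svacgt/svacle/svaclt above are the absolute-value compares: they
-// test |op1| OP |op2| lane-wise and return a predicate of the active lanes
-// that satisfy the comparison.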
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))
-svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))
-svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))
-svfloat16_t svadd_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))
-svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))
-svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))
-svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))
-svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))
-svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))
-svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))
-svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))
-svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))
-svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))
-svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))
-svint8_t svadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))
-svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))
-svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))
-svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))
-svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))
-svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))
-svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))
-svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))
-svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))
-svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))
-svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))
-svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))
-svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))
-svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))
-svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))
-svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))
-svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))
-svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))
-svint64_t svadd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))
-svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))
-svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))
-svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))
-svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))
-svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))
-svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))
-svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))
-svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))
-svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))
-svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))
-svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))
-svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))
-svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))
-svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))
-svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))
-svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))
-svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))
-svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))
-svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))
-svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))
-svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))
-svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))
-svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))
-svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))
-svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))
-svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))
-svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))
-svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))
-svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))
-svuint16_t svadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))
-svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))
-svint32_t svadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))
-svint64_t svadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))
-svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))
-float64_t svadda_f64(svbool_t, float64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))
-float32_t svadda_f32(svbool_t, float32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))
-float16_t svadda_f16(svbool_t, float16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))
-int64_t svaddv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))
-int64_t svaddv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))
-int64_t svaddv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))
-int64_t svaddv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))
-uint64_t svaddv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))
-uint64_t svaddv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))
-uint64_t svaddv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))
-uint64_t svaddv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))
-float64_t svaddv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
-float32_t svaddv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
-float16_t svaddv_f16(svbool_t, svfloat16_t);
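-// svadda above is the strictly-ordered floating-point reduction (FADDA),
-// folding an initial scalar through the active lanes in element order,
-// while svaddv is the unordered sum. Note that the integer svaddv forms
-// widen: every 8/16/32-bit input reduces into a 64-bit scalar.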
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
-svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
-svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
-svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
-svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
-svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
-svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
-svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
-svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
-svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
-svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
-svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
-svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
-svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
-svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
-svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
-svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t);
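-// The svadr{b,h,w,d} family above computes vectors of addresses: base plus
-// offset/index scaled by 1, 2, 4 or 8 bytes respectively (the ADR
-// instruction), for use with gather/scatter addressing.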
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
-svbool_t svand_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
-svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))
-svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))
-svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))
-svuint16_t svand_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))
-svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))
-svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))
-svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))
-svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))
-svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))
-svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))
-svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))
-svuint16_t svand_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))
-svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))
-svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))
-svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))
-svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))
-svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))
-svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))
-svuint64_t svand_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))
-svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))
-svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))
-svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))
-svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))
-svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))
-svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))
-svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))
-svuint64_t svand_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))
-svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))
-svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))
-svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))
-svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))
-svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))
-svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))
-svuint32_t svand_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))
-svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))
-svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))
-svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))
-svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))
-svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))
-svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))
-svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))
-svuint32_t svand_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))
-svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))
-svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))
-svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))
-svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))
-svint64_t svand_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))
-svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))
-uint8_t svandv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))
-uint32_t svandv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))
-uint64_t svandv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))
-uint16_t svandv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))
-int8_t svandv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))
-int32_t svandv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))
-int64_t svandv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))
-int16_t svandv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))
-svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))
-svint32_t svasr_n_s32_m(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))
-svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))
-svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))
-svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))
-svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))
-svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))
-svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))
-svint8_t svasr_n_s8_z(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))
-svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))
-svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))
-svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))
-svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))
-svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))
-svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))
-svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))
-svint8_t svasr_s8_x(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))
-svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))
-svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))
-svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))
-svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))
-svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))
-svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))
-svint16_t svasr_s16_z(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))
-svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))
-svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))
-svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))
-svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))
-svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))
-svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))
-svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))
-svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))
-svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))
-svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))
-svint32_t svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))
-svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))
-svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))
-svint32_t svasr_wide_s32_x(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))
-svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))
-svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))
-svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))
-svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))
-svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))
-svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))
-svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))
-svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))
-svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))
-svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))
-svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))
-svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))
-svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))
-svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))
-svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))
-svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))
-svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))
-svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))
-svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))
-svuint64_t svbic_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))
-svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))
-svint8_t svbic_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))
-svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))
-svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))
-svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))
-svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))
-svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))
-svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))
-svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))
-svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))
-svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))
-svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))
-svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))
-svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))
-svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))
-svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))
-svuint16_t svbic_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))
-svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))
-svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))
-svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))
-svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))
-svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))
-svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))
-svuint64_t svbic_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))
-svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))
-svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))
-svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))
-svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))
-svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))
-svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))
-svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))
-svuint64_t svbic_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))
-svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))
-svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))
-svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))
-svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))
-svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))
-svuint8_t svbic_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))
-svuint32_t svbic_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))
-svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))
-svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))
-svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))
-svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))
-svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))
-svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))
-svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))
-svbool_t svbrka_b_z(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))
-svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))
-svbool_t svbrkb_b_z(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))
-svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))
-svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))
-svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))
-svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))
-svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))
-svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))
-svfloat64_t svcadd_f64_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))
-svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))
-svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))
-svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))
-svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))
-svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))
-uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))
-uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))
-uint64_t svclasta_n_u64(svbool_t, uint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))
-uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))
-int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))
-float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))
-float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))
-float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))
-int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))
-int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))
-int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))
-svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))
-svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))
-svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))
-svuint16_t svclasta_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))
-svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))
-svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))
-svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))
-svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))
-svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))
-svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))
-svint16_t svclasta_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))
-uint8_t svclastb_n_u8(svbool_t, uint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))
-uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))
-uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))
-uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))
-int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))
-float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))
-float32_t svclastb_n_f32(svbool_t, float32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))
-float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))
-int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))
-int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))
-int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))
-svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))
-svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))
-svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))
-svuint16_t svclastb_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))
-svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))
-svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))
-svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))
-svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))
-svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))
-svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))
-svint16_t svclastb_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))
-svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))
-svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))
-svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))
-svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))
-svuint8_t svcls_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))
-svuint32_t svcls_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))
-svuint64_t svcls_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))
-svuint16_t svcls_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))
-svuint8_t svcls_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))
-svuint32_t svcls_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))
-svuint64_t svcls_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))
-svuint16_t svcls_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))
-svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))
-svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))
-svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))
-svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))
-svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))
-svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))
-svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))
-svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))
-svuint8_t svclz_u8_x(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))
-svuint32_t svclz_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))
-svuint64_t svclz_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))
-svuint16_t svclz_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))
-svuint8_t svclz_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))
-svuint32_t svclz_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))
-svuint64_t svclz_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))
-svuint16_t svclz_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))
-svuint8_t svclz_u8_z(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))
-svuint32_t svclz_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))
-svuint64_t svclz_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))
-svuint16_t svclz_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))
-svuint8_t svclz_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))
-svuint32_t svclz_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))
-svuint64_t svclz_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))
-svuint16_t svclz_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))
-svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))
-svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))
-svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))
-svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))
-svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))
-svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))
-svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))
-svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))
-svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))
-svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))
-svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))
-svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))
-svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))
-svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))
-svbool_t svcmpeq_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))
-svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))
-svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))
-svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))
-svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))
-svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))
-svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))
-svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))
-svbool_t svcmpeq_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))
-svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))
-svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))
-svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))
-svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))
-svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))
-svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))
-svbool_t svcmpeq_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))
-svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))
-svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))
-svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))
-svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))
-svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))
-svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))
-svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))
-svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))
-svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))
-svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))
-svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))
-svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))
-svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))
-svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))
-svbool_t svcmpge_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))
-svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))
-svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))
-svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))
-svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))
-svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))
-svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))
-svbool_t svcmpge_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))
-svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))
-svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))
-svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))
-svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))
-svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))
-svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))
-svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))
-svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))
-svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))
-svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))
-svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))
-svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))
-svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))
-svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))
-svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))
-svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))
-svbool_t svcmpge_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))
-svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))
-svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))
-svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))
-svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))
-svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))
-svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))
-svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))
-svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))
-svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))
-svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
-svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
-svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
-svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
-svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
-svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
-svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
-svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
-svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
-svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
-svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
-svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
-svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
-svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
-svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
-svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
-svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
-svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
-svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
-svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
-svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
-svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
-svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
-svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
-svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
-svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
-svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
-svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
-svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
-svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
-svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
-svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
-svbool_t svcmple_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
-svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
-svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
-svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
-svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
-svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
-svbool_t svcmple_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
-svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
-svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
-svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
-svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
-svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
-svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
-svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
-svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
-svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
-svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
-svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
-svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
-svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
-svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
-svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
-svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
-svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
-svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
-svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
-svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
-svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
-svbool_t svcmple_wide_u8(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
-svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
-svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
-svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
-svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
-svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
-svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
-svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
-svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
-svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
-svbool_t svcmplt_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
-svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
-svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
-svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
-svbool_t svcmplt_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
-svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
-svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
-svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
-svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
-svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
-svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
-svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
-svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
-svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
-svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
-svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
-svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
-svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
-svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
-svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
-svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
-svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
-svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
-svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
-svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
-svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
-svbool_t svcmplt_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
-svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
-svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
-svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
-svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
-svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
-svbool_t svcmpne_n_u64(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
-svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
-svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
-svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
-svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
-svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
-svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
-svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
-svbool_t svcmpne_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
-svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
-svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
-svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
-svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
-svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
-svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
-svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
-svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
-svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
-svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
-svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
-svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
-svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
-svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
-svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
-svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
-svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
-svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
-svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
-svbool_t svcmpuo_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
-svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
-svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
-svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
-svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
-svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
-svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
-svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
-svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
-svuint8_t svcnot_u8_x(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
-svuint32_t svcnot_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
-svuint64_t svcnot_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
-svuint16_t svcnot_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
-svint8_t svcnot_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
-svint32_t svcnot_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
-svint64_t svcnot_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
-svint16_t svcnot_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
-svuint8_t svcnot_u8_z(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
-svuint32_t svcnot_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
-svuint64_t svcnot_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
-svuint16_t svcnot_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
-svint8_t svcnot_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
-svint32_t svcnot_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
-svint64_t svcnot_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
-svint16_t svcnot_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
-svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
-svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
-svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
-svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
-svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
-svuint64_t svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
-svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
-svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
-svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
-svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
-svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
-svuint8_t svcnt_u8_x(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
-svuint32_t svcnt_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
-svuint64_t svcnt_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
-svuint16_t svcnt_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
-svuint8_t svcnt_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
-svuint64_t svcnt_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
-svuint32_t svcnt_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
-svuint16_t svcnt_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
-svuint32_t svcnt_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
-svuint64_t svcnt_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
-svuint16_t svcnt_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
-svuint8_t svcnt_u8_z(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
-svuint32_t svcnt_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
-svuint64_t svcnt_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
-svuint16_t svcnt_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
-svuint8_t svcnt_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
-svuint64_t svcnt_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
-svuint32_t svcnt_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
-svuint16_t svcnt_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
-svuint32_t svcnt_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
-svuint64_t svcnt_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
-svuint16_t svcnt_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb)))
-uint64_t svcntb();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat)))
-uint64_t svcntb_pat(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd)))
-uint64_t svcntd();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat)))
-uint64_t svcntd_pat(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth)))
-uint64_t svcnth();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat)))
-uint64_t svcnth_pat(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8)))
-uint64_t svcntp_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32)))
-uint64_t svcntp_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64)))
-uint64_t svcntp_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16)))
-uint64_t svcntp_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))
-uint64_t svcntw();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))
-uint64_t svcntw_pat(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
-svuint32_t svcompact_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
-svuint64_t svcompact_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
-svfloat64_t svcompact_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
-svfloat32_t svcompact_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
-svint32_t svcompact_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
-svint64_t svcompact_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
-svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
-svuint32x2_t svcreate2_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
-svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
-svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
-svint8x2_t svcreate2_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
-svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
-svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
-svfloat16x2_t svcreate2_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
-svint32x2_t svcreate2_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
-svint64x2_t svcreate2_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
-svint16x2_t svcreate2_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
-svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
-svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
-svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
-svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
-svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
-svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
-svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
-svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
-svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
-svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
-svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
-svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
-svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
-svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
-svuint16x4_t svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
-svint8x4_t svcreate4_s8(svint8_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
-svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
-svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
-svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
-svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
-svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
-svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
-svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
-svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
-svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
-svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
-svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
-svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
-svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
-svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
-svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
-svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
-svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
-svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
-svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
-svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
-svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
-svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
-svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
-svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
-svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
-svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
-svfloat16_t svcvt_f16_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
-svfloat16_t svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
-svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
-svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
-svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
-svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
-svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
-svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
-svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
-svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
-svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
-svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
-svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
-svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
-svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
-svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
-svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
-svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
-svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
-svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
-svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
-svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
-svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
-svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
-svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
-svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
-svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
-svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
-svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
-svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
-svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
-svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
-svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
-svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
-svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
-svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
-svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
-svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
-svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
-svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
-svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
-svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
-svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
-svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
-svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
-svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
-svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
-svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
-svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
-svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
-svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
-svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
-svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
-svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
-svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
-svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
-svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
-svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
-svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
-svint64_t svcvt_s64_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
-svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
-svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
-svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
-svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
-svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
-svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
-svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
-svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
-svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
-svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
-svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
-svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
-svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
-svuint64_t svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
-svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
-svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
-svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
-svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
-svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
-svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
-svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
-svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
-svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
-svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
-svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
-svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
-svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
-svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
-svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
-svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
-svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
-svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
-svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
-svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
-svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
-svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
-svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
-svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
-svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
-svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
-svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
-svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
-svuint64_t svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
-svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
-svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
-svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
-svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
-svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
-svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
-svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
-svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
-svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
-svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
-svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
-svint32_t svdiv_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
-svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
-svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
-svint64_t svdiv_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
-svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
-svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
-svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
-svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
-svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
-svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
-svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
-svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
-svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
-svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
-svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
-svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
-svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
-svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
-svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
-svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
-svint64_t svdivr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
-svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
-svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
-svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
-svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
-svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
-svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
-svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
-svuint64_t svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
-svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
-svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
-svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
-svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
-svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
-svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
-svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
-svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
-svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
-svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
-svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
-svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
-svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
-svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
-svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
-svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
-svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
-svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
-svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
-svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
-svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))
-svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))
-svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))
-svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))
-svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))
-svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))
-svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))
-svint32_t svdot_s32(svint32_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))
-svint64_t svdot_s64(svint64_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))
-svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))
-svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))
-svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))
-svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))
-svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))
-svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))
-svuint8_t svdup_n_u8(uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))
-svuint32_t svdup_n_u32(uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))
-svuint64_t svdup_n_u64(uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))
-svuint16_t svdup_n_u16(uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))
-svint8_t svdup_n_s8(int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))
-svfloat64_t svdup_n_f64(float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))
-svfloat32_t svdup_n_f32(float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))
-svfloat16_t svdup_n_f16(float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))
-svint32_t svdup_n_s32(int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))
-svint64_t svdup_n_s64(int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))
-svint16_t svdup_n_s16(int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))
-svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))
-svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))
-svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))
-svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))
-svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))
-svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))
-svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))
-svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))
-svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))
-svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))
-svint16_t svdup_n_s16_m(svint16_t, svbool_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))
-svbool_t svdup_n_b8(bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))
-svbool_t svdup_n_b32(bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))
-svbool_t svdup_n_b64(bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))
-svbool_t svdup_n_b16(bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))
-svuint8_t svdup_n_u8_x(svbool_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))
-svuint32_t svdup_n_u32_x(svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))
-svuint64_t svdup_n_u64_x(svbool_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))
-svuint16_t svdup_n_u16_x(svbool_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))
-svint8_t svdup_n_s8_x(svbool_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))
-svfloat64_t svdup_n_f64_x(svbool_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))
-svfloat32_t svdup_n_f32_x(svbool_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))
-svfloat16_t svdup_n_f16_x(svbool_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))
-svint32_t svdup_n_s32_x(svbool_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))
-svint64_t svdup_n_s64_x(svbool_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))
-svint16_t svdup_n_s16_x(svbool_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))
-svuint8_t svdup_n_u8_z(svbool_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))
-svuint32_t svdup_n_u32_z(svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))
-svuint64_t svdup_n_u64_z(svbool_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))
-svuint16_t svdup_n_u16_z(svbool_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))
-svint8_t svdup_n_s8_z(svbool_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))
-svfloat64_t svdup_n_f64_z(svbool_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))
-svfloat32_t svdup_n_f32_z(svbool_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))
-svfloat16_t svdup_n_f16_z(svbool_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))
-svint32_t svdup_n_s32_z(svbool_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))
-svint64_t svdup_n_s64_z(svbool_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))
-svint16_t svdup_n_s16_z(svbool_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))
-svuint8_t svdup_lane_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))
-svuint32_t svdup_lane_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))
-svuint64_t svdup_lane_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))
-svuint16_t svdup_lane_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))
-svint8_t svdup_lane_s8(svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))
-svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))
-svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))
-svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))
-svint32_t svdup_lane_s32(svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
-svint64_t svdup_lane_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
-svint16_t svdup_lane_s16(svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
-svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
-svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))
-svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))
-svuint32_t svdupq_n_u32(uint32_t, uint32_t, uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))
-svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))
-svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))
-svuint64_t svdupq_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
-svfloat64_t svdupq_n_f64(float64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
-svint64_t svdupq_n_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
-svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
-svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
-svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
-svbool_t svdupq_n_b32(bool, bool, bool, bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
-svbool_t svdupq_n_b64(bool, bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
-svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
-svuint8_t svdupq_lane_u8(svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
-svuint32_t svdupq_lane_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))
-svuint64_t svdupq_lane_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))
-svuint16_t svdupq_lane_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))
-svint8_t svdupq_lane_s8(svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))
-svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))
-svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))
-svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))
-svint32_t svdupq_lane_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))
-svint64_t svdupq_lane_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))
-svint16_t svdupq_lane_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))
-svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))
-svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))
-svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))
-svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))
-svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))
-svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))
-svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))
-svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))
-svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))
-svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))
-svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))
-svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))
-svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))
-svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))
-svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))
-svint64_t sveor_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))
-svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))
-svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))
-svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))
-svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))
-svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))
-svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))
-svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))
-svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))
-svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))
-svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))
-svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))
-svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))
-svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))
-svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))
-svint32_t sveor_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))
-svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))
-svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))
-svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))
-svuint32_t sveor_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))
-svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))
-svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))
-svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))
-svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))
-svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))
-svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))
-svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))
-svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))
-svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))
-svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))
-svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))
-svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))
-svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))
-svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))
-uint8_t sveorv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))
-uint32_t sveorv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))
-uint64_t sveorv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))
-uint16_t sveorv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))
-int8_t sveorv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))
-int32_t sveorv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
-int64_t sveorv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
-int16_t sveorv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
-svfloat64_t svexpa_f64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
-svfloat32_t svexpa_f32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
-svfloat16_t svexpa_f16(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
-svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
-svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))
-svuint64_t svext_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))
-svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))
-svint8_t svext_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))
-svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))
-svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))
-svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))
-svint32_t svext_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))
-svint64_t svext_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))
-svint16_t svext_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))
-svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))
-svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))
-svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))
-svint32_t svextb_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))
-svint64_t svextb_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))
-svint16_t svextb_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))
-svint32_t svextb_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))
-svint64_t svextb_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))
-svint16_t svextb_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))
-svuint32_t svextb_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))
-svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))
-svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))
-svuint32_t svextb_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))
-svuint64_t svextb_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))
-svuint16_t svextb_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))
-svuint32_t svextb_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))
-svuint64_t svextb_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))
-svuint16_t svextb_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))
-svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))
-svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))
-svint32_t svexth_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))
-svint64_t svexth_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))
-svint32_t svexth_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))
-svint64_t svexth_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))
-svuint32_t svexth_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))
-svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))
-svuint32_t svexth_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))
-svuint64_t svexth_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))
-svuint32_t svexth_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))
-svuint64_t svexth_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))
-svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))
-svint64_t svextw_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))
-svint64_t svextw_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))
-svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))
-svuint64_t svextw_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))
-svuint64_t svextw_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))
-svuint8_t svget2_u8(svuint8x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))
-svuint32_t svget2_u32(svuint32x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))
-svuint64_t svget2_u64(svuint64x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))
-svuint16_t svget2_u16(svuint16x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))
-svint8_t svget2_s8(svint8x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))
-svfloat64_t svget2_f64(svfloat64x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))
-svfloat32_t svget2_f32(svfloat32x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))
-svfloat16_t svget2_f16(svfloat16x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))
-svint32_t svget2_s32(svint32x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))
-svint64_t svget2_s64(svint64x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))
-svint16_t svget2_s16(svint16x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))
-svuint8_t svget3_u8(svuint8x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))
-svuint32_t svget3_u32(svuint32x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))
-svuint64_t svget3_u64(svuint64x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))
-svuint16_t svget3_u16(svuint16x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))
-svint8_t svget3_s8(svint8x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))
-svfloat64_t svget3_f64(svfloat64x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))
-svfloat32_t svget3_f32(svfloat32x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))
-svfloat16_t svget3_f16(svfloat16x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))
-svint32_t svget3_s32(svint32x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))
-svint64_t svget3_s64(svint64x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))
-svint16_t svget3_s16(svint16x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))
-svuint8_t svget4_u8(svuint8x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))
-svuint32_t svget4_u32(svuint32x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))
-svuint64_t svget4_u64(svuint64x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))
-svuint16_t svget4_u16(svuint16x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))
-svint8_t svget4_s8(svint8x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))
-svfloat64_t svget4_f64(svfloat64x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))
-svfloat32_t svget4_f32(svfloat32x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))
-svfloat16_t svget4_f16(svfloat16x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))
-svint32_t svget4_s32(svint32x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))
-svint64_t svget4_s64(svint64x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))
-svint16_t svget4_s16(svint16x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8)))
-svuint8_t svindex_u8(uint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32)))
-svuint32_t svindex_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64)))
-svuint64_t svindex_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16)))
-svuint16_t svindex_u16(uint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8)))
-svint8_t svindex_s8(int8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32)))
-svint32_t svindex_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64)))
-svint64_t svindex_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16)))
-svint16_t svindex_s16(int16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))
-svuint8_t svinsr_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))
-svuint32_t svinsr_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))
-svuint64_t svinsr_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))
-svuint16_t svinsr_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))
-svint8_t svinsr_n_s8(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))
-svfloat64_t svinsr_n_f64(svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))
-svfloat32_t svinsr_n_f32(svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))
-svfloat16_t svinsr_n_f16(svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))
-svint32_t svinsr_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))
-svint64_t svinsr_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))
-svint16_t svinsr_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))
-uint8_t svlasta_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))
-uint32_t svlasta_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))
-uint64_t svlasta_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))
-uint16_t svlasta_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))
-int8_t svlasta_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))
-float64_t svlasta_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))
-float32_t svlasta_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))
-float16_t svlasta_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))
-int32_t svlasta_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))
-int64_t svlasta_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))
-int16_t svlasta_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))
-uint8_t svlastb_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))
-uint32_t svlastb_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))
-uint64_t svlastb_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))
-uint16_t svlastb_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))
-int8_t svlastb_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))
-float64_t svlastb_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))
-float32_t svlastb_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))
-float16_t svlastb_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))
-int32_t svlastb_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))
-int64_t svlastb_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))
-int16_t svlastb_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))
-svuint8_t svld1_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))
-svuint32_t svld1_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))
-svuint64_t svld1_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))
-svuint16_t svld1_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))
-svint8_t svld1_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))
-svfloat64_t svld1_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))
-svfloat32_t svld1_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))
-svfloat16_t svld1_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))
-svint32_t svld1_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
-svint64_t svld1_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
-svint16_t svld1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
-svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
-svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
-svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
-svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
-svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
-svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
-svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
-svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
-svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
-svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
-svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
-svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
-svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
-svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
-svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
-svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
-svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
-svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
-svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
-svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
-svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
-svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
-svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
-svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
-svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
-svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
-svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
-svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
-svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
-svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
-svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
-svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
-svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
-svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
-svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
-svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
-svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
-svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
-svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
-svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
-svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
-svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))
-svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))
-svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))
-svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))
-svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))
-svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))
-svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))
-svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))
-svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))
-svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))
-svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))
-svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))
-svuint8_t svld1rq_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))
-svuint32_t svld1rq_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))
-svuint64_t svld1rq_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))
-svuint16_t svld1rq_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))
-svint8_t svld1rq_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))
-svfloat64_t svld1rq_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))
-svfloat32_t svld1rq_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))
-svfloat16_t svld1rq_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))
-svint32_t svld1rq_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))
-svint64_t svld1rq_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))
-svint16_t svld1rq_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
-svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
-svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
-svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
-svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
-svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
-svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
-svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
-svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
-svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
-svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
-svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
-svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
-svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
-svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
-svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
-svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32)))
-svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64)))
-svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16)))
-svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32)))
-svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64)))
-svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16)))
-svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32)))
-svuint32_t svld1sb_u32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64)))
-svuint64_t svld1sb_u64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16)))
-svuint16_t svld1sb_u16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32)))
-svint32_t svld1sb_s32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64)))
-svint64_t svld1sb_s64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16)))
-svint16_t svld1sb_s16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
-svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
-svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
-svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
-svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
-svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
-svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
-svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
-svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
-svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
-svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
-svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
-svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
-svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
-svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
-svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
-svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
-svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
-svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
-svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
-svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
-svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
-svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
-svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
-svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
-svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
-svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
-svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
-svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32)))
-svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64)))
-svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32)))
-svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64)))
-svint64_t svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32)))
-svuint32_t svld1sh_u32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64)))
-svuint64_t svld1sh_u64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32)))
-svint32_t svld1sh_s32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64)))
-svint64_t svld1sh_s64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
-svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
-svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
-svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
-svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
-svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
-svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
-svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
-svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
-svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
-svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
-svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
-svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
-svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
-svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64)))
-svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64)))
-svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64)))
-svuint64_t svld1sw_u64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64)))
-svint64_t svld1sw_s64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
-svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
-svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
-svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
-svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
-svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
-svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
-svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
-svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
-svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
-svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
-svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
-svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
-svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
-svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
-svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
-svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32)))
-svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64)))
-svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16)))
-svuint16_t svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32)))
-svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64)))
-svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16)))
-svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32)))
-svuint32_t svld1ub_u32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64)))
-svuint64_t svld1ub_u64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16)))
-svuint16_t svld1ub_u16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32)))
-svint32_t svld1ub_s32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64)))
-svint64_t svld1ub_s64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16)))
-svint16_t svld1ub_s16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
-svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
-svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
-svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
-svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
-svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
-svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
-svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
-svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
-svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
-svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
-svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
-svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
-svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
-svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
-svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
-svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
-svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
-svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
-svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
-svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
-svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
-svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
-svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
-svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
-svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
-svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
-svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
-svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32)))
-svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64)))
-svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32)))
-svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64)))
-svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32)))
-svuint32_t svld1uh_u32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64)))
-svuint64_t svld1uh_u64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32)))
-svint32_t svld1uh_s32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64)))
-svint64_t svld1uh_s64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
-svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
-svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
-svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
-svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
-svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
-svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
-svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
-svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
-svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
-svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
-svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
-svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
-svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
-svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64)))
-svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64)))
-svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64)))
-svuint64_t svld1uw_u64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64)))
-svint64_t svld1uw_s64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))
-svuint8x2_t svld2_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))
-svuint32x2_t svld2_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))
-svuint64x2_t svld2_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))
-svuint16x2_t svld2_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))
-svint8x2_t svld2_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))
-svfloat64x2_t svld2_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))
-svfloat32x2_t svld2_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))
-svfloat16x2_t svld2_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))
-svint32x2_t svld2_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))
-svint64x2_t svld2_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))
-svint16x2_t svld2_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))
-svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))
-svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))
-svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))
-svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))
-svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))
-svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))
-svfloat32x2_t svld2_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))
-svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))
-svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))
-svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))
-svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))
-svuint8x3_t svld3_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))
-svuint32x3_t svld3_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))
-svuint64x3_t svld3_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))
-svuint16x3_t svld3_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))
-svint8x3_t svld3_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))
-svfloat64x3_t svld3_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))
-svfloat32x3_t svld3_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))
-svfloat16x3_t svld3_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))
-svint32x3_t svld3_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))
-svint64x3_t svld3_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))
-svint16x3_t svld3_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))
-svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))
-svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))
-svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))
-svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))
-svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))
-svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))
-svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))
-svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))
-svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))
-svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))
-svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))
-svuint8x4_t svld4_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))
-svuint32x4_t svld4_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))
-svuint64x4_t svld4_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))
-svuint16x4_t svld4_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))
-svint8x4_t svld4_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))
-svfloat64x4_t svld4_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))
-svfloat32x4_t svld4_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))
-svfloat16x4_t svld4_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))
-svint32x4_t svld4_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))
-svint64x4_t svld4_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))
-svint16x4_t svld4_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))
-svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))
-svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))
-svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))
-svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))
-svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))
-svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))
-svfloat32x4_t svld4_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))
-svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))
-svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))
-svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))
-svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
-svuint8_t svldff1_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
-svuint32_t svldff1_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
-svuint64_t svldff1_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
-svuint16_t svldff1_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
-svint8_t svldff1_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
-svfloat64_t svldff1_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
-svfloat32_t svldff1_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
-svfloat16_t svldff1_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
-svint32_t svldff1_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
-svint64_t svldff1_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
-svint16_t svldff1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
-svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
-svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
-svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
-svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
-svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
-svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
-svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
-svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
-svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
-svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
-svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
-svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
-svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
-svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
-svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
-svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
-svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
-svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
-svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
-svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
-svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
-svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
-svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
-svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
-svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
-svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
-svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
-svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
-svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
-svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
-svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
-svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
-svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
-svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
-svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
-svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
-svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
-svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
-svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
-svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
-svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
-svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
-svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
-svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
-svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
-svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
-svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
-svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
-svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
-svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
-svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
-svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
-svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
-svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
-svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
-svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
-svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
-svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
-svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
-svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
-svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
-svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
-svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
-svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
-svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
-svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
-svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
-svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
-svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32)))
-svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64)))
-svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16)))
-svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32)))
-svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64)))
-svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16)))
-svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32)))
-svuint32_t svldff1sb_u32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64)))
-svuint64_t svldff1sb_u64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16)))
-svuint16_t svldff1sb_u16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32)))
-svint32_t svldff1sb_s32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64)))
-svint64_t svldff1sb_s64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16)))
-svint16_t svldff1sb_s16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
-svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
-svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
-svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
-svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
-svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
-svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
-svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
-svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
-svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
-svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
-svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
-svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
-svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
-svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
-svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
-svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
-svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
-svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
-svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
-svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
-svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
-svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
-svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
-svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
-svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
-svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
-svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
-svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32)))
-svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64)))
-svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32)))
-svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64)))
-svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32)))
-svuint32_t svldff1sh_u32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64)))
-svuint64_t svldff1sh_u64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32)))
-svint32_t svldff1sh_s32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64)))
-svint64_t svldff1sh_s64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
-svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
-svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
-svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
-svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
-svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
-svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
-svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
-svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
-svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
-svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
-svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
-svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
-svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
-svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64)))
-svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64)))
-svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64)))
-svuint64_t svldff1sw_u64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64)))
-svint64_t svldff1sw_s64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
-svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
-svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
-svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
-svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
-svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
-svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
-svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
-svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
-svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
-svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
-svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
-svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
-svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
-svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
-svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
-svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32)))
-svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64)))
-svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16)))
-svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32)))
-svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64)))
-svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16)))
-svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32)))
-svuint32_t svldff1ub_u32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64)))
-svuint64_t svldff1ub_u64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16)))
-svuint16_t svldff1ub_u16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32)))
-svint32_t svldff1ub_s32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64)))
-svint64_t svldff1ub_s64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16)))
-svint16_t svldff1ub_s16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
-svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
-svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
-svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
-svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
-svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
-svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
-svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
-svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
-svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
-svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
-svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
-svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
-svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
-svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
-svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
-svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
-svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
-svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
-svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
-svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
-svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
-svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
-svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
-svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
-svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
-svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
-svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
-svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32)))
-svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64)))
-svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32)))
-svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64)))
-svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32)))
-svuint32_t svldff1uh_u32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64)))
-svuint64_t svldff1uh_u64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32)))
-svint32_t svldff1uh_s32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64)))
-svint64_t svldff1uh_s64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
-svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
-svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
-svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
-svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
-svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
-svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
-svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
-svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
-svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
-svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
-svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
-svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
-svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
-svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64)))
-svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64)))
-svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64)))
-svuint64_t svldff1uw_u64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64)))
-svint64_t svldff1uw_s64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
-svuint8_t svldnf1_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
-svuint32_t svldnf1_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
-svuint64_t svldnf1_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
-svuint16_t svldnf1_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
-svint8_t svldnf1_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
-svfloat64_t svldnf1_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
-svfloat32_t svldnf1_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
-svfloat16_t svldnf1_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
-svint32_t svldnf1_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
-svint64_t svldnf1_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
-svint16_t svldnf1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
-svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
-svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
-svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
-svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
-svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
-svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
-svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
-svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
-svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
-svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
-svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32)))
-svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64)))
-svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16)))
-svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32)))
-svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64)))
-svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16)))
-svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32)))
-svuint32_t svldnf1sb_u32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64)))
-svuint64_t svldnf1sb_u64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16)))
-svuint16_t svldnf1sb_u16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32)))
-svint32_t svldnf1sb_s32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64)))
-svint64_t svldnf1sb_s64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16)))
-svint16_t svldnf1sb_s16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32)))
-svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64)))
-svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32)))
-svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64)))
-svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32)))
-svuint32_t svldnf1sh_u32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64)))
-svuint64_t svldnf1sh_u64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32)))
-svint32_t svldnf1sh_s32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64)))
-svint64_t svldnf1sh_s64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64)))
-svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64)))
-svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64)))
-svuint64_t svldnf1sw_u64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64)))
-svint64_t svldnf1sw_s64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32)))
-svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64)))
-svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16)))
-svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32)))
-svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64)))
-svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16)))
-svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32)))
-svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64)))
-svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16)))
-svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32)))
-svint32_t svldnf1ub_s32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64)))
-svint64_t svldnf1ub_s64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16)))
-svint16_t svldnf1ub_s16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32)))
-svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64)))
-svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32)))
-svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64)))
-svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32)))
-svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64)))
-svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32)))
-svint32_t svldnf1uh_s32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64)))
-svint64_t svldnf1uh_s64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64)))
-svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64)))
-svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64)))
-svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64)))
-svint64_t svldnf1uw_s64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
-svuint8_t svldnt1_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
-svuint32_t svldnt1_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))
-svuint64_t svldnt1_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))
-svuint16_t svldnt1_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))
-svint8_t svldnt1_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))
-svfloat64_t svldnt1_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))
-svfloat32_t svldnt1_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))
-svfloat16_t svldnt1_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))
-svint32_t svldnt1_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))
-svint64_t svldnt1_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))
-svint16_t svldnt1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))
-svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))
-svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))
-svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))
-svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))
-svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))
-svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))
-svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))
-svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))
-svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))
-svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))
-svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))
-uint64_t svlen_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))
-uint64_t svlen_u32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))
-uint64_t svlen_u64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))
-uint64_t svlen_u16(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))
-uint64_t svlen_s8(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))
-uint64_t svlen_f64(svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))
-uint64_t svlen_f32(svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))
-uint64_t svlen_f16(svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))
-uint64_t svlen_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))
-uint64_t svlen_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))
-uint64_t svlen_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))
-svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))
-svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))
-svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))
-svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))
-svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))
-svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))
-svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))
-svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))
-svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))
-svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))
-svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))
-svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))
-svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))
-svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))
-svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))
-svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))
-svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))
-svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))
-svuint64_t svlsl_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))
-svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))
-svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))
-svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))
-svint64_t svlsl_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))
-svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))
-svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))
-svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))
-svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))
-svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))
-svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))
-svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))
-svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))
-svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))
-svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))
-svuint32_t svlsl_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))
-svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))
-svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))
-svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))
-svint32_t svlsl_s32_x(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))
-svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))
-svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))
-svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))
-svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))
-svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))
-svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))
-svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))
-svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))
-svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))
-svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))
-svuint8_t svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))
-svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))
-svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))
-svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))
-svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))
-svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))
-svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))
-svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))
-svuint16_t svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))
-svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))
-svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))
-svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))
-svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))
-svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))
-svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))
-svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))
-svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))
-svint16_t svlsl_wide_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))
-svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))
-svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))
-svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))
-svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))
-svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))
-svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))
-svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))
-svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))
-svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))
-svint8_t svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))
-svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))
-svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))
-svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))
-svuint32_t svlsl_wide_u32_z(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))
-svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))
-svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))
-svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))
-svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))
-svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))
-svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))
-svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))
-svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))
-svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))
-svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))
-svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))
-svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))
-svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))
-svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))
-svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))
-svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))
-svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))
-svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))
-svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))
-svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))
-svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))
-svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))
-svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))
-svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))
-svuint8_t svlsr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))
-svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))
-svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))
-svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))
-svuint8_t svlsr_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))
-svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))
-svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))
-svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))
-svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))
-svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))
-svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))
-svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))
-svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))
-svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))
-svuint32_t svlsr_wide_u32_m(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))
-svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))
-svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))
-svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))
-svuint16_t svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))
-svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))
-svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))
-svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))
-svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))
-svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))
-svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))
-svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))
-svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))
-svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))
-svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))
-svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))
-svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))
-svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))
-svuint32_t svmad_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))
-svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))
-svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))
-svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))
-svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))
-svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))
-svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))
-svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))
-svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))
-svuint64_t svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
-svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
-svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
-svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
-svint64_t svmad_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
-svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
-svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
-svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
-svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
-svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
-svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
-svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
-svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
-svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
-svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
-svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
-svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
-svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
-svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
-svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
-svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
-svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
-svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
-svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
-svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
-svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
-svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
-svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
-svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
-svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
-svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
-svuint8_t svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
-svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
-svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
-svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
-svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
-svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
-svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
-svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
-svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
-svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
-svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
-svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
-svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
-svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
-svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
-svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
-svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
-svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
-svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
-svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
-svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
-svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
-svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
-svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
-svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
-svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
-svint32_t svmax_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
-svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
-svint16_t svmax_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
-svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
-svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
-svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
-svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
-svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
-svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
-svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
-svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
-svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
-svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
-svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
-svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
-svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
-svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
-svuint64_t svmax_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
-svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
-svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
-svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
-svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
-svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
-svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
-svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
-svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
-svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
-svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
-svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
-svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
-svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
-svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
-svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
-svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
-svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
-svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
-svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
-svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
-svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
-svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
-svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
-svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
-svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
-svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
-svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
-svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
-svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
-svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
-svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
-svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
-svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
-svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
-svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
-svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
-svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
-svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
-svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
-svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
-svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
-svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
-svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
-svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
-svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
-svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
-svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
-svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
-svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
-svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
-svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
-svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
-svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
-svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
-svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
-svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
-float64_t svmaxnmv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
-float32_t svmaxnmv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
-float16_t svmaxnmv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
-float64_t svmaxv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
-float32_t svmaxv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
-float16_t svmaxv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))
-int8_t svmaxv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))
-int32_t svmaxv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))
-int64_t svmaxv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))
-int16_t svmaxv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))
-uint8_t svmaxv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))
-uint32_t svmaxv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))
-uint64_t svmaxv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))
-uint16_t svmaxv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))
-svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))
-svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))
-svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))
-svfloat64_t svmin_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))
-svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))
-svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))
-svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))
-svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))
-svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))
-svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))
-svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))
-svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))
-svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))
-svint8_t svmin_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))
-svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))
-svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))
-svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))
-svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))
-svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))
-svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))
-svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))
-svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))
-svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))
-svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))
-svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))
-svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))
-svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))
-svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))
-svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))
-svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))
-svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))
-svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))
-svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))
-svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))
-svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))
-svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))
-svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))
-svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))
-svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))
-svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))
-svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))
-svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))
-svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))
-svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))
-svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))
-svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))
-svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))
-svint32_t svmin_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))
-svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))
-svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))
-svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))
-svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))
-svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))
-svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))
-svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))
-svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))
-svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))
-svuint16_t svmin_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))
-svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))
-svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))
-svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))
-svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))
-svuint8_t svmin_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))
-svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))
-svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))
-svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))
-svfloat64_t svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))
-svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))
-svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))
-svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))
-svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))
-svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))
-svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))
-svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))
-svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))
-svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))
-svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))
-svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))
-svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))
-svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))
-svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))
-svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))
-svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))
-svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))
-float64_t svminnmv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))
-float32_t svminnmv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))
-float16_t svminnmv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))
-float64_t svminv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))
-float32_t svminv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))
-float16_t svminv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))
-int8_t svminv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))
-int32_t svminv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))
-int64_t svminv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))
-int16_t svminv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))
-uint8_t svminv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))
-uint32_t svminv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))
-uint64_t svminv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))
-uint16_t svminv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))
-svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))
-svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))
-svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))
-svfloat64_t svmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))
-svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))
-svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))
-svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))
-svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))
-svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))
-svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))
-svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))
-svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))
-svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))
-svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))
-svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))
-svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))
-svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))
-svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))
-svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))
-svuint64_t svmla_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))
-svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))
-svint8_t svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))
-svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))
-svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
-svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
-svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
-svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
-svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
-svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
-svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
-svint32_t svmla_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
-svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
-svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
-svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
-svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
-svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
-svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
-svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
-svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
-svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
-svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
-svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
-svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
-svuint32_t svmla_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
-svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
-svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
-svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
-svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
-svint64_t svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
-svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
-svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
-svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
-svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
-svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
-svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
-svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
-svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
-svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
-svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
-svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
-svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
-svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
-svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
-svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
-svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
-svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
-svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
-svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
-svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
-svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
-svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
-svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
-svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
-svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
-svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
-svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
-svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
-svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
-svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
-svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
-svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
-svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
-svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
-svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
-svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
-svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
-svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
-svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
-svuint64_t svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
-svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
-svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
-svint32_t svmls_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
-svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
-svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
-svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
-svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
-svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
-svuint16_t svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
-svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
-svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
-svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
-svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
-svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
-svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
-svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
-svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
-svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
-svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
-svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
-svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
-svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
-svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
-svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
-svuint64_t svmls_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
-svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
-svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
-svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
-svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
-svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
-svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
-svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
-svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
-svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
-svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
-svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
-svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
-svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
-svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
-svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
-svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
-svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
-svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
-svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
-svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
-svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
-svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
-svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
-svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
-svbool_t svmov_b_z(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
-svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
-svfloat32_t svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
-svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
-svfloat64_t svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
-svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
-svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
-svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
-svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
-svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
-svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
-svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
-svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
-svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
-svint8_t svmsb_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
-svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
-svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
-svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
-svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
-svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
-svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
-svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
-svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
-svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
-svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
-svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
-svuint8_t svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
-svuint32_t svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
-svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
-svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
-svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
-svint32_t svmsb_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
-svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
-svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
-svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
-svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
-svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
-svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
-svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
-svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
-svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
-svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
-svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
-svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
-svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
-svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
-svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
-svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
-svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
-svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
-svint16_t svmsb_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
-svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
-svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
-svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
-svuint16_t svmsb_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
-svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
-svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
-svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
-svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
-svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
-svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
-svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
-svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
-svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
-svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
-svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
-svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
-svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
-svfloat32_t svmul_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
-svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
-svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
-svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
-svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
-svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
-svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
-svfloat16_t svmul_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
-svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
-svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
-svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
-svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
-svint8_t svmul_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
-svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
-svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
-svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
-svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
-svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
-svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
-svuint16_t svmul_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
-svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
-svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
-svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
-svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
-svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
-svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
-svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
-svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
-svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
-svint32_t svmul_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
-svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
-svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
-svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
-svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
-svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
-svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
-svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
-svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
-svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
-svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
-svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
-svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))
-svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))
-svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))
-svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))
-svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))
-svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))
-svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))
-svint16_t svmul_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))
-svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))
-svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))
-svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))
-svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))
-svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))
-svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))
-svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))
-svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))
-svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))
-svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))
-svuint64_t svmul_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))
-svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))
-svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))
-svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))
-svint64_t svmul_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))
-svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))
-svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))
-svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))
-svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))
-svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))
-svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))
-svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))
-svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))
-svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))
-svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))
-svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))
-svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))
-svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))
-svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))
-svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))
-svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))
-svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))
-svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))
-svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))
-svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))
-svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))
-svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))
-svuint64_t svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))
-svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))
-svuint8_t svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))
-svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))
-svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))
-svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))
-svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))
-svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))
-svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))
-svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
-svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
-svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
-svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
-svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
-svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
-svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
-svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
-svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
-svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
-svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
-svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
-svuint16_t svmulh_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
-svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
-svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
-svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
-svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
-svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
-svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
-svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
-svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
-svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
-svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
-svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
-svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
-svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
-svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
-svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
-svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
-svfloat16_t svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
-svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
-svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
-svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
-svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
-svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
-svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
-svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
-svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
-svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
-svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
-svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
-svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
-svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
-svfloat64_t svneg_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
-svfloat32_t svneg_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
-svfloat16_t svneg_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
-svfloat64_t svneg_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
-svfloat32_t svneg_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
-svfloat16_t svneg_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
-svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
-svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
-svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
-svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
-svint8_t svneg_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
-svint32_t svneg_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
-svint64_t svneg_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
-svint16_t svneg_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
-svint8_t svneg_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
-svint32_t svneg_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
-svint64_t svneg_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
-svint16_t svneg_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
-svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
-svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
-svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
-svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
-svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
-svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
-svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
-svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
-svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
-svfloat64_t svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
-svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
-svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))
-svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))
-svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))
-svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))
-svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))
-svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))
-svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))
-svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))
-svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))
-svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))
-svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))
-svfloat32_t svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))
-svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))
-svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))
-svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))
-svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))
-svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))
-svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))
-svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))
-svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))
-svfloat32_t svnmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))
-svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))
-svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))
-svfloat32_t svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))
-svfloat16_t svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))
-svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))
-svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))
-svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))
-svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))
-svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))
-svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))
-svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))
-svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))
-svfloat16_t svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))
-svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))
-svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))
-svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))
-svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))
-svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))
-svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))
-svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))
-svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))
-svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))
-svfloat64_t svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))
-svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))
-svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))
-svfloat64_t svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))
-svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))
-svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))
-svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))
-svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))
-svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))
-svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))
-svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))
-svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))
-svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))
-svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))
-svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))
-svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))
-svfloat32_t svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))
-svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))
-svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))
-svbool_t svnot_b_z(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))
-svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))
-svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))
-svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))
-svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))
-svint8_t svnot_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))
-svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))
-svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))
-svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))
-svuint8_t svnot_u8_x(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))
-svuint32_t svnot_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))
-svuint64_t svnot_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))
-svuint16_t svnot_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))
-svint8_t svnot_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))
-svint32_t svnot_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))
-svint64_t svnot_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))
-svint16_t svnot_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))
-svuint8_t svnot_u8_z(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))
-svuint32_t svnot_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))
-svuint64_t svnot_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))
-svuint16_t svnot_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))
-svint8_t svnot_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))
-svint32_t svnot_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))
-svint64_t svnot_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))
-svint16_t svnot_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))
-svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))
-svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))
-svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))
-svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))
-svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))
-svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))
-svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))
-svint32_t svorr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))
-svint64_t svorr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))
-svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))
-svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))
-svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))
-svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))
-svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))
-svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))
-svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))
-svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))
-svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))
-svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))
-svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))
-svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))
-svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))
-svint8_t svorr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))
-svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))
-svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))
-svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))
-svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))
-svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))
-svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))
-svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))
-svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))
-svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))
-svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))
-svint16_t svorr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))
-svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))
-svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))
-svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))
-svuint16_t svorr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))
-svint8_t svorr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))
-svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))
-svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))
-svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))
-svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))
-svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))
-svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))
-svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))
-svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))
-svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))
-svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))
-svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))
-uint8_t svorv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))
-uint32_t svorv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))
-uint64_t svorv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))
-uint16_t svorv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))
-int8_t svorv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))
-int32_t svorv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))
-int64_t svorv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
-int16_t svorv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse_b();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
-svbool_t svpfirst_b(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8)))
-svbool_t svpnext_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32)))
-svbool_t svpnext_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64)))
-svbool_t svpnext_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16)))
-svbool_t svpnext_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb)))
-void svprfb(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
-void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
-void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
-void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
-void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
-void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
-void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
-void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
-void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum)))
-void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd)))
-void svprfd(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
-void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
-void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
-void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
-void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
-void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
-void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
-void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
-void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum)))
-void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh)))
-void svprfh(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
-void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
-void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
-void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
-void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
-void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
-void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
-void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
-void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum)))
-void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw)))
-void svprfw(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
-void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
-void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
-void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
-void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
-void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
-void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
-void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
-void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum)))
-void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any)))
-bool svptest_any(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first)))
-bool svptest_first(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last)))
-bool svptest_last(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8)))
-svbool_t svptrue_pat_b8(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32)))
-svbool_t svptrue_pat_b32(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64)))
-svbool_t svptrue_pat_b64(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16)))
-svbool_t svptrue_pat_b16(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8)))
-svbool_t svptrue_b8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32)))
-svbool_t svptrue_b32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64)))
-svbool_t svptrue_b64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16)))
-svbool_t svptrue_b16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
-svint8_t svqadd_n_s8(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
-svint32_t svqadd_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))
-svint64_t svqadd_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))
-svint16_t svqadd_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))
-svuint8_t svqadd_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))
-svuint32_t svqadd_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))
-svuint64_t svqadd_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))
-svuint16_t svqadd_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))
-svint8_t svqadd_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))
-svint32_t svqadd_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))
-svint64_t svqadd_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))
-svint16_t svqadd_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))
-svuint8_t svqadd_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))
-svuint32_t svqadd_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))
-svuint64_t svqadd_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))
-svuint16_t svqadd_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))
-int32_t svqdecb_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))
-int64_t svqdecb_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))
-uint32_t svqdecb_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))
-uint64_t svqdecb_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))
-int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))
-int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))
-uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))
-uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))
-int32_t svqdecd_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))
-int64_t svqdecd_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))
-uint32_t svqdecd_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))
-uint64_t svqdecd_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))
-svint64_t svqdecd_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))
-svuint64_t svqdecd_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))
-int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))
-int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))
-uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))
-uint64_t svqdecd_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))
-svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))
-svuint64_t svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))
-int32_t svqdech_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))
-int64_t svqdech_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))
-uint32_t svqdech_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))
-uint64_t svqdech_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))
-svint16_t svqdech_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))
-svuint16_t svqdech_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))
-int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))
-int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))
-uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))
-uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))
-svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))
-svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))
-int32_t svqdecp_n_s32_b8(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))
-int32_t svqdecp_n_s32_b32(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))
-int32_t svqdecp_n_s32_b64(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))
-int32_t svqdecp_n_s32_b16(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))
-int64_t svqdecp_n_s64_b8(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))
-int64_t svqdecp_n_s64_b32(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))
-int64_t svqdecp_n_s64_b64(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))
-int64_t svqdecp_n_s64_b16(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))
-uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))
-uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))
-uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))
-uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))
-uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))
-uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))
-uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))
-uint64_t svqdecp_n_u64_b16(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))
-svint32_t svqdecp_s32(svint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))
-svint64_t svqdecp_s64(svint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))
-svint16_t svqdecp_s16(svint16_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))
-svuint32_t svqdecp_u32(svuint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))
-svuint64_t svqdecp_u64(svuint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))
-svuint16_t svqdecp_u16(svuint16_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))
-int32_t svqdecw_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))
-int64_t svqdecw_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))
-uint32_t svqdecw_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))
-uint64_t svqdecw_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))
-svint32_t svqdecw_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))
-svuint32_t svqdecw_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))
-int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))
-int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))
-uint32_t svqdecw_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))
-uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))
-svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))
-svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))
-int32_t svqincb_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))
-int64_t svqincb_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))
-uint32_t svqincb_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))
-uint64_t svqincb_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))
-int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))
-int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))
-uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))
-uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))
-int32_t svqincd_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))
-int64_t svqincd_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))
-uint32_t svqincd_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))
-uint64_t svqincd_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))
-svint64_t svqincd_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))
-svuint64_t svqincd_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))
-int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))
-int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))
-uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))
-uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))
-svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))
-svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))
-int32_t svqinch_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))
-int64_t svqinch_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))
-uint32_t svqinch_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))
-uint64_t svqinch_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))
-svint16_t svqinch_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))
-svuint16_t svqinch_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))
-int32_t svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))
-int64_t svqinch_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))
-uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))
-uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))
-svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))
-svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))
-int32_t svqincp_n_s32_b8(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))
-int32_t svqincp_n_s32_b32(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))
-int32_t svqincp_n_s32_b64(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))
-int32_t svqincp_n_s32_b16(int32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))
-int64_t svqincp_n_s64_b8(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))
-int64_t svqincp_n_s64_b32(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))
-int64_t svqincp_n_s64_b64(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))
-int64_t svqincp_n_s64_b16(int64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))
-uint32_t svqincp_n_u32_b8(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))
-uint32_t svqincp_n_u32_b32(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))
-uint32_t svqincp_n_u32_b64(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))
-uint32_t svqincp_n_u32_b16(uint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))
-uint64_t svqincp_n_u64_b8(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))
-uint64_t svqincp_n_u64_b32(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))
-uint64_t svqincp_n_u64_b64(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))
-uint64_t svqincp_n_u64_b16(uint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))
-svint32_t svqincp_s32(svint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))
-svint64_t svqincp_s64(svint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))
-svint16_t svqincp_s16(svint16_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))
-svuint32_t svqincp_u32(svuint32_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))
-svuint64_t svqincp_u64(svuint64_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))
-svuint16_t svqincp_u16(svuint16_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))
-int32_t svqincw_n_s32(int32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))
-int64_t svqincw_n_s64(int64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))
-uint32_t svqincw_n_u32(uint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))
-uint64_t svqincw_n_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))
-svint32_t svqincw_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))
-svuint32_t svqincw_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))
-int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))
-int64_t svqincw_pat_n_s64(int64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))
-uint32_t svqincw_pat_n_u32(uint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))
-uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))
-svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))
-svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))
-svint8_t svqsub_n_s8(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))
-svint32_t svqsub_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))
-svint64_t svqsub_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))
-svint16_t svqsub_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))
-svuint8_t svqsub_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))
-svuint32_t svqsub_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))
-svuint64_t svqsub_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))
-svuint16_t svqsub_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))
-svint8_t svqsub_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))
-svint32_t svqsub_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))
-svint64_t svqsub_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))
-svint16_t svqsub_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))
-svuint8_t svqsub_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))
-svuint32_t svqsub_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))
-svuint64_t svqsub_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))
-svuint16_t svqsub_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))
-svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))
-svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))
-svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))
-svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))
-svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))
-svint32_t svrbit_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))
-svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))
-svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))
-svuint8_t svrbit_u8_x(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))
-svuint32_t svrbit_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))
-svuint64_t svrbit_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))
-svuint16_t svrbit_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))
-svint8_t svrbit_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))
-svint32_t svrbit_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))
-svint64_t svrbit_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))
-svint16_t svrbit_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))
-svuint8_t svrbit_u8_z(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))
-svuint32_t svrbit_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))
-svuint64_t svrbit_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))
-svuint16_t svrbit_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))
-svint8_t svrbit_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))
-svint32_t svrbit_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
-svint64_t svrbit_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
-svint16_t svrbit_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr)))
-svbool_t svrdffr();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z)))
-svbool_t svrdffr_z(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
-svfloat64_t svrecpe_f64(svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
-svfloat32_t svrecpe_f32(svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))
-svfloat16_t svrecpe_f16(svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))
-svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))
-svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))
-svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))
-svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))
-svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))
-svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))
-svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))
-svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))
-svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))
-svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))
-svfloat32_t svrecpx_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))
-svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))
-svuint8_t svrev_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))
-svuint32_t svrev_u32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))
-svuint64_t svrev_u64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))
-svuint16_t svrev_u16(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))
-svint8_t svrev_s8(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))
-svfloat64_t svrev_f64(svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))
-svfloat32_t svrev_f32(svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))
-svfloat16_t svrev_f16(svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))
-svint32_t svrev_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))
-svint64_t svrev_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))
-svint16_t svrev_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8)))
-svbool_t svrev_b8(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32)))
-svbool_t svrev_b32(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64)))
-svbool_t svrev_b64(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16)))
-svbool_t svrev_b16(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))
-svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))
-svuint64_t svrevb_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))
-svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))
-svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))
-svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))
-svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))
-svuint32_t svrevb_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))
-svuint64_t svrevb_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))
-svuint16_t svrevb_u16_x(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))
-svint32_t svrevb_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))
-svint64_t svrevb_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))
-svint16_t svrevb_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))
-svuint32_t svrevb_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))
-svuint64_t svrevb_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))
-svuint16_t svrevb_u16_z(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))
-svint32_t svrevb_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))
-svint64_t svrevb_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))
-svint16_t svrevb_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))
-svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))
-svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))
-svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))
-svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))
-svuint32_t svrevh_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))
-svuint64_t svrevh_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))
-svint32_t svrevh_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))
-svint64_t svrevh_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))
-svuint32_t svrevh_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))
-svuint64_t svrevh_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))
-svint32_t svrevh_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))
-svint64_t svrevh_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))
-svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))
-svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))
-svuint64_t svrevw_u64_x(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))
-svint64_t svrevw_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))
-svuint64_t svrevw_u64_z(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))
-svint64_t svrevw_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))
-svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))
-svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))
-svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))
-svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))
-svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))
-svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))
-svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))
-svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))
-svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))
-svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))
-svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))
-svfloat16_t svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))
-svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))
-svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))
-svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))
-svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))
-svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))
-svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))
-svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))
-svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))
-svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))
-svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))
-svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))
-svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))
-svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))
-svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))
-svfloat16_t svrintm_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))
-svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))
-svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))
-svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))
-svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))
-svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))
-svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))
-svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))
-svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))
-svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))
-svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))
-svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))
-svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))
-svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))
-svfloat32_t svrintp_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))
-svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))
-svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))
-svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))
-svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))
-svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))
-svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))
-svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))
-svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))
-svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))
-svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))
-svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))
-svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))
-svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))
-svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))
-svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))
-svfloat16_t svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))
-svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))
-svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))
-svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))
-svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))
-svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))
-svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))
-svfloat64_t svrsqrte_f64(svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))
-svfloat32_t svrsqrte_f32(svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))
-svfloat16_t svrsqrte_f16(svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))
-svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))
-svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))
-svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))
-svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))
-svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))
-svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))
-svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))
-svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))
-svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))
-svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))
-svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))
-svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))
-svfloat64_t svscale_f64_m(svbool_t, svfloat64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))
-svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))
-svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))
-svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))
-svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))
-svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))
-svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))
-svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))
-svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))
-svbool_t svsel_b(svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))
-svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))
-svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))
-svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))
-svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))
-svint8_t svsel_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))
-svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))
-svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))
-svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))
-svint32_t svsel_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))
-svint64_t svsel_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))
-svint16_t svsel_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))
-svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))
-svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))
-svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))
-svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))
-svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))
-svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))
-svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))
-svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))
-svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))
-svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))
-svint16x2_t svset2_s16(svint16x2_t, uint64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))
-svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))
-svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))
-svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))
-svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))
-svint8x3_t svset3_s8(svint8x3_t, uint64_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))
-svfloat64x3_t svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))
-svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))
-svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))
-svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))
-svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))
-svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))
-svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))
-svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))
-svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))
-svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))
-svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))
-svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))
-svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))
-svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))
-svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
-svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
-svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr)))
-void svsetffr();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
-svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
-svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))
-svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))
-svuint16_t svsplice_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))
-svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))
-svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))
-svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))
-svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))
-svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))
-svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))
-svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))
-svfloat64_t svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))
-svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))
-svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))
-svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))
-svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))
-svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))
-svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))
-svfloat32_t svsqrt_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))
-svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))
-void svst1_u8(svbool_t, uint8_t *, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))
-void svst1_u32(svbool_t, uint32_t *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))
-void svst1_u64(svbool_t, uint64_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))
-void svst1_u16(svbool_t, uint16_t *, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))
-void svst1_s8(svbool_t, int8_t *, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))
-void svst1_f64(svbool_t, float64_t *, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))
-void svst1_f32(svbool_t, float32_t *, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))
-void svst1_f16(svbool_t, float16_t *, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))
-void svst1_s32(svbool_t, int32_t *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
-void svst1_s64(svbool_t, int64_t *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
-void svst1_s16(svbool_t, int16_t *, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
-void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
-void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
-void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
-void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
-void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
-void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
-void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
-void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
-void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
-void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
-void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
-void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
-void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
-void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
-void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
-void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
-void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
-void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
-void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
-void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
-void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
-void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
-void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
-void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
-void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
-void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
-void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
-void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
-void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
-void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
-void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
-void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
-void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
-void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
-void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
-void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
-void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
-void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
-void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
-void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
-void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
-void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
-void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
-void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))
-void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))
-void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))
-void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))
-void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))
-void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))
-void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))
-void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))
-void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))
-void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))
-void svst1b_s32(svbool_t, int8_t *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))
-void svst1b_s64(svbool_t, int8_t *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))
-void svst1b_s16(svbool_t, int8_t *, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))
-void svst1b_u32(svbool_t, uint8_t *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
-void svst1b_u64(svbool_t, uint8_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
-void svst1b_u16(svbool_t, uint8_t *, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
-void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
-void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
-void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
-void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
-void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
-void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
-void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
-void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
-void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
-void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
-void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
-void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
-void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
-void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
-void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
-void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
-void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
-void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))
-void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))
-void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))
-void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))
-void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))
-void svst1h_s32(svbool_t, int16_t *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))
-void svst1h_s64(svbool_t, int16_t *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
-void svst1h_u32(svbool_t, uint16_t *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
-void svst1h_u64(svbool_t, uint16_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
-void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
-void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
-void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
-void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
-void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
-void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
-void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
-void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
-void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
-void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
-void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
-void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
-void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
-void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
-void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
-void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
-void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
-void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
-void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
-void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
-void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
-void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
-void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
-void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
-void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
-void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
-void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
-void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
-void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
-void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))
-void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))
-void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
-void svst1w_s64(svbool_t, int32_t *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
-void svst1w_u64(svbool_t, uint32_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
-void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
-void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
-void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
-void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
-void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
-void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
-void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
-void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
-void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
-void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
-void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
-void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
-void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
-void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
-void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
-void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))
-void svst2_u8(svbool_t, uint8_t *, svuint8x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))
-void svst2_u32(svbool_t, uint32_t *, svuint32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))
-void svst2_u64(svbool_t, uint64_t *, svuint64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))
-void svst2_u16(svbool_t, uint16_t *, svuint16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))
-void svst2_s8(svbool_t, int8_t *, svint8x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))
-void svst2_f64(svbool_t, float64_t *, svfloat64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))
-void svst2_f32(svbool_t, float32_t *, svfloat32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))
-void svst2_f16(svbool_t, float16_t *, svfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))
-void svst2_s32(svbool_t, int32_t *, svint32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))
-void svst2_s64(svbool_t, int64_t *, svint64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))
-void svst2_s16(svbool_t, int16_t *, svint16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))
-void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))
-void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))
-void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))
-void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))
-void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))
-void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))
-void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))
-void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))
-void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))
-void svst2_vnum_s64(svbool_t, int64_t *, int64_t, svint64x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))
-void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))
-void svst3_u8(svbool_t, uint8_t *, svuint8x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))
-void svst3_u32(svbool_t, uint32_t *, svuint32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))
-void svst3_u64(svbool_t, uint64_t *, svuint64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))
-void svst3_u16(svbool_t, uint16_t *, svuint16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))
-void svst3_s8(svbool_t, int8_t *, svint8x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))
-void svst3_f64(svbool_t, float64_t *, svfloat64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))
-void svst3_f32(svbool_t, float32_t *, svfloat32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))
-void svst3_f16(svbool_t, float16_t *, svfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))
-void svst3_s32(svbool_t, int32_t *, svint32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))
-void svst3_s64(svbool_t, int64_t *, svint64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))
-void svst3_s16(svbool_t, int16_t *, svint16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))
-void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))
-void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))
-void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))
-void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))
-void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))
-void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))
-void svst3_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))
-void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))
-void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))
-void svst3_vnum_s64(svbool_t, int64_t *, int64_t, svint64x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))
-void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))
-void svst4_u8(svbool_t, uint8_t *, svuint8x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))
-void svst4_u32(svbool_t, uint32_t *, svuint32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))
-void svst4_u64(svbool_t, uint64_t *, svuint64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))
-void svst4_u16(svbool_t, uint16_t *, svuint16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))
-void svst4_s8(svbool_t, int8_t *, svint8x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))
-void svst4_f64(svbool_t, float64_t *, svfloat64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))
-void svst4_f32(svbool_t, float32_t *, svfloat32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))
-void svst4_f16(svbool_t, float16_t *, svfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))
-void svst4_s32(svbool_t, int32_t *, svint32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))
-void svst4_s64(svbool_t, int64_t *, svint64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))
-void svst4_s16(svbool_t, int16_t *, svint16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))
-void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))
-void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))
-void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))
-void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))
-void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))
-void svst4_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))
-void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))
-void svst4_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))
-void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))
-void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))
-void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))
-void svstnt1_u8(svbool_t, uint8_t *, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))
-void svstnt1_u32(svbool_t, uint32_t *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))
-void svstnt1_u64(svbool_t, uint64_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))
-void svstnt1_u16(svbool_t, uint16_t *, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))
-void svstnt1_s8(svbool_t, int8_t *, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))
-void svstnt1_f64(svbool_t, float64_t *, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))
-void svstnt1_f32(svbool_t, float32_t *, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))
-void svstnt1_f16(svbool_t, float16_t *, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))
-void svstnt1_s32(svbool_t, int32_t *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))
-void svstnt1_s64(svbool_t, int64_t *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))
-void svstnt1_s16(svbool_t, int16_t *, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))
-void svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))
-void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))
-void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))
-void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))
-void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))
-void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))
-void svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))
-void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))
-void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))
-void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))
-void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))
-svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))
-svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))
-svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))
-svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))
-svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))
-svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))
-svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))
-svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))
-svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))
-svuint8_t svsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))
-svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))
-svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))
-svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))
-svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))
-svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))
-svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))
-svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))
-svuint8_t svsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))
-svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))
-svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))
-svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))
-svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))
-svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))
-svint64_t svsub_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))
-svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))
-svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))
-svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))
-svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))
-svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))
-svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))
-svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))
-svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))
-svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))
-svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))
-svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))
-svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))
-svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))
-svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))
-svfloat16_t svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))
-svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))
-svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))
-svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))
-svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))
-svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))
-svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))
-svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))
-svint8_t svsub_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))
-svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))
-svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))
-svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))
-svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))
-svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))
-svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))
-svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))
-svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))
-svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))
-svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))
-svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))
-svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))
-svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))
-svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))
-svuint16_t svsub_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))
-svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))
-svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))
-svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))
-svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))
-svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))
-svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))
-svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))
-svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))
-svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))
-svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))
-svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))
-svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))
-svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))
-svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))
-svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))
-svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))
-svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))
-svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))
-svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))
-svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))
-svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))
-svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))
-svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))
-svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))
-svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))
-svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))
-svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))
-svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))
-svint16_t svsubr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))
-svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))
-svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))
-svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))
-svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))
-svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))
-svint32_t svsubr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))
-svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))
-svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))
-svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))
-svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))
-svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))
-svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))
-svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))
-svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))
-svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))
-svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))
-svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))
-svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))
-svuint32_t svsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))
-svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))
-svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))
-svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))
-svint32_t svsubr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))
-svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))
-svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))
-svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))
-svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))
-svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))
-svuint16_t svsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))
-svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))
-svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))
-svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))
-svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))
-svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))
-svuint32_t svsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))
-svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))
-svuint16_t svsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))
-svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))
-svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))
-svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))
-svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))
-svuint8_t svtbl_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))
-svuint32_t svtbl_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))
-svuint64_t svtbl_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))
-svuint16_t svtbl_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))
-svint8_t svtbl_s8(svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))
-svfloat64_t svtbl_f64(svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))
-svfloat32_t svtbl_f32(svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))
-svfloat16_t svtbl_f16(svfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))
-svint32_t svtbl_s32(svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
-svint64_t svtbl_s64(svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
-svint16_t svtbl_s16(svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
-svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
-svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
-svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
-svuint8_t svtrn1_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
-svuint32_t svtrn1_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))
-svuint64_t svtrn1_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))
-svuint16_t svtrn1_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))
-svint8_t svtrn1_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))
-svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))
-svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))
-svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))
-svint32_t svtrn1_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))
-svint64_t svtrn1_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))
-svint16_t svtrn1_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8)))
-svbool_t svtrn1_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32)))
-svbool_t svtrn1_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64)))
-svbool_t svtrn1_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16)))
-svbool_t svtrn1_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))
-svuint8_t svtrn2_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))
-svuint32_t svtrn2_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))
-svuint64_t svtrn2_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))
-svuint16_t svtrn2_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))
-svint8_t svtrn2_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))
-svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))
-svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))
-svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))
-svint32_t svtrn2_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
-svint64_t svtrn2_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
-svint16_t svtrn2_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8)))
-svbool_t svtrn2_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32)))
-svbool_t svtrn2_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64)))
-svbool_t svtrn2_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16)))
-svbool_t svtrn2_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
-svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
-svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
-svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
-svfloat64_t svtssel_f64(svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
-svfloat32_t svtssel_f32(svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
-svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8)))
-svuint8x2_t svundef2_u8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32)))
-svuint32x2_t svundef2_u32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64)))
-svuint64x2_t svundef2_u64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16)))
-svuint16x2_t svundef2_u16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8)))
-svint8x2_t svundef2_s8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64)))
-svfloat64x2_t svundef2_f64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32)))
-svfloat32x2_t svundef2_f32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16)))
-svfloat16x2_t svundef2_f16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32)))
-svint32x2_t svundef2_s32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64)))
-svint64x2_t svundef2_s64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16)))
-svint16x2_t svundef2_s16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8)))
-svuint8x3_t svundef3_u8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32)))
-svuint32x3_t svundef3_u32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64)))
-svuint64x3_t svundef3_u64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16)))
-svuint16x3_t svundef3_u16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8)))
-svint8x3_t svundef3_s8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64)))
-svfloat64x3_t svundef3_f64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32)))
-svfloat32x3_t svundef3_f32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16)))
-svfloat16x3_t svundef3_f16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32)))
-svint32x3_t svundef3_s32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64)))
-svint64x3_t svundef3_s64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16)))
-svint16x3_t svundef3_s16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8)))
-svuint8x4_t svundef4_u8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32)))
-svuint32x4_t svundef4_u32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64)))
-svuint64x4_t svundef4_u64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16)))
-svuint16x4_t svundef4_u16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8)))
-svint8x4_t svundef4_s8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64)))
-svfloat64x4_t svundef4_f64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32)))
-svfloat32x4_t svundef4_f32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16)))
-svfloat16x4_t svundef4_f16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32)))
-svint32x4_t svundef4_s32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64)))
-svint64x4_t svundef4_s64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16)))
-svint16x4_t svundef4_s16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8)))
-svuint8_t svundef_u8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32)))
-svuint32_t svundef_u32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64)))
-svuint64_t svundef_u64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16)))
-svuint16_t svundef_u16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8)))
-svint8_t svundef_s8();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64)))
-svfloat64_t svundef_f64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32)))
-svfloat32_t svundef_f32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16)))
-svfloat16_t svundef_f16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32)))
-svint32_t svundef_s32();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64)))
-svint64_t svundef_s64();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16)))
-svint16_t svundef_s16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
-svbool_t svunpkhi_b(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
-svint32_t svunpkhi_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))
-svint64_t svunpkhi_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))
-svint16_t svunpkhi_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))
-svuint32_t svunpkhi_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))
-svuint64_t svunpkhi_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))
-svuint16_t svunpkhi_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))
-svbool_t svunpklo_b(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))
-svint32_t svunpklo_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))
-svint64_t svunpklo_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))
-svint16_t svunpklo_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))
-svuint32_t svunpklo_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))
-svuint64_t svunpklo_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))
-svuint16_t svunpklo_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))
-svuint8_t svuzp1_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))
-svuint32_t svuzp1_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))
-svuint64_t svuzp1_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))
-svuint16_t svuzp1_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))
-svint8_t svuzp1_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))
-svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))
-svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))
-svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))
-svint32_t svuzp1_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))
-svint64_t svuzp1_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))
-svint16_t svuzp1_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8)))
-svbool_t svuzp1_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32)))
-svbool_t svuzp1_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64)))
-svbool_t svuzp1_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16)))
-svbool_t svuzp1_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))
-svuint8_t svuzp2_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))
-svuint32_t svuzp2_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))
-svuint64_t svuzp2_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))
-svuint16_t svuzp2_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))
-svint8_t svuzp2_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))
-svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))
-svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))
-svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))
-svint32_t svuzp2_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))
-svint64_t svuzp2_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))
-svint16_t svuzp2_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8)))
-svbool_t svuzp2_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32)))
-svbool_t svuzp2_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64)))
-svbool_t svuzp2_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16)))
-svbool_t svuzp2_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))
-svbool_t svwhilele_b8_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))
-svbool_t svwhilele_b32_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))
-svbool_t svwhilele_b64_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))
-svbool_t svwhilele_b16_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))
-svbool_t svwhilele_b8_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))
-svbool_t svwhilele_b32_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))
-svbool_t svwhilele_b64_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))
-svbool_t svwhilele_b16_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))
-svbool_t svwhilele_b8_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))
-svbool_t svwhilele_b32_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))
-svbool_t svwhilele_b64_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))
-svbool_t svwhilele_b16_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))
-svbool_t svwhilele_b8_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))
-svbool_t svwhilele_b32_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))
-svbool_t svwhilele_b64_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))
-svbool_t svwhilele_b16_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))
-svbool_t svwhilelt_b8_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))
-svbool_t svwhilelt_b32_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))
-svbool_t svwhilelt_b64_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))
-svbool_t svwhilelt_b16_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))
-svbool_t svwhilelt_b8_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))
-svbool_t svwhilelt_b32_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))
-svbool_t svwhilelt_b64_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))
-svbool_t svwhilelt_b16_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))
-svbool_t svwhilelt_b8_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))
-svbool_t svwhilelt_b32_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))
-svbool_t svwhilelt_b64_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))
-svbool_t svwhilelt_b16_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))
-svbool_t svwhilelt_b8_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))
-svbool_t svwhilelt_b32_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
-svbool_t svwhilelt_b64_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
-svbool_t svwhilelt_b16_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr)))
-void svwrffr(svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
-svuint8_t svzip1_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
-svuint32_t svzip1_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))
-svuint64_t svzip1_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))
-svuint16_t svzip1_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))
-svint8_t svzip1_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))
-svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))
-svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))
-svfloat16_t svzip1_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))
-svint32_t svzip1_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))
-svint64_t svzip1_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))
-svint16_t svzip1_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8)))
-svbool_t svzip1_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32)))
-svbool_t svzip1_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64)))
-svbool_t svzip1_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16)))
-svbool_t svzip1_b16(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))
-svuint8_t svzip2_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))
-svuint32_t svzip2_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))
-svuint64_t svzip2_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))
-svuint16_t svzip2_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))
-svint8_t svzip2_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))
-svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))
-svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))
-svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))
-svint32_t svzip2_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
-svint64_t svzip2_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
-svint16_t svzip2_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8)))
-svbool_t svzip2_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32)))
-svbool_t svzip2_b32(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64)))
-svbool_t svzip2_b64(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16)))
-svbool_t svzip2_b16(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))
-svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))
-svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))
-svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))
-svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))
-svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))
-svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))
-svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))
-svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))
-svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))
-svint8_t svabd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))
-svint32_t svabd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))
-svint64_t svabd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))
-svint16_t svabd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))
-svint8_t svabd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))
-svint32_t svabd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))
-svint64_t svabd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))
-svint16_t svabd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))
-svint8_t svabd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))
-svint32_t svabd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))
-svint64_t svabd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))
-svint16_t svabd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))
-svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))
-svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))
-svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))
-svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))
-svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))
-svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))
-svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))
-svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))
-svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))
-svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))
-svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))
-svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))
-svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))
-svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))
-svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))
-svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))
-svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))
-svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))
-svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))
-svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))
-svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))
-svint8_t svabd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))
-svint32_t svabd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))
-svint64_t svabd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))
-svint16_t svabd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))
-svint8_t svabd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))
-svint32_t svabd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))
-svint64_t svabd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))
-svint16_t svabd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))
-svint8_t svabd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))
-svint32_t svabd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))
-svint64_t svabd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))
-svint16_t svabd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))
-svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))
-svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))
-svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))
-svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))
-svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))
-svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))
-svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))
-svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))
-svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))
-svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))
-svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))
-svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))
-svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))
-svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))
-svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))
-svfloat64_t svabs_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))
-svfloat32_t svabs_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))
-svfloat16_t svabs_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))
-svfloat64_t svabs_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))
-svfloat32_t svabs_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))
-svfloat16_t svabs_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))
-svint8_t svabs_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))
-svint32_t svabs_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))
-svint64_t svabs_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))
-svint16_t svabs_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))
-svint8_t svabs_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))
-svint32_t svabs_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))
-svint64_t svabs_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))
-svint16_t svabs_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))
-svint8_t svabs_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))
-svint32_t svabs_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))
-svint64_t svabs_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))
-svint16_t svabs_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))
-svbool_t svacge(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))
-svbool_t svacge(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))
-svbool_t svacge(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))
-svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))
-svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))
-svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))
-svbool_t svacgt(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))
-svbool_t svacgt(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))
-svbool_t svacgt(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))
-svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))
-svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))
-svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))
-svbool_t svacle(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))
-svbool_t svacle(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))
-svbool_t svacle(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))
-svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))
-svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))
-svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))
-svbool_t svaclt(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))
-svbool_t svaclt(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))
-svbool_t svaclt(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))
-svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))
-svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))
-svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))
-svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))
-svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))
-svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))
-svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))
-svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))
-svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))
-svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))
-svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))
-svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))
-svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))
-svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))
-svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))
-svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))
-svint8_t svadd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))
-svint32_t svadd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))
-svint64_t svadd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))
-svint16_t svadd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))
-svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))
-svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))
-svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))
-svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))
-svint8_t svadd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))
-svint32_t svadd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))
-svint64_t svadd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))
-svint16_t svadd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))
-svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))
-svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))
-svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))
-svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))
-svint8_t svadd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))
-svint32_t svadd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))
-svint64_t svadd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))
-svint16_t svadd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))
-svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))
-svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))
-svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))
-svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))
-svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))
-svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))
-svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))
-svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))
-svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))
-svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))
-svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))
-svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))
-svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))
-svint8_t svadd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))
-svint32_t svadd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))
-svint64_t svadd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))
-svint16_t svadd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))
-svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))
-svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))
-svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))
-svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))
-svint8_t svadd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))
-svint32_t svadd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))
-svint64_t svadd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))
-svint16_t svadd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))
-svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))
-svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))
-svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))
-svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))
-svint8_t svadd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))
-svint32_t svadd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))
-svint64_t svadd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))
-svint16_t svadd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))
-float64_t svadda(svbool_t, float64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))
-float32_t svadda(svbool_t, float32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))
-float16_t svadda(svbool_t, float16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))
-int64_t svaddv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))
-int64_t svaddv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))
-int64_t svaddv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))
-int64_t svaddv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))
-uint64_t svaddv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))
-uint64_t svaddv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))
-uint64_t svaddv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))
-uint64_t svaddv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))
-float64_t svaddv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
-float32_t svaddv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
-float16_t svaddv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
-svuint32_t svadrb_offset(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
-svuint64_t svadrb_offset(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
-svuint32_t svadrb_offset(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
-svuint64_t svadrb_offset(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
-svuint32_t svadrd_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
-svuint64_t svadrd_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
-svuint32_t svadrd_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
-svuint64_t svadrd_index(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
-svuint32_t svadrh_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
-svuint64_t svadrh_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
-svuint32_t svadrh_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
-svuint64_t svadrh_index(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
-svuint32_t svadrw_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
-svuint64_t svadrw_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
-svuint32_t svadrw_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
-svuint64_t svadrw_index(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
-svbool_t svand_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
-svuint8_t svand_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))
-svuint32_t svand_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))
-svuint64_t svand_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))
-svuint16_t svand_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))
-svint8_t svand_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))
-svint32_t svand_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))
-svint64_t svand_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))
-svint16_t svand_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))
-svuint8_t svand_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))
-svuint32_t svand_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))
-svuint64_t svand_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))
-svuint16_t svand_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))
-svint8_t svand_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))
-svint32_t svand_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))
-svint64_t svand_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))
-svint16_t svand_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))
-svuint8_t svand_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))
-svuint32_t svand_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))
-svuint64_t svand_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))
-svuint16_t svand_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))
-svint8_t svand_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))
-svint32_t svand_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))
-svint64_t svand_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))
-svint16_t svand_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))
-svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))
-svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))
-svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))
-svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))
-svint8_t svand_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))
-svint32_t svand_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))
-svint64_t svand_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))
-svint16_t svand_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))
-svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))
-svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))
-svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))
-svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))
-svint8_t svand_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))
-svint32_t svand_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))
-svint64_t svand_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))
-svint16_t svand_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))
-svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))
-svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))
-svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))
-svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))
-svint8_t svand_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))
-svint32_t svand_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))
-svint64_t svand_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))
-svint16_t svand_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))
-uint8_t svandv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))
-uint32_t svandv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))
-uint64_t svandv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))
-uint16_t svandv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))
-int8_t svandv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))
-int32_t svandv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))
-int64_t svandv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))
-int16_t svandv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))
-svint8_t svasr_m(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))
-svint32_t svasr_m(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))
-svint64_t svasr_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))
-svint16_t svasr_m(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))
-svint8_t svasr_x(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))
-svint32_t svasr_x(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))
-svint64_t svasr_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))
-svint16_t svasr_x(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))
-svint8_t svasr_z(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))
-svint32_t svasr_z(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))
-svint64_t svasr_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))
-svint16_t svasr_z(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))
-svint8_t svasr_m(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))
-svint32_t svasr_m(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))
-svint64_t svasr_m(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))
-svint16_t svasr_m(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))
-svint8_t svasr_x(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))
-svint32_t svasr_x(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))
-svint64_t svasr_x(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))
-svint16_t svasr_x(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))
-svint8_t svasr_z(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))
-svint32_t svasr_z(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))
-svint64_t svasr_z(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))
-svint16_t svasr_z(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))
-svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))
-svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))
-svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))
-svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))
-svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))
-svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))
-svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))
-svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))
-svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))
-svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))
-svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))
-svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))
-svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))
-svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))
-svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))
-svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))
-svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))
-svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))
-svint8_t svasrd_m(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))
-svint32_t svasrd_m(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))
-svint64_t svasrd_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))
-svint16_t svasrd_m(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))
-svint8_t svasrd_x(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))
-svint32_t svasrd_x(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))
-svint64_t svasrd_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))
-svint16_t svasrd_x(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))
-svint8_t svasrd_z(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))
-svint32_t svasrd_z(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))
-svint64_t svasrd_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))
-svint16_t svasrd_z(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))
-svbool_t svbic_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))
-svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))
-svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))
-svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))
-svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))
-svint8_t svbic_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))
-svint32_t svbic_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))
-svint64_t svbic_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))
-svint16_t svbic_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))
-svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))
-svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))
-svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))
-svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))
-svint8_t svbic_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))
-svint32_t svbic_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))
-svint64_t svbic_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))
-svint16_t svbic_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))
-svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))
-svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))
-svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))
-svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))
-svint8_t svbic_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))
-svint32_t svbic_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))
-svint64_t svbic_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))
-svint16_t svbic_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))
-svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))
-svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))
-svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))
-svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))
-svint8_t svbic_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))
-svint32_t svbic_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))
-svint64_t svbic_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))
-svint16_t svbic_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))
-svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))
-svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))
-svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))
-svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))
-svint8_t svbic_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))
-svint32_t svbic_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))
-svint64_t svbic_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))
-svint16_t svbic_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))
-svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))
-svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))
-svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))
-svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))
-svint8_t svbic_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))
-svint32_t svbic_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))
-svint64_t svbic_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))
-svint16_t svbic_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))
-svbool_t svbrka_m(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))
-svbool_t svbrka_z(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))
-svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))
-svbool_t svbrkb_z(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))
-svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))
-svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))
-svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))
-svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))
-svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))
-svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))
-svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))
-svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))
-svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))
-svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))
-svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))
-svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))
-uint8_t svclasta(svbool_t, uint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))
-uint32_t svclasta(svbool_t, uint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))
-uint64_t svclasta(svbool_t, uint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))
-uint16_t svclasta(svbool_t, uint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))
-int8_t svclasta(svbool_t, int8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))
-float64_t svclasta(svbool_t, float64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))
-float32_t svclasta(svbool_t, float32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))
-float16_t svclasta(svbool_t, float16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))
-int32_t svclasta(svbool_t, int32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))
-int64_t svclasta(svbool_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))
-int16_t svclasta(svbool_t, int16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))
-svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))
-svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))
-svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))
-svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))
-svint8_t svclasta(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))
-svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))
-svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))
-svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))
-svint32_t svclasta(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))
-svint64_t svclasta(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))
-svint16_t svclasta(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))
-uint8_t svclastb(svbool_t, uint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))
-uint32_t svclastb(svbool_t, uint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))
-uint64_t svclastb(svbool_t, uint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))
-uint16_t svclastb(svbool_t, uint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))
-int8_t svclastb(svbool_t, int8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))
-float64_t svclastb(svbool_t, float64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))
-float32_t svclastb(svbool_t, float32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))
-float16_t svclastb(svbool_t, float16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))
-int32_t svclastb(svbool_t, int32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))
-int64_t svclastb(svbool_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))
-int16_t svclastb(svbool_t, int16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))
-svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))
-svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))
-svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))
-svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))
-svint8_t svclastb(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))
-svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))
-svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))
-svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))
-svint32_t svclastb(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))
-svint64_t svclastb(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))
-svint16_t svclastb(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))
-svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))
-svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))
-svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))
-svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))
-svuint8_t svcls_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))
-svuint32_t svcls_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))
-svuint64_t svcls_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))
-svuint16_t svcls_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))
-svuint8_t svcls_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))
-svuint32_t svcls_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))
-svuint64_t svcls_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))
-svuint16_t svcls_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))
-svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))
-svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))
-svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))
-svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))
-svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))
-svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))
-svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))
-svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))
-svuint8_t svclz_x(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))
-svuint32_t svclz_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))
-svuint64_t svclz_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))
-svuint16_t svclz_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))
-svuint8_t svclz_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))
-svuint32_t svclz_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))
-svuint64_t svclz_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))
-svuint16_t svclz_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))
-svuint8_t svclz_z(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))
-svuint32_t svclz_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))
-svuint64_t svclz_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))
-svuint16_t svclz_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))
-svuint8_t svclz_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))
-svuint32_t svclz_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))
-svuint64_t svclz_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))
-svuint16_t svclz_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))
-svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))
-svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))
-svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))
-svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))
-svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))
-svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))
-svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))
-svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))
-svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))
-svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))
-svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))
-svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))
-svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))
-svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))
-svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))
-svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))
-svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))
-svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))
-svbool_t svcmpeq(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))
-svbool_t svcmpeq(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))
-svbool_t svcmpeq(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))
-svbool_t svcmpeq(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))
-svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))
-svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))
-svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))
-svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))
-svbool_t svcmpeq(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))
-svbool_t svcmpeq(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))
-svbool_t svcmpeq(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))
-svbool_t svcmpeq(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))
-svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))
-svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))
-svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))
-svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))
-svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))
-svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))
-svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))
-svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))
-svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))
-svbool_t svcmpge(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))
-svbool_t svcmpge(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))
-svbool_t svcmpge(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))
-svbool_t svcmpge(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))
-svbool_t svcmpge(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))
-svbool_t svcmpge(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))
-svbool_t svcmpge(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))
-svbool_t svcmpge(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))
-svbool_t svcmpge(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))
-svbool_t svcmpge(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))
-svbool_t svcmpge(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))
-svbool_t svcmpge(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))
-svbool_t svcmpge(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))
-svbool_t svcmpge(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))
-svbool_t svcmpge(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))
-svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))
-svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))
-svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))
-svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))
-svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))
-svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))
-svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))
-svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))
-svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))
-svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))
-svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))
-svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))
-svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))
-svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))
-svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))
-svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))
-svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))
-svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))
-svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))
-svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))
-svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))
-svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))
-svbool_t svcmpgt(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))
-svbool_t svcmpgt(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))
-svbool_t svcmpgt(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
-svbool_t svcmpgt(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
-svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
-svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
-svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
-svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
-svbool_t svcmpgt(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
-svbool_t svcmpgt(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
-svbool_t svcmpgt(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
-svbool_t svcmpgt(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
-svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
-svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
-svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
-svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
-svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
-svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
-svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
-svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
-svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
-svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
-svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
-svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
-svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
-svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
-svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
-svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
-svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
-svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
-svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
-svbool_t svcmple(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
-svbool_t svcmple(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
-svbool_t svcmple(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
-svbool_t svcmple(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
-svbool_t svcmple(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
-svbool_t svcmple(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
-svbool_t svcmple(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
-svbool_t svcmple(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
-svbool_t svcmple(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
-svbool_t svcmple(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
-svbool_t svcmple(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
-svbool_t svcmple(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
-svbool_t svcmple(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
-svbool_t svcmple(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
-svbool_t svcmple(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
-svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
-svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
-svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
-svbool_t svcmple(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
-svbool_t svcmple(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
-svbool_t svcmple(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
-svbool_t svcmple(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
-svbool_t svcmple_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
-svbool_t svcmple_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
-svbool_t svcmple_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
-svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
-svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
-svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
-svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
-svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
-svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
-svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
-svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
-svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
-svbool_t svcmplt(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
-svbool_t svcmplt(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
-svbool_t svcmplt(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
-svbool_t svcmplt(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
-svbool_t svcmplt(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
-svbool_t svcmplt(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
-svbool_t svcmplt(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
-svbool_t svcmplt(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
-svbool_t svcmplt(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
-svbool_t svcmplt(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
-svbool_t svcmplt(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
-svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
-svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
-svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
-svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
-svbool_t svcmplt(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
-svbool_t svcmplt(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
-svbool_t svcmplt(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
-svbool_t svcmplt(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
-svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
-svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
-svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
-svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
-svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
-svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
-svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
-svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
-svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
-svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
-svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
-svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
-svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
-svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
-svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
-svbool_t svcmpne(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
-svbool_t svcmpne(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
-svbool_t svcmpne(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
-svbool_t svcmpne(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
-svbool_t svcmpne(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
-svbool_t svcmpne(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
-svbool_t svcmpne(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
-svbool_t svcmpne(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
-svbool_t svcmpne(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
-svbool_t svcmpne(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
-svbool_t svcmpne(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
-svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
-svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
-svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
-svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
-svbool_t svcmpne(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
-svbool_t svcmpne(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
-svbool_t svcmpne(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
-svbool_t svcmpne(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
-svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
-svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
-svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
-svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
-svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
-svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
-svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
-svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
-svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
-svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
-svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
-svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
-svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
-svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
-svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
-svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
-svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
-svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
-svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
-svint8_t svcnot_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
-svint32_t svcnot_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
-svint64_t svcnot_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
-svint16_t svcnot_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
-svuint8_t svcnot_x(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
-svuint32_t svcnot_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
-svuint64_t svcnot_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
-svuint16_t svcnot_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
-svint8_t svcnot_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
-svint32_t svcnot_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
-svint64_t svcnot_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
-svint16_t svcnot_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
-svuint8_t svcnot_z(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
-svuint32_t svcnot_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
-svuint64_t svcnot_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
-svuint16_t svcnot_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
-svint8_t svcnot_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
-svint32_t svcnot_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
-svint64_t svcnot_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
-svint16_t svcnot_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
-svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
-svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
-svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
-svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
-svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
-svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
-svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
-svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
-svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
-svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
-svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
-svuint8_t svcnt_x(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
-svuint32_t svcnt_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
-svuint64_t svcnt_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
-svuint16_t svcnt_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
-svuint8_t svcnt_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
-svuint64_t svcnt_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
-svuint32_t svcnt_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
-svuint16_t svcnt_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
-svuint32_t svcnt_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
-svuint64_t svcnt_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
-svuint16_t svcnt_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
-svuint8_t svcnt_z(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
-svuint32_t svcnt_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
-svuint64_t svcnt_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
-svuint16_t svcnt_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
-svuint8_t svcnt_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
-svuint64_t svcnt_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
-svuint32_t svcnt_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
-svuint16_t svcnt_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
-svuint32_t svcnt_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
-svuint64_t svcnt_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
-svuint16_t svcnt_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
-svuint32_t svcompact(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
-svuint64_t svcompact(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
-svfloat64_t svcompact(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
-svfloat32_t svcompact(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
-svint32_t svcompact(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
-svint64_t svcompact(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
-svuint8x2_t svcreate2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
-svuint32x2_t svcreate2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
-svuint64x2_t svcreate2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
-svuint16x2_t svcreate2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
-svint8x2_t svcreate2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
-svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
-svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
-svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
-svint32x2_t svcreate2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
-svint64x2_t svcreate2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
-svint16x2_t svcreate2(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
-svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
-svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
-svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
-svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
-svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
-svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
-svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
-svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
-svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
-svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
-svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
-svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
-svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
-svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
-svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
-svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
-svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
-svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
-svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
-svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
-svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
-svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
-svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
-svfloat16_t svcvt_f16_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
-svfloat16_t svcvt_f16_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
-svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
-svfloat32_t svcvt_f32_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
-svfloat32_t svcvt_f32_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
-svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
-svfloat64_t svcvt_f64_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
-svfloat64_t svcvt_f64_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
-svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
-svint16_t svcvt_s16_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
-svint16_t svcvt_s16_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
-svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
-svint32_t svcvt_s32_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
-svint32_t svcvt_s32_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
-svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
-svint32_t svcvt_s32_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
-svint32_t svcvt_s32_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
-svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
-svint32_t svcvt_s32_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
-svint32_t svcvt_s32_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
-svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
-svint64_t svcvt_s64_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
-svint64_t svcvt_s64_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
-svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
-svint64_t svcvt_s64_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
-svint64_t svcvt_s64_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
-svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
-svint64_t svcvt_s64_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
-svint64_t svcvt_s64_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
-svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
-svuint16_t svcvt_u16_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
-svuint16_t svcvt_u16_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
-svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
-svuint32_t svcvt_u32_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
-svuint32_t svcvt_u32_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
-svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
-svuint32_t svcvt_u32_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
-svuint32_t svcvt_u32_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
-svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
-svuint32_t svcvt_u32_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
-svuint32_t svcvt_u32_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
-svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
-svuint64_t svcvt_u64_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
-svuint64_t svcvt_u64_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
-svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
-svuint64_t svcvt_u64_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
-svuint64_t svcvt_u64_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
-svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
-svuint64_t svcvt_u64_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
-svuint64_t svcvt_u64_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
-svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
-svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
-svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
-svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
-svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
-svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
-svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
-svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
-svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
-svint32_t svdiv_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
-svint64_t svdiv_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
-svint32_t svdiv_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
-svint64_t svdiv_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
-svint32_t svdiv_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
-svint64_t svdiv_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
-svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
-svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
-svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
-svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
-svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
-svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
-svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
-svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
-svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
-svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
-svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
-svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
-svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
-svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
-svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
-svint32_t svdiv_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
-svint64_t svdiv_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
-svint32_t svdiv_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
-svint64_t svdiv_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
-svint32_t svdiv_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
-svint64_t svdiv_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
-svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
-svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
-svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
-svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
-svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
-svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
-svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
-svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
-svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
-svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
-svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
-svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
-svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
-svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
-svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
-svint32_t svdivr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
-svint64_t svdivr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
-svint32_t svdivr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
-svint64_t svdivr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
-svint32_t svdivr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
-svint64_t svdivr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
-svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
-svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
-svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
-svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
-svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
-svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
-svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
-svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
-svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
-svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
-svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
-svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
-svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
-svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
-svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
-svint32_t svdivr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
-svint64_t svdivr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
-svint32_t svdivr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
-svint64_t svdivr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
-svint32_t svdivr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
-svint64_t svdivr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
-svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
-svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
-svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
-svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))
-svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))
-svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))
-svint32_t svdot(svint32_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))
-svint64_t svdot(svint64_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))
-svuint32_t svdot(svuint32_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))
-svuint64_t svdot(svuint64_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))
-svint32_t svdot(svint32_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))
-svint64_t svdot(svint64_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))
-svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))
-svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))
-svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))
-svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))
-svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))
-svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))
-svuint8_t svdup_u8(uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))
-svuint32_t svdup_u32(uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))
-svuint64_t svdup_u64(uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))
-svuint16_t svdup_u16(uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))
-svint8_t svdup_s8(int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))
-svfloat64_t svdup_f64(float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))
-svfloat32_t svdup_f32(float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))
-svfloat16_t svdup_f16(float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))
-svint32_t svdup_s32(int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))
-svint64_t svdup_s64(int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))
-svint16_t svdup_s16(int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))
-svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))
-svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))
-svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))
-svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))
-svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))
-svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))
-svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))
-svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))
-svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))
-svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))
-svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))
-svbool_t svdup_b8(bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))
-svbool_t svdup_b32(bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))
-svbool_t svdup_b64(bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))
-svbool_t svdup_b16(bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))
-svuint8_t svdup_u8_x(svbool_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))
-svuint32_t svdup_u32_x(svbool_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))
-svuint64_t svdup_u64_x(svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))
-svuint16_t svdup_u16_x(svbool_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))
-svint8_t svdup_s8_x(svbool_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))
-svfloat64_t svdup_f64_x(svbool_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))
-svfloat32_t svdup_f32_x(svbool_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))
-svfloat16_t svdup_f16_x(svbool_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))
-svint32_t svdup_s32_x(svbool_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))
-svint64_t svdup_s64_x(svbool_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))
-svint16_t svdup_s16_x(svbool_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))
-svuint8_t svdup_u8_z(svbool_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))
-svuint32_t svdup_u32_z(svbool_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))
-svuint64_t svdup_u64_z(svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))
-svuint16_t svdup_u16_z(svbool_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))
-svint8_t svdup_s8_z(svbool_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))
-svfloat64_t svdup_f64_z(svbool_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))
-svfloat32_t svdup_f32_z(svbool_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))
-svfloat16_t svdup_f16_z(svbool_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))
-svint32_t svdup_s32_z(svbool_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))
-svint64_t svdup_s64_z(svbool_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))
-svint16_t svdup_s16_z(svbool_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))
-svuint8_t svdup_lane(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))
-svuint32_t svdup_lane(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))
-svuint64_t svdup_lane(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))
-svuint16_t svdup_lane(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))
-svint8_t svdup_lane(svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))
-svfloat64_t svdup_lane(svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))
-svfloat32_t svdup_lane(svfloat32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))
-svfloat16_t svdup_lane(svfloat16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))
-svint32_t svdup_lane(svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
-svint64_t svdup_lane(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
-svint16_t svdup_lane(svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
-svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
-svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))
-svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))
-svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))
-svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))
-svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))
-svuint64_t svdupq_u64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
-svfloat64_t svdupq_f64(float64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
-svint64_t svdupq_s64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
-svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
-svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
-svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
-svbool_t svdupq_b32(bool, bool, bool, bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
-svbool_t svdupq_b64(bool, bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
-svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
-svuint8_t svdupq_lane(svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
-svuint32_t svdupq_lane(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))
-svuint64_t svdupq_lane(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))
-svuint16_t svdupq_lane(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))
-svint8_t svdupq_lane(svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))
-svfloat64_t svdupq_lane(svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))
-svfloat32_t svdupq_lane(svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))
-svfloat16_t svdupq_lane(svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))
-svint32_t svdupq_lane(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))
-svint64_t svdupq_lane(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))
-svint16_t svdupq_lane(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))
-svbool_t sveor_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))
-svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))
-svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))
-svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))
-svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))
-svint8_t sveor_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))
-svint32_t sveor_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))
-svint64_t sveor_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))
-svint16_t sveor_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))
-svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))
-svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))
-svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))
-svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))
-svint8_t sveor_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))
-svint32_t sveor_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))
-svint64_t sveor_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))
-svint16_t sveor_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))
-svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))
-svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))
-svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))
-svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))
-svint8_t sveor_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))
-svint32_t sveor_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))
-svint64_t sveor_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))
-svint16_t sveor_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))
-svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))
-svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))
-svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))
-svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))
-svint8_t sveor_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))
-svint32_t sveor_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))
-svint64_t sveor_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))
-svint16_t sveor_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))
-svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))
-svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))
-svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))
-svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))
-svint8_t sveor_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))
-svint32_t sveor_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))
-svint64_t sveor_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))
-svint16_t sveor_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))
-svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))
-svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))
-svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))
-svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))
-svint8_t sveor_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))
-svint32_t sveor_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))
-svint64_t sveor_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))
-svint16_t sveor_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))
-uint8_t sveorv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))
-uint32_t sveorv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))
-uint64_t sveorv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))
-uint16_t sveorv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))
-int8_t sveorv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))
-int32_t sveorv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
-int64_t sveorv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
-int16_t sveorv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
-svfloat64_t svexpa(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
-svfloat32_t svexpa(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
-svfloat16_t svexpa(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
-svuint8_t svext(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
-svuint32_t svext(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))
-svuint64_t svext(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))
-svuint16_t svext(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))
-svint8_t svext(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))
-svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))
-svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))
-svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))
-svint32_t svext(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))
-svint64_t svext(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))
-svint16_t svext(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))
-svint32_t svextb_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))
-svint64_t svextb_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))
-svint16_t svextb_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))
-svint32_t svextb_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))
-svint64_t svextb_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))
-svint16_t svextb_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))
-svint32_t svextb_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))
-svint64_t svextb_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))
-svint16_t svextb_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))
-svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))
-svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))
-svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))
-svuint32_t svextb_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))
-svuint64_t svextb_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))
-svuint16_t svextb_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))
-svuint32_t svextb_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))
-svuint64_t svextb_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))
-svuint16_t svextb_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))
-svint32_t svexth_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))
-svint64_t svexth_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))
-svint32_t svexth_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))
-svint64_t svexth_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))
-svint32_t svexth_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))
-svint64_t svexth_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))
-svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))
-svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))
-svuint32_t svexth_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))
-svuint64_t svexth_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))
-svuint32_t svexth_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))
-svuint64_t svexth_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))
-svint64_t svextw_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))
-svint64_t svextw_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))
-svint64_t svextw_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))
-svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))
-svuint64_t svextw_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))
-svuint64_t svextw_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))
-svuint8_t svget2(svuint8x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))
-svuint32_t svget2(svuint32x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))
-svuint64_t svget2(svuint64x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))
-svuint16_t svget2(svuint16x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))
-svint8_t svget2(svint8x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))
-svfloat64_t svget2(svfloat64x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))
-svfloat32_t svget2(svfloat32x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))
-svfloat16_t svget2(svfloat16x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))
-svint32_t svget2(svint32x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))
-svint64_t svget2(svint64x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))
-svint16_t svget2(svint16x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))
-svuint8_t svget3(svuint8x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))
-svuint32_t svget3(svuint32x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))
-svuint64_t svget3(svuint64x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))
-svuint16_t svget3(svuint16x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))
-svint8_t svget3(svint8x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))
-svfloat64_t svget3(svfloat64x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))
-svfloat32_t svget3(svfloat32x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))
-svfloat16_t svget3(svfloat16x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))
-svint32_t svget3(svint32x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))
-svint64_t svget3(svint64x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))
-svint16_t svget3(svint16x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))
-svuint8_t svget4(svuint8x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))
-svuint32_t svget4(svuint32x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))
-svuint64_t svget4(svuint64x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))
-svuint16_t svget4(svuint16x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))
-svint8_t svget4(svint8x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))
-svfloat64_t svget4(svfloat64x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))
-svfloat32_t svget4(svfloat32x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))
-svfloat16_t svget4(svfloat16x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))
-svint32_t svget4(svint32x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))
-svint64_t svget4(svint64x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))
-svint16_t svget4(svint16x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))
-svuint8_t svinsr(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))
-svuint32_t svinsr(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))
-svuint64_t svinsr(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))
-svuint16_t svinsr(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))
-svint8_t svinsr(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))
-svfloat64_t svinsr(svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))
-svfloat32_t svinsr(svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))
-svfloat16_t svinsr(svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))
-svint32_t svinsr(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))
-svint64_t svinsr(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))
-svint16_t svinsr(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))
-uint8_t svlasta(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))
-uint32_t svlasta(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))
-uint64_t svlasta(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))
-uint16_t svlasta(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))
-int8_t svlasta(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))
-float64_t svlasta(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))
-float32_t svlasta(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))
-float16_t svlasta(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))
-int32_t svlasta(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))
-int64_t svlasta(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))
-int16_t svlasta(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))
-uint8_t svlastb(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))
-uint32_t svlastb(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))
-uint64_t svlastb(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))
-uint16_t svlastb(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))
-int8_t svlastb(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))
-float64_t svlastb(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))
-float32_t svlastb(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))
-float16_t svlastb(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))
-int32_t svlastb(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))
-int64_t svlastb(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))
-int16_t svlastb(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))
-svuint8_t svld1(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))
-svuint32_t svld1(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))
-svuint64_t svld1(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))
-svuint16_t svld1(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))
-svint8_t svld1(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))
-svfloat64_t svld1(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))
-svfloat32_t svld1(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))
-svfloat16_t svld1(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))
-svint32_t svld1(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
-svint64_t svld1(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
-svint16_t svld1(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
-svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
-svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
-svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
-svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
-svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
-svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
-svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
-svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
-svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
-svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
-svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
-svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
-svuint32_t svld1_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
-svuint64_t svld1_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
-svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
-svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
-svint32_t svld1_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
-svint64_t svld1_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
-svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
-svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
-svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
-svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
-svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
-svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
-svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
-svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
-svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
-svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
-svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
-svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
-svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
-svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
-svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
-svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
-svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
-svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
-svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
-svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
-svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
-svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
-svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
-svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))
-svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))
-svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))
-svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))
-svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))
-svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))
-svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))
-svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))
-svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))
-svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))
-svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))
-svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))
-svuint8_t svld1rq(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))
-svuint32_t svld1rq(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))
-svuint64_t svld1rq(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))
-svuint16_t svld1rq(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))
-svint8_t svld1rq(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))
-svfloat64_t svld1rq(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))
-svfloat32_t svld1rq(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))
-svfloat16_t svld1rq(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))
-svint32_t svld1rq(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))
-svint64_t svld1rq(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))
-svint16_t svld1rq(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
-svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
-svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
-svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
-svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
-svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
-svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
-svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
-svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
-svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
-svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
-svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
-svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
-svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
-svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
-svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
-svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
-svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
-svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
-svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
-svint64_t svld1sw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
-svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
-svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
-svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
-svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
-svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
-svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
-svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
-svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
-svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
-svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
-svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
-svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
-svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
-svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
-svint32_t svld1ub_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
-svint64_t svld1ub_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
-svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
-svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
-svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
-svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
-svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
-svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
-svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
-svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
-svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
-svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
-svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
-svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
-svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
-svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
-svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
-svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
-svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
-svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
-svint32_t svld1uh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
-svint64_t svld1uh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
-svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
-svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
-svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
-svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
-svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
-svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
-svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
-svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
-svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
-svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
-svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
-svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
-svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
-svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
-svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
-svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
-svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
-svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
-svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
-svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
-svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
-svint64_t svld1uw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
-svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
-svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
-svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
-svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
-svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
-svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
-svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
-svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))
-svuint8x2_t svld2(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))
-svuint32x2_t svld2(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))
-svuint64x2_t svld2(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))
-svuint16x2_t svld2(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))
-svint8x2_t svld2(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))
-svfloat64x2_t svld2(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))
-svfloat32x2_t svld2(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))
-svfloat16x2_t svld2(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))
-svint32x2_t svld2(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))
-svint64x2_t svld2(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))
-svint16x2_t svld2(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))
-svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))
-svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))
-svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))
-svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))
-svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))
-svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))
-svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))
-svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))
-svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))
-svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))
-svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))
-svuint8x3_t svld3(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))
-svuint32x3_t svld3(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))
-svuint64x3_t svld3(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))
-svuint16x3_t svld3(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))
-svint8x3_t svld3(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))
-svfloat64x3_t svld3(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))
-svfloat32x3_t svld3(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))
-svfloat16x3_t svld3(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))
-svint32x3_t svld3(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))
-svint64x3_t svld3(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))
-svint16x3_t svld3(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))
-svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))
-svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))
-svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))
-svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))
-svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))
-svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))
-svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))
-svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))
-svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))
-svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))
-svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t);
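-// svld4: four-way de-interleave into an x4 tuple.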
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))
-svuint8x4_t svld4(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))
-svuint32x4_t svld4(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))
-svuint64x4_t svld4(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))
-svuint16x4_t svld4(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))
-svint8x4_t svld4(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))
-svfloat64x4_t svld4(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))
-svfloat32x4_t svld4(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))
-svfloat16x4_t svld4(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))
-svint32x4_t svld4(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))
-svint64x4_t svld4(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))
-svint16x4_t svld4(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))
-svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))
-svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))
-svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))
-svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))
-svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))
-svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))
-svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))
-svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))
-svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))
-svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))
-svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t);
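-// First-faulting loads: svldff1 takes a fault only on the first active
-// element. A fault on any later element is suppressed and instead clears
-// the corresponding first-fault register (FFR) bits, which software can
-// inspect with svrdffr() to resume the load.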
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
-svuint8_t svldff1(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
-svuint32_t svldff1(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
-svuint64_t svldff1(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
-svuint16_t svldff1(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
-svint8_t svldff1(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
-svfloat64_t svldff1(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
-svfloat32_t svldff1(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
-svfloat16_t svldff1(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
-svint32_t svldff1(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
-svint64_t svldff1(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
-svint16_t svldff1(svbool_t, int16_t const *);
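-// Gather addressing comes in two shapes: the ...base forms take a vector of
-// addresses plus a scalar displacement, while the pointer forms take a
-// scalar base plus a vector of displacements. In both, _offset is measured
-// in bytes and _index is scaled by the element size.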
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
-svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
-svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
-svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
-svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
-svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
-svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
-svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
-svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
-svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
-svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
-svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
-svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
-svuint32_t svldff1_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
-svuint64_t svldff1_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
-svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
-svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
-svint32_t svldff1_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
-svint64_t svldff1_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
-svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
-svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
-svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
-svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
-svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
-svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
-svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
-svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
-svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
-svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
-svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
-svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
-svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
-svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
-svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
-svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
-svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
-svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
-svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
-svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
-svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
-svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
-svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
-svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
-svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
-svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
-svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
-svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
-svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
-svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
-svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
-svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
-svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
-svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
-svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t);
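-// Extending first-faulting gathers: svldff1sb/sh/sw load 8-, 16- or 32-bit
-// memory elements and sign-extend them into 32- or 64-bit vector lanes.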
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
-svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
-svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
-svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
-svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
-svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
-svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
-svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
-svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
-svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
-svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
-svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
-svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
-svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
-svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
-svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
-svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
-svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
-svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
-svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
-svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
-svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
-svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
-svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
-svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
-svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
-svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
-svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
-svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
-svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
-svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
-svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
-svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
-svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
-svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
-svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
-svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
-svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
-svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
-svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
-svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
-svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
-svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
-svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
-svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
-svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
-svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
-svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
-svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
-svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
-svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
-svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
-svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
-svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
-svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
-svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
-svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
-svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
-svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
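-// svldff1ub/uh/uw are the zero-extending counterparts of the signed forms
-// above.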
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
-svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
-svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
-svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
-svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
-svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
-svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
-svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
-svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
-svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
-svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
-svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
-svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
-svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
-svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
-svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
-svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
-svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
-svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
-svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
-svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
-svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
-svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
-svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
-svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
-svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
-svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
-svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
-svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
-svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
-svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
-svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
-svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
-svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
-svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
-svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
-svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
-svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
-svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
-svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
-svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
-svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
-svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
-svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
-svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
-svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
-svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
-svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
-svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
-svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
-svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
-svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
-svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
-svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
-svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
-svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
-svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
-svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
-svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
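-// Non-faulting loads: svldnf1 never takes a memory fault at all; any element
-// that would fault (including the first) is suppressed via the FFR, making
-// it safe to probe past the end of a buffer.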
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
-svuint8_t svldnf1(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
-svuint32_t svldnf1(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
-svuint64_t svldnf1(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
-svuint16_t svldnf1(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
-svint8_t svldnf1(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
-svfloat64_t svldnf1(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
-svfloat32_t svldnf1(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
-svfloat16_t svldnf1(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
-svint32_t svldnf1(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
-svint64_t svldnf1(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
-svint16_t svldnf1(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
-svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
-svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
-svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
-svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
-svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
-svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
-svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
-svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
-svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
-svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
-svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t);
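-// Non-temporal loads: svldnt1 behaves like svld1 but hints that the data
-// will not be reused soon, allowing the cache hierarchy to bypass it.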
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
-svuint8_t svldnt1(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
-svuint32_t svldnt1(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))
-svuint64_t svldnt1(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))
-svuint16_t svldnt1(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))
-svint8_t svldnt1(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))
-svfloat64_t svldnt1(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))
-svfloat32_t svldnt1(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))
-svfloat16_t svldnt1(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))
-svint32_t svldnt1(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))
-svint64_t svldnt1(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))
-svint16_t svldnt1(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))
-svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))
-svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))
-svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))
-svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))
-svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))
-svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))
-svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))
-svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))
-svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))
-svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))
-svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t);
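-// svlen returns the number of lanes a full vector of the given element type
-// holds on this machine, i.e. the hardware vector length divided by the
-// element size.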
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))
-uint64_t svlen(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))
-uint64_t svlen(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))
-uint64_t svlen(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))
-uint64_t svlen(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))
-uint64_t svlen(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))
-uint64_t svlen(svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))
-uint64_t svlen(svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))
-uint64_t svlen(svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))
-uint64_t svlen(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))
-uint64_t svlen(svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))
-uint64_t svlen(svint16_t);
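-// Predicated logical shift left. The suffix picks the predication policy:
-// _m merges (inactive lanes keep the first vector operand), _z zeroes
-// inactive lanes, and _x leaves them unspecified so the compiler may pick
-// the cheapest encoding. The _n forms take a scalar shift amount.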
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))
-svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))
-svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))
-svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))
-svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))
-svint8_t svlsl_m(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))
-svint32_t svlsl_m(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))
-svint64_t svlsl_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))
-svint16_t svlsl_m(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))
-svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))
-svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))
-svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))
-svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))
-svint8_t svlsl_x(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))
-svint32_t svlsl_x(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))
-svint64_t svlsl_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))
-svint16_t svlsl_x(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))
-svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))
-svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))
-svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))
-svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))
-svint8_t svlsl_z(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))
-svint32_t svlsl_z(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))
-svint64_t svlsl_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))
-svint16_t svlsl_z(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))
-svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))
-svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))
-svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))
-svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))
-svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))
-svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))
-svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))
-svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))
-svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))
-svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))
-svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))
-svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))
-svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))
-svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))
-svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))
-svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))
-svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))
-svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))
-svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))
-svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))
-svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))
-svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))
-svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))
-svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t);
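-// The _wide forms shift 8-, 16- or 32-bit lanes by amounts held in (or, for
-// the _n forms, broadcast to) 64-bit elements.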
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))
-svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))
-svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))
-svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))
-svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))
-svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))
-svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))
-svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))
-svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))
-svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))
-svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))
-svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))
-svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))
-svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))
-svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))
-svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))
-svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))
-svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))
-svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))
-svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))
-svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))
-svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))
-svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))
-svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))
-svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))
-svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))
-svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))
-svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))
-svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))
-svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))
-svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))
-svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))
-svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))
-svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))
-svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))
-svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))
-svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t);
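-// Logical shift right, defined for unsigned element types only; the _m/_x/_z
-// and _wide variants follow the same scheme as svlsl above.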
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))
-svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))
-svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))
-svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))
-svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))
-svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))
-svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))
-svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))
-svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))
-svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))
-svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))
-svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))
-svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))
-svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))
-svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))
-svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))
-svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))
-svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))
-svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))
-svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))
-svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))
-svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))
-svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))
-svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))
-svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))
-svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))
-svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))
-svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))
-svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))
-svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))
-svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))
-svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))
-svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))
-svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))
-svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))
-svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))
-svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))
-svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))
-svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))
-svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))
-svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))
-svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))
-svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))
-svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))
-svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))
-svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))
-svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))
-svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))
-svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))
-svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))
-svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))
-svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))
-svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))
-svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))
-svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))
-svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))
-svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))
-svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))
-svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))
-svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))
-svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))
-svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))
-svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
-svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
-svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
-svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
-svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
-svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
-svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
-svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
-svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
-svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
-svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
-svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
-svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
-svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
-svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
-svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
-svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
-svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
-svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
-svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
-svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
-svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
-svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
-svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
-svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
-svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
-svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
-svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
-svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
-svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
-svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
-svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
-svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
-svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
-svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
-svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
-svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
-svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
-svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
-svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
-svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
-svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
-svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
-svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
-svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
-svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
-svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
-svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
-svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
-svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
-svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
-svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
-svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
-svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
-svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
-svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
-svint8_t svmax_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
-svint32_t svmax_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
-svint64_t svmax_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
-svint16_t svmax_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
-svint8_t svmax_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
-svint32_t svmax_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
-svint64_t svmax_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
-svint16_t svmax_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
-svint8_t svmax_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
-svint32_t svmax_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
-svint64_t svmax_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
-svint16_t svmax_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
-svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
-svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
-svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
-svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
-svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
-svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
-svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
-svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
-svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
-svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
-svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
-svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
-svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
-svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
-svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
-svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
-svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
-svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
-svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
-svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
-svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
-svint8_t svmax_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
-svint32_t svmax_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
-svint64_t svmax_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
-svint16_t svmax_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
-svint8_t svmax_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
-svint32_t svmax_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
-svint64_t svmax_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
-svint16_t svmax_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
-svint8_t svmax_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
-svint32_t svmax_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
-svint64_t svmax_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
-svint16_t svmax_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
-svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
-svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
-svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
-svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
-svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
-svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
-svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
-svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
-svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
-svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
-svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
-svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
-svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
-svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
-svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
-svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
-svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
-svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
-svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
-svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
-svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
-svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
-svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
-svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
-svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
-svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
-svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
-svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
-svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
-svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
-float64_t svmaxnmv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
-float32_t svmaxnmv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
-float16_t svmaxnmv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
-float64_t svmaxv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
-float32_t svmaxv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
-float16_t svmaxv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))
-int8_t svmaxv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))
-int32_t svmaxv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))
-int64_t svmaxv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))
-int16_t svmaxv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))
-uint8_t svmaxv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))
-uint32_t svmaxv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))
-uint64_t svmaxv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))
-uint16_t svmaxv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))
-svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))
-svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))
-svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))
-svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))
-svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))
-svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))
-svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))
-svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))
-svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))
-svint8_t svmin_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))
-svint32_t svmin_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))
-svint64_t svmin_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))
-svint16_t svmin_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))
-svint8_t svmin_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))
-svint32_t svmin_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))
-svint64_t svmin_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))
-svint16_t svmin_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))
-svint8_t svmin_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))
-svint32_t svmin_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))
-svint64_t svmin_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))
-svint16_t svmin_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))
-svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))
-svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))
-svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))
-svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))
-svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))
-svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))
-svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))
-svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))
-svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))
-svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))
-svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))
-svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))
-svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))
-svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))
-svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))
-svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))
-svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))
-svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))
-svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))
-svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))
-svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))
-svint8_t svmin_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))
-svint32_t svmin_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))
-svint64_t svmin_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))
-svint16_t svmin_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))
-svint8_t svmin_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))
-svint32_t svmin_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))
-svint64_t svmin_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))
-svint16_t svmin_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))
-svint8_t svmin_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))
-svint32_t svmin_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))
-svint64_t svmin_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))
-svint16_t svmin_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))
-svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))
-svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))
-svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))
-svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))
-svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))
-svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))
-svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))
-svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))
-svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))
-svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))
-svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))
-svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))
-svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))
-svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))
-svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))
-svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))
-svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))
-svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))
-svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))
-svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))
-svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))
-svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))
-svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))
-svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))
-svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))
-svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))
-svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))
-svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))
-svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))
-svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))
-float64_t svminnmv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))
-float32_t svminnmv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))
-float16_t svminnmv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))
-float64_t svminv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))
-float32_t svminv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))
-float16_t svminv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))
-int8_t svminv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))
-int32_t svminv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))
-int64_t svminv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))
-int16_t svminv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))
-uint8_t svminv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))
-uint32_t svminv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))
-uint64_t svminv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))
-uint16_t svminv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))
-svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))
-svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))
-svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))
-svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))
-svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))
-svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))
-svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))
-svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))
-svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))
-svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))
-svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))
-svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))
-svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))
-svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))
-svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))
-svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))
-svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))
-svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))
-svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))
-svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))
-svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))
-svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))
-svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))
-svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
-svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
-svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
-svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
-svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
-svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
-svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
-svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
-svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
-svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
-svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
-svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
-svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
-svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
-svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
-svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
-svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
-svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
-svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
-svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
-svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
-svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
-svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
-svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
-svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
-svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
-svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
-svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
-svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
-svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
-svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
-svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
-svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
-svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
-svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
-svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
-svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
-svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
-svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
-svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
-svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
-svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
-svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
-svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
-svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
-svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
-svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
-svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
-svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
-svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
-svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
-svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
-svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
-svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
-svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
-svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
-svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
-svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
-svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
-svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
-svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
-svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
-svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
-svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
-svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
-svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
-svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
-svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
-svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
-svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
-svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
-svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
-svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
-svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
-svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
-svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
-svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
-svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
-svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
-svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
-svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
-svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
-svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
-svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
-svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
-svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
-svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
-svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
-svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
-svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
-svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
-svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
-svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
-svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
-svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
-svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
-svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
-svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
-svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
-svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
-svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
-svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
-svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
-svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
-svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
-svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
-svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
-svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
-svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
-svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
-svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
-svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
-svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
-svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
-svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
-svbool_t svmov_z(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
-svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
-svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
-svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
-svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
-svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
-svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
-svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
-svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
-svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
-svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
-svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
-svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
-svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
-svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
-svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
-svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
-svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
-svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
-svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
-svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
-svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
-svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
-svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
-svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
-svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
-svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
-svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
-svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
-svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
-svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
-svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
-svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
-svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
-svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
-svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
-svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
-svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
-svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
-svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
-svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
-svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
-svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
-svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
-svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
-svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
-svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
-svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
-svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
-svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
-svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
-svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
-svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
-svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
-svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
-svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
-svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
-svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
-svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
-svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
-svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
-svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
-svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
-svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
-svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
-svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
-svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
-svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
-svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
-svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
-svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
-svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
-svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
-svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
-svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
-svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
-svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
-svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
-svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
-svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
-svint8_t svmul_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
-svint32_t svmul_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
-svint64_t svmul_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
-svint16_t svmul_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
-svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
-svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
-svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
-svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
-svint8_t svmul_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
-svint32_t svmul_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
-svint64_t svmul_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
-svint16_t svmul_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
-svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
-svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
-svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
-svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
-svint8_t svmul_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
-svint32_t svmul_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
-svint64_t svmul_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
-svint16_t svmul_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
-svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
-svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
-svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
-svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
-svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
-svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
-svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
-svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
-svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
-svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))
-svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))
-svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))
-svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))
-svint8_t svmul_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))
-svint32_t svmul_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))
-svint64_t svmul_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))
-svint16_t svmul_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))
-svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))
-svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))
-svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))
-svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))
-svint8_t svmul_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))
-svint32_t svmul_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))
-svint64_t svmul_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))
-svint16_t svmul_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))
-svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))
-svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))
-svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))
-svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))
-svint8_t svmul_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))
-svint32_t svmul_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))
-svint64_t svmul_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))
-svint16_t svmul_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))
-svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))
-svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))
-svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))
-svint8_t svmulh_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))
-svint32_t svmulh_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))
-svint64_t svmulh_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))
-svint16_t svmulh_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))
-svint8_t svmulh_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))
-svint32_t svmulh_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))
-svint64_t svmulh_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))
-svint16_t svmulh_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))
-svint8_t svmulh_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))
-svint32_t svmulh_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))
-svint64_t svmulh_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))
-svint16_t svmulh_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))
-svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))
-svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))
-svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))
-svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))
-svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))
-svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))
-svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))
-svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))
-svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))
-svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))
-svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))
-svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))
-svint8_t svmulh_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))
-svint32_t svmulh_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))
-svint64_t svmulh_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))
-svint16_t svmulh_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
-svint8_t svmulh_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
-svint32_t svmulh_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
-svint64_t svmulh_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
-svint16_t svmulh_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
-svint8_t svmulh_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
-svint32_t svmulh_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
-svint64_t svmulh_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
-svint16_t svmulh_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
-svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
-svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
-svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
-svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
-svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
-svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
-svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
-svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
-svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
-svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
-svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
-svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
-svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
-svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
-svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
-svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
-svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
-svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
-svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
-svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
-svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
-svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
-svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
-svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
-svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
-svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
-svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
-svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
-svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
-svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
-svbool_t svnand_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
-svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
-svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
-svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
-svfloat64_t svneg_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
-svfloat32_t svneg_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
-svfloat16_t svneg_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
-svfloat64_t svneg_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
-svfloat32_t svneg_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
-svfloat16_t svneg_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
-svint8_t svneg_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
-svint32_t svneg_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
-svint64_t svneg_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
-svint16_t svneg_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
-svint8_t svneg_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
-svint32_t svneg_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
-svint64_t svneg_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
-svint16_t svneg_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
-svint8_t svneg_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
-svint32_t svneg_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
-svint64_t svneg_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
-svint16_t svneg_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
-svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
-svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
-svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
-svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
-svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
-svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
-svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
-svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
-svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
-svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
-svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
-svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))
-svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))
-svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))
-svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))
-svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))
-svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))
-svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))
-svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))
-svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))
-svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))
-svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))
-svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))
-svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))
-svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))
-svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))
-svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))
-svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))
-svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))
-svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))
-svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))
-svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))
-svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))
-svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))
-svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))
-svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))
-svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))
-svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))
-svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))
-svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))
-svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))
-svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))
-svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))
-svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))
-svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))
-svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))
-svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))
-svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))
-svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))
-svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))
-svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))
-svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))
-svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))
-svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))
-svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))
-svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))
-svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))
-svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))
-svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))
-svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))
-svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))
-svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))
-svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))
-svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))
-svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))
-svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))
-svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))
-svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))
-svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))
-svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))
-svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))
-svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))
-svbool_t svnor_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))
-svbool_t svnot_z(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))
-svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))
-svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))
-svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))
-svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))
-svint8_t svnot_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))
-svint32_t svnot_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))
-svint64_t svnot_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))
-svint16_t svnot_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))
-svuint8_t svnot_x(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))
-svuint32_t svnot_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))
-svuint64_t svnot_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))
-svuint16_t svnot_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))
-svint8_t svnot_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))
-svint32_t svnot_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))
-svint64_t svnot_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))
-svint16_t svnot_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))
-svuint8_t svnot_z(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))
-svuint32_t svnot_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))
-svuint64_t svnot_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))
-svuint16_t svnot_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))
-svint8_t svnot_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))
-svint32_t svnot_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))
-svint64_t svnot_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))
-svint16_t svnot_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))
-svbool_t svorn_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))
-svbool_t svorr_z(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))
-svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))
-svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))
-svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))
-svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))
-svint8_t svorr_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))
-svint32_t svorr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))
-svint64_t svorr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))
-svint16_t svorr_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))
-svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))
-svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))
-svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))
-svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))
-svint8_t svorr_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))
-svint32_t svorr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))
-svint64_t svorr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))
-svint16_t svorr_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))
-svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))
-svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))
-svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))
-svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))
-svint8_t svorr_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))
-svint32_t svorr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))
-svint64_t svorr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))
-svint16_t svorr_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))
-svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))
-svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))
-svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))
-svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))
-svint8_t svorr_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))
-svint32_t svorr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))
-svint64_t svorr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))
-svint16_t svorr_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))
-svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))
-svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))
-svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))
-svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))
-svint8_t svorr_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))
-svint32_t svorr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))
-svint64_t svorr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))
-svint16_t svorr_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))
-svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))
-svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))
-svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))
-svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))
-svint8_t svorr_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))
-svint32_t svorr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))
-svint64_t svorr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))
-svint16_t svorr_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))
-uint8_t svorv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))
-uint32_t svorv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))
-uint64_t svorv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))
-uint16_t svorv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))
-int8_t svorv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))
-int32_t svorv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))
-int64_t svorv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
-int16_t svorv(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse();
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
-svbool_t svpfirst(svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
-void svprfb_gather(svbool_t, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
-void svprfb_gather(svbool_t, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
-void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
-void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
-void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
-void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
-void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
-void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
-void svprfd_gather(svbool_t, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
-void svprfd_gather(svbool_t, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
-void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
-void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
-void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
-void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
-void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
-void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
-void svprfh_gather(svbool_t, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
-void svprfh_gather(svbool_t, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
-void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
-void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
-void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
-void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
-void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
-void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
-void svprfw_gather(svbool_t, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
-void svprfw_gather(svbool_t, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
-void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
-void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
-void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
-void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
-void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
-void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
-svint8_t svqadd(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
-svint32_t svqadd(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))
-svint64_t svqadd(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))
-svint16_t svqadd(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))
-svuint8_t svqadd(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))
-svuint32_t svqadd(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))
-svuint64_t svqadd(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))
-svuint16_t svqadd(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))
-svint8_t svqadd(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))
-svint32_t svqadd(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))
-svint64_t svqadd(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))
-svint16_t svqadd(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))
-svuint8_t svqadd(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))
-svuint32_t svqadd(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))
-svuint64_t svqadd(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))
-svuint16_t svqadd(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))
-int32_t svqdecb(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))
-int64_t svqdecb(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))
-uint32_t svqdecb(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))
-uint64_t svqdecb(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))
-int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))
-int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))
-uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))
-uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))
-int32_t svqdecd(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))
-int64_t svqdecd(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))
-uint32_t svqdecd(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))
-uint64_t svqdecd(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))
-svint64_t svqdecd(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))
-svuint64_t svqdecd(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))
-int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))
-int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))
-uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))
-uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))
-svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))
-svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))
-int32_t svqdech(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))
-int64_t svqdech(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))
-uint32_t svqdech(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))
-uint64_t svqdech(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))
-svint16_t svqdech(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))
-svuint16_t svqdech(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))
-int32_t svqdech_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))
-int64_t svqdech_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))
-uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))
-uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))
-svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))
-svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))
-int32_t svqdecp_b8(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))
-int32_t svqdecp_b32(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))
-int32_t svqdecp_b64(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))
-int32_t svqdecp_b16(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))
-int64_t svqdecp_b8(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))
-int64_t svqdecp_b32(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))
-int64_t svqdecp_b64(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))
-int64_t svqdecp_b16(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))
-uint32_t svqdecp_b8(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))
-uint32_t svqdecp_b32(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))
-uint32_t svqdecp_b64(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))
-uint32_t svqdecp_b16(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))
-uint64_t svqdecp_b8(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))
-uint64_t svqdecp_b32(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))
-uint64_t svqdecp_b64(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))
-uint64_t svqdecp_b16(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))
-svint32_t svqdecp(svint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))
-svint64_t svqdecp(svint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))
-svint16_t svqdecp(svint16_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))
-svuint32_t svqdecp(svuint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))
-svuint64_t svqdecp(svuint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))
-svuint16_t svqdecp(svuint16_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))
-int32_t svqdecw(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))
-int64_t svqdecw(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))
-uint32_t svqdecw(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))
-uint64_t svqdecw(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))
-svint32_t svqdecw(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))
-svuint32_t svqdecw(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))
-int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))
-int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))
-uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))
-uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))
-svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))
-svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))
-int32_t svqincb(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))
-int64_t svqincb(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))
-uint32_t svqincb(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))
-uint64_t svqincb(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))
-int32_t svqincb_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))
-int64_t svqincb_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))
-uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))
-uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))
-int32_t svqincd(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))
-int64_t svqincd(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))
-uint32_t svqincd(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))
-uint64_t svqincd(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))
-svint64_t svqincd(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))
-svuint64_t svqincd(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))
-int32_t svqincd_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))
-int64_t svqincd_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))
-uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))
-uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))
-svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))
-svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))
-int32_t svqinch(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))
-int64_t svqinch(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))
-uint32_t svqinch(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))
-uint64_t svqinch(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))
-svint16_t svqinch(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))
-svuint16_t svqinch(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))
-int32_t svqinch_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))
-int64_t svqinch_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))
-uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))
-uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))
-svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))
-svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))
-int32_t svqincp_b8(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))
-int32_t svqincp_b32(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))
-int32_t svqincp_b64(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))
-int32_t svqincp_b16(int32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))
-int64_t svqincp_b8(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))
-int64_t svqincp_b32(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))
-int64_t svqincp_b64(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))
-int64_t svqincp_b16(int64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))
-uint32_t svqincp_b8(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))
-uint32_t svqincp_b32(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))
-uint32_t svqincp_b64(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))
-uint32_t svqincp_b16(uint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))
-uint64_t svqincp_b8(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))
-uint64_t svqincp_b32(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))
-uint64_t svqincp_b64(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))
-uint64_t svqincp_b16(uint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))
-svint32_t svqincp(svint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))
-svint64_t svqincp(svint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))
-svint16_t svqincp(svint16_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))
-svuint32_t svqincp(svuint32_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))
-svuint64_t svqincp(svuint64_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))
-svuint16_t svqincp(svuint16_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))
-int32_t svqincw(int32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))
-int64_t svqincw(int64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))
-uint32_t svqincw(uint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))
-uint64_t svqincw(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))
-svint32_t svqincw(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))
-svuint32_t svqincw(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))
-int32_t svqincw_pat(int32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))
-int64_t svqincw_pat(int64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))
-uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))
-uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))
-svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))
-svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))
-svint8_t svqsub(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))
-svint32_t svqsub(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))
-svint64_t svqsub(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))
-svint16_t svqsub(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))
-svuint8_t svqsub(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))
-svuint32_t svqsub(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))
-svuint64_t svqsub(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))
-svuint16_t svqsub(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))
-svint8_t svqsub(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))
-svint32_t svqsub(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))
-svint64_t svqsub(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))
-svint16_t svqsub(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))
-svuint8_t svqsub(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))
-svuint32_t svqsub(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))
-svuint64_t svqsub(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))
-svuint16_t svqsub(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))
-svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))
-svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))
-svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))
-svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))
-svint8_t svrbit_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))
-svint32_t svrbit_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))
-svint64_t svrbit_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))
-svint16_t svrbit_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))
-svuint8_t svrbit_x(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))
-svuint32_t svrbit_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))
-svuint64_t svrbit_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))
-svuint16_t svrbit_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))
-svint8_t svrbit_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))
-svint32_t svrbit_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))
-svint64_t svrbit_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))
-svint16_t svrbit_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))
-svuint8_t svrbit_z(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))
-svuint32_t svrbit_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))
-svuint64_t svrbit_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))
-svuint16_t svrbit_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))
-svint8_t svrbit_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))
-svint32_t svrbit_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
-svint64_t svrbit_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
-svint16_t svrbit_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
-svfloat64_t svrecpe(svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
-svfloat32_t svrecpe(svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))
-svfloat16_t svrecpe(svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))
-svfloat64_t svrecps(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))
-svfloat32_t svrecps(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))
-svfloat16_t svrecps(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))
-svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))
-svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))
-svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))
-svfloat64_t svrecpx_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))
-svfloat32_t svrecpx_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))
-svfloat16_t svrecpx_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))
-svfloat64_t svrecpx_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))
-svfloat32_t svrecpx_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))
-svfloat16_t svrecpx_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))
-svuint8_t svrev(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))
-svuint32_t svrev(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))
-svuint64_t svrev(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))
-svuint16_t svrev(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))
-svint8_t svrev(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))
-svfloat64_t svrev(svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))
-svfloat32_t svrev(svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))
-svfloat16_t svrev(svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))
-svint32_t svrev(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))
-svint64_t svrev(svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))
-svint16_t svrev(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))
-svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))
-svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))
-svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))
-svint32_t svrevb_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))
-svint64_t svrevb_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))
-svint16_t svrevb_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))
-svuint32_t svrevb_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))
-svuint64_t svrevb_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))
-svuint16_t svrevb_x(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))
-svint32_t svrevb_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))
-svint64_t svrevb_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))
-svint16_t svrevb_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))
-svuint32_t svrevb_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))
-svuint64_t svrevb_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))
-svuint16_t svrevb_z(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))
-svint32_t svrevb_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))
-svint64_t svrevb_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))
-svint16_t svrevb_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))
-svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))
-svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))
-svint32_t svrevh_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))
-svint64_t svrevh_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))
-svuint32_t svrevh_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))
-svuint64_t svrevh_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))
-svint32_t svrevh_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))
-svint64_t svrevh_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))
-svuint32_t svrevh_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))
-svuint64_t svrevh_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))
-svint32_t svrevh_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))
-svint64_t svrevh_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))
-svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))
-svint64_t svrevw_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))
-svuint64_t svrevw_x(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))
-svint64_t svrevw_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))
-svuint64_t svrevw_z(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))
-svint64_t svrevw_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))
-svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))
-svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))
-svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))
-svfloat64_t svrinta_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))
-svfloat32_t svrinta_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))
-svfloat16_t svrinta_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))
-svfloat64_t svrinta_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))
-svfloat32_t svrinta_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))
-svfloat16_t svrinta_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))
-svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))
-svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))
-svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))
-svfloat64_t svrinti_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))
-svfloat32_t svrinti_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))
-svfloat16_t svrinti_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))
-svfloat64_t svrinti_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))
-svfloat32_t svrinti_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))
-svfloat16_t svrinti_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))
-svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))
-svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))
-svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))
-svfloat64_t svrintm_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))
-svfloat32_t svrintm_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))
-svfloat16_t svrintm_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))
-svfloat64_t svrintm_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))
-svfloat32_t svrintm_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))
-svfloat16_t svrintm_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))
-svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))
-svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))
-svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))
-svfloat64_t svrintn_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))
-svfloat32_t svrintn_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))
-svfloat16_t svrintn_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))
-svfloat64_t svrintn_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))
-svfloat32_t svrintn_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))
-svfloat16_t svrintn_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))
-svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))
-svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))
-svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))
-svfloat64_t svrintp_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))
-svfloat32_t svrintp_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))
-svfloat16_t svrintp_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))
-svfloat64_t svrintp_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))
-svfloat32_t svrintp_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))
-svfloat16_t svrintp_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))
-svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))
-svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))
-svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))
-svfloat64_t svrintx_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))
-svfloat32_t svrintx_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))
-svfloat16_t svrintx_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))
-svfloat64_t svrintx_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))
-svfloat32_t svrintx_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))
-svfloat16_t svrintx_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))
-svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))
-svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))
-svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))
-svfloat64_t svrintz_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))
-svfloat32_t svrintz_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))
-svfloat16_t svrintz_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))
-svfloat64_t svrintz_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))
-svfloat32_t svrintz_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))
-svfloat16_t svrintz_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))
-svfloat64_t svrsqrte(svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))
-svfloat32_t svrsqrte(svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))
-svfloat16_t svrsqrte(svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))
-svfloat64_t svrsqrts(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))
-svfloat32_t svrsqrts(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))
-svfloat16_t svrsqrts(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))
-svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))
-svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))
-svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))
-svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))
-svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))
-svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))
-svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))
-svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))
-svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))
-svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))
-svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))
-svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))
-svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))
-svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))
-svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))
-svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))
-svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))
-svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))
-svbool_t svsel(svbool_t, svbool_t, svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))
-svuint8_t svsel(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))
-svuint32_t svsel(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))
-svuint64_t svsel(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))
-svuint16_t svsel(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))
-svint8_t svsel(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))
-svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))
-svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))
-svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))
-svint32_t svsel(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))
-svint64_t svsel(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))
-svint16_t svsel(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))
-svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))
-svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))
-svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))
-svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))
-svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))
-svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))
-svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))
-svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))
-svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))
-svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))
-svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))
-svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))
-svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))
-svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))
-svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))
-svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))
-svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))
-svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))
-svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))
-svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))
-svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))
-svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))
-svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))
-svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))
-svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))
-svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))
-svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))
-svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))
-svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))
-svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))
-svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
-svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
-svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
-svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
-svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))
-svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))
-svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))
-svint8_t svsplice(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))
-svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))
-svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))
-svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))
-svint32_t svsplice(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))
-svint64_t svsplice(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))
-svint16_t svsplice(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))
-svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))
-svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))
-svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))
-svfloat64_t svsqrt_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))
-svfloat32_t svsqrt_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))
-svfloat16_t svsqrt_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))
-svfloat64_t svsqrt_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))
-svfloat32_t svsqrt_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))
-svfloat16_t svsqrt_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))
-void svst1(svbool_t, uint8_t *, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))
-void svst1(svbool_t, uint32_t *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))
-void svst1(svbool_t, uint64_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))
-void svst1(svbool_t, uint16_t *, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))
-void svst1(svbool_t, int8_t *, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))
-void svst1(svbool_t, float64_t *, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))
-void svst1(svbool_t, float32_t *, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))
-void svst1(svbool_t, float16_t *, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))
-void svst1(svbool_t, int32_t *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
-void svst1(svbool_t, int64_t *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
-void svst1(svbool_t, int16_t *, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
-void svst1_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
-void svst1_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
-void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
-void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
-void svst1_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
-void svst1_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
-void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
-void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
-void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
-void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
-void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
-void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
-void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
-void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
-void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
-void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
-void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
-void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
-void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
-void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
-void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
-void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
-void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
-void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
-void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
-void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
-void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
-void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
-void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
-void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
-void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
-void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))
-void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))
-void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))
-void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))
-void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))
-void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))
-void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))
-void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))
-void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))
-void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))
-void svst1b(svbool_t, int8_t *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))
-void svst1b(svbool_t, int8_t *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))
-void svst1b(svbool_t, int8_t *, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))
-void svst1b(svbool_t, uint8_t *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
-void svst1b(svbool_t, uint8_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
-void svst1b(svbool_t, uint8_t *, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
-void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
-void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
-void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
-void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
-void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
-void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
-void svst1b_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
-void svst1b_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
-void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
-void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))
-void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))
-void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))
-void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))
-void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))
-void svst1h(svbool_t, int16_t *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))
-void svst1h(svbool_t, int16_t *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
-void svst1h(svbool_t, uint16_t *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
-void svst1h(svbool_t, uint16_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
-void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
-void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
-void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
-void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
-void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
-void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
-void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
-void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
-void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
-void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
-void svst1h_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
-void svst1h_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
-void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
-void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
-void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
-void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
-void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
-void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))
-void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))
-void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
-void svst1w(svbool_t, int32_t *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
-void svst1w(svbool_t, uint32_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
-void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
-void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
-void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
-void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
-void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
-void svst1w_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
-void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
-void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
-void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
-void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
-void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
-void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
-void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
-void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
-void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
-void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))
-void svst2(svbool_t, uint8_t *, svuint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))
-void svst2(svbool_t, uint32_t *, svuint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))
-void svst2(svbool_t, uint64_t *, svuint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))
-void svst2(svbool_t, uint16_t *, svuint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))
-void svst2(svbool_t, int8_t *, svint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))
-void svst2(svbool_t, float64_t *, svfloat64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))
-void svst2(svbool_t, float32_t *, svfloat32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))
-void svst2(svbool_t, float16_t *, svfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))
-void svst2(svbool_t, int32_t *, svint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))
-void svst2(svbool_t, int64_t *, svint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))
-void svst2(svbool_t, int16_t *, svint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))
-void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))
-void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))
-void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))
-void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))
-void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))
-void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))
-void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))
-void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))
-void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))
-void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))
-void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))
-void svst3(svbool_t, uint8_t *, svuint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))
-void svst3(svbool_t, uint32_t *, svuint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))
-void svst3(svbool_t, uint64_t *, svuint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))
-void svst3(svbool_t, uint16_t *, svuint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))
-void svst3(svbool_t, int8_t *, svint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))
-void svst3(svbool_t, float64_t *, svfloat64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))
-void svst3(svbool_t, float32_t *, svfloat32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))
-void svst3(svbool_t, float16_t *, svfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))
-void svst3(svbool_t, int32_t *, svint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))
-void svst3(svbool_t, int64_t *, svint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))
-void svst3(svbool_t, int16_t *, svint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))
-void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))
-void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))
-void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))
-void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))
-void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))
-void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))
-void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))
-void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))
-void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))
-void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))
-void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))
-void svst4(svbool_t, uint8_t *, svuint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))
-void svst4(svbool_t, uint32_t *, svuint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))
-void svst4(svbool_t, uint64_t *, svuint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))
-void svst4(svbool_t, uint16_t *, svuint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))
-void svst4(svbool_t, int8_t *, svint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))
-void svst4(svbool_t, float64_t *, svfloat64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))
-void svst4(svbool_t, float32_t *, svfloat32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))
-void svst4(svbool_t, float16_t *, svfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))
-void svst4(svbool_t, int32_t *, svint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))
-void svst4(svbool_t, int64_t *, svint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))
-void svst4(svbool_t, int16_t *, svint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))
-void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))
-void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))
-void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))
-void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))
-void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))
-void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))
-void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))
-void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))
-void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))
-void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))
-void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))
-void svstnt1(svbool_t, uint8_t *, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))
-void svstnt1(svbool_t, uint32_t *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))
-void svstnt1(svbool_t, uint64_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))
-void svstnt1(svbool_t, uint16_t *, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))
-void svstnt1(svbool_t, int8_t *, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))
-void svstnt1(svbool_t, float64_t *, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))
-void svstnt1(svbool_t, float32_t *, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))
-void svstnt1(svbool_t, float16_t *, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))
-void svstnt1(svbool_t, int32_t *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))
-void svstnt1(svbool_t, int64_t *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))
-void svstnt1(svbool_t, int16_t *, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))
-void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))
-void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))
-void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))
-void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))
-void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))
-void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))
-void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))
-void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))
-void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))
-void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))
-void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))
-svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))
-svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))
-svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))
-svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))
-svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))
-svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))
-svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))
-svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))
-svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))
-svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))
-svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))
-svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))
-svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))
-svint8_t svsub_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))
-svint32_t svsub_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))
-svint64_t svsub_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))
-svint16_t svsub_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))
-svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))
-svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))
-svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))
-svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))
-svint8_t svsub_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))
-svint32_t svsub_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))
-svint64_t svsub_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))
-svint16_t svsub_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))
-svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))
-svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))
-svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))
-svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))
-svint8_t svsub_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))
-svint32_t svsub_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))
-svint64_t svsub_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))
-svint16_t svsub_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))
-svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))
-svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))
-svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))
-svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))
-svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))
-svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))
-svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))
-svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))
-svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))
-svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))
-svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))
-svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))
-svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))
-svint8_t svsub_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))
-svint32_t svsub_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))
-svint64_t svsub_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))
-svint16_t svsub_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))
-svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))
-svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))
-svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))
-svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))
-svint8_t svsub_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))
-svint32_t svsub_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))
-svint64_t svsub_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))
-svint16_t svsub_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))
-svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))
-svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))
-svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))
-svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))
-svint8_t svsub_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))
-svint32_t svsub_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))
-svint64_t svsub_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))
-svint16_t svsub_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))
-svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))
-svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))
-svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))
-svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))
-svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))
-svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))
-svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))
-svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))
-svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))
-svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))
-svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))
-svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))
-svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))
-svint8_t svsubr_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))
-svint32_t svsubr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))
-svint64_t svsubr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))
-svint16_t svsubr_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))
-svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))
-svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))
-svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))
-svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))
-svint8_t svsubr_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))
-svint32_t svsubr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))
-svint64_t svsubr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))
-svint16_t svsubr_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))
-svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))
-svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))
-svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))
-svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))
-svint8_t svsubr_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))
-svint32_t svsubr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))
-svint64_t svsubr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))
-svint16_t svsubr_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))
-svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))
-svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))
-svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))
-svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))
-svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))
-svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))
-svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))
-svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))
-svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))
-svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))
-svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))
-svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))
-svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))
-svint8_t svsubr_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))
-svint32_t svsubr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))
-svint64_t svsubr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))
-svint16_t svsubr_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))
-svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))
-svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))
-svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))
-svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))
-svint8_t svsubr_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))
-svint32_t svsubr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))
-svint64_t svsubr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))
-svint16_t svsubr_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))
-svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))
-svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))
-svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))
-svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))
-svint8_t svsubr_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))
-svint32_t svsubr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))
-svint64_t svsubr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))
-svint16_t svsubr_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))
-svuint8_t svtbl(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))
-svuint32_t svtbl(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))
-svuint64_t svtbl(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))
-svuint16_t svtbl(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))
-svint8_t svtbl(svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))
-svfloat64_t svtbl(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))
-svfloat32_t svtbl(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))
-svfloat16_t svtbl(svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))
-svint32_t svtbl(svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
-svint64_t svtbl(svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
-svint16_t svtbl(svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
-svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
-svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
-svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
-svuint8_t svtrn1(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
-svuint32_t svtrn1(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))
-svuint64_t svtrn1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))
-svuint16_t svtrn1(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))
-svint8_t svtrn1(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))
-svfloat64_t svtrn1(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))
-svfloat32_t svtrn1(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))
-svfloat16_t svtrn1(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))
-svint32_t svtrn1(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))
-svint64_t svtrn1(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))
-svint16_t svtrn1(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))
-svuint8_t svtrn2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))
-svuint32_t svtrn2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))
-svuint64_t svtrn2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))
-svuint16_t svtrn2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))
-svint8_t svtrn2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))
-svfloat64_t svtrn2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))
-svfloat32_t svtrn2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))
-svfloat16_t svtrn2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))
-svint32_t svtrn2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
-svint64_t svtrn2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
-svint16_t svtrn2(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
-svfloat64_t svtsmul(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
-svfloat32_t svtsmul(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
-svfloat16_t svtsmul(svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
-svfloat64_t svtssel(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
-svfloat32_t svtssel(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
-svfloat16_t svtssel(svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
-svbool_t svunpkhi(svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
-svint32_t svunpkhi(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))
-svint64_t svunpkhi(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))
-svint16_t svunpkhi(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))
-svuint32_t svunpkhi(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))
-svuint64_t svunpkhi(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))
-svuint16_t svunpkhi(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))
-svbool_t svunpklo(svbool_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))
-svint32_t svunpklo(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))
-svint64_t svunpklo(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))
-svint16_t svunpklo(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))
-svuint32_t svunpklo(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))
-svuint64_t svunpklo(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))
-svuint16_t svunpklo(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))
-svuint8_t svuzp1(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))
-svuint32_t svuzp1(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))
-svuint64_t svuzp1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))
-svuint16_t svuzp1(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))
-svint8_t svuzp1(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))
-svfloat64_t svuzp1(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))
-svfloat32_t svuzp1(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))
-svfloat16_t svuzp1(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))
-svint32_t svuzp1(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))
-svint64_t svuzp1(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))
-svint16_t svuzp1(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))
-svuint8_t svuzp2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))
-svuint32_t svuzp2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))
-svuint64_t svuzp2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))
-svuint16_t svuzp2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))
-svint8_t svuzp2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))
-svfloat64_t svuzp2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))
-svfloat32_t svuzp2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))
-svfloat16_t svuzp2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))
-svint32_t svuzp2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))
-svint64_t svuzp2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))
-svint16_t svuzp2(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))
-svbool_t svwhilele_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))
-svbool_t svwhilele_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))
-svbool_t svwhilele_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))
-svbool_t svwhilele_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))
-svbool_t svwhilele_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))
-svbool_t svwhilele_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))
-svbool_t svwhilele_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))
-svbool_t svwhilele_b16(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))
-svbool_t svwhilele_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))
-svbool_t svwhilele_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))
-svbool_t svwhilele_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))
-svbool_t svwhilele_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))
-svbool_t svwhilele_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))
-svbool_t svwhilele_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))
-svbool_t svwhilele_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))
-svbool_t svwhilele_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))
-svbool_t svwhilelt_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))
-svbool_t svwhilelt_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))
-svbool_t svwhilelt_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))
-svbool_t svwhilelt_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))
-svbool_t svwhilelt_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))
-svbool_t svwhilelt_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))
-svbool_t svwhilelt_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))
-svbool_t svwhilelt_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))
-svbool_t svwhilelt_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))
-svbool_t svwhilelt_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))
-svbool_t svwhilelt_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))
-svbool_t svwhilelt_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))
-svbool_t svwhilelt_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))
-svbool_t svwhilelt_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
-svbool_t svwhilelt_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
-svbool_t svwhilelt_b16(int64_t, int64_t);
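// Usage sketch (illustrative, assuming an SVE-enabled compile): the svwhile*
// overloads build loop predicates. svwhilelt_b32(i, n) is true in lane k
// exactly when i + k < n (svwhilele uses <=), which strip-mines a loop with
// the tail handled entirely by predication.
#include <arm_sve.h>
void vadd(float *dst, const float *a, const float *b, int n) {
  for (int i = 0; i < n; i += (int)svcntw()) { // svcntw: f32 lanes per vector
    svbool_t pg = svwhilelt_b32(i, n);         // active while i + lane < n
    svfloat32_t va = svld1(pg, a + i);
    svfloat32_t vb = svld1(pg, b + i);
    svst1(pg, dst + i, svadd_x(pg, va, vb));
  }
}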
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
-svuint8_t svzip1(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
-svuint32_t svzip1(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))
-svuint64_t svzip1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))
-svuint16_t svzip1(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))
-svint8_t svzip1(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))
-svfloat64_t svzip1(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))
-svfloat32_t svzip1(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))
-svfloat16_t svzip1(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))
-svint32_t svzip1(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))
-svint64_t svzip1(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))
-svint16_t svzip1(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))
-svuint8_t svzip2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))
-svuint32_t svzip2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))
-svuint64_t svzip2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))
-svuint16_t svzip2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))
-svint8_t svzip2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))
-svfloat64_t svzip2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))
-svfloat32_t svzip2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))
-svfloat16_t svzip2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))
-svint32_t svzip2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
-svint64_t svzip2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
-svint16_t svzip2(svint16_t, svint16_t);
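// Usage sketch (illustrative): svzip1/svzip2 interleave two vectors — zip1
// interleaves the low halves (a0,b0,a1,b1,...), zip2 the high halves; the
// pair is the inverse of svuzp1/svuzp2 above.
#include <arm_sve.h>
static void interleave(svuint32_t a, svuint32_t b,
                       svuint32_t *lo, svuint32_t *hi) {
  *lo = svzip1(a, b); // a0,b0,a1,b1,... from the low halves
  *hi = svzip2(a, b); // the same pattern from the high halves
}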
-
-#if defined (__ARM_FEATURE_SVE2_BITPERM)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
-svuint8_t svbdep_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
-svuint32_t svbdep_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
-svuint64_t svbdep_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
-svuint16_t svbdep_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
-svuint8_t svbdep_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
-svuint32_t svbdep_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
-svuint64_t svbdep_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
-svuint16_t svbdep_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
-svuint8_t svbext_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
-svuint32_t svbext_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
-svuint64_t svbext_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
-svuint16_t svbext_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
-svuint8_t svbext_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
-svuint32_t svbext_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
-svuint64_t svbext_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
-svuint16_t svbext_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
-svuint8_t svbgrp_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
-svuint32_t svbgrp_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
-svuint64_t svbgrp_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
-svuint16_t svbgrp_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
-svuint8_t svbgrp_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
-svuint32_t svbgrp_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
-svuint64_t svbgrp_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
-svuint16_t svbgrp_u16(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
-svuint8_t svbdep(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
-svuint32_t svbdep(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
-svuint64_t svbdep(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
-svuint16_t svbdep(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
-svuint8_t svbdep(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
-svuint32_t svbdep(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
-svuint64_t svbdep(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
-svuint16_t svbdep(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
-svuint8_t svbext(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
-svuint32_t svbext(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
-svuint64_t svbext(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
-svuint16_t svbext(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
-svuint8_t svbext(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
-svuint32_t svbext(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
-svuint64_t svbext(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
-svuint16_t svbext(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
-svuint8_t svbgrp(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
-svuint32_t svbgrp(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
-svuint64_t svbgrp(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
-svuint16_t svbgrp(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
-svuint8_t svbgrp(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
-svuint32_t svbgrp(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
-svuint64_t svbgrp(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
-svuint16_t svbgrp(svuint16_t, svuint16_t);
-#endif  //defined (__ARM_FEATURE_SVE2_BITPERM)
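// Usage sketch (illustrative; requires the SVE2 bit-permute extension, e.g.
// -march=armv8-a+sve2-bitperm): svbdep scatters the low-order bits of its
// first operand into the set-bit positions of the second, and svbext is the
// inverse gather. A classic use is Morton (Z-order) index interleaving:
#include <arm_sve.h>
static svuint32_t morton2d(svuint32_t x, svuint32_t y) {
  svuint32_t xe = svbdep(x, svdup_u32(0x55555555u)); // x bits -> even positions
  svuint32_t yo = svbdep(y, svdup_u32(0xAAAAAAAAu)); // y bits -> odd positions
  return svorr_x(svptrue_b32(), xe, yo);
}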
-
-#if defined(__ARM_FEATURE_SVE2)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
-svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
-svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
-svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
-svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
-svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
-svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
-svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
-svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
-svint8_t svaba_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
-svint32_t svaba_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
-svint64_t svaba_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
-svint16_t svaba_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
-svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
-svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
-svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
-svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t);
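// Usage sketch (illustrative): svaba accumulates absolute differences,
// acc[i] += |a[i] - b[i]| — one step of a sum-of-absolute-differences kernel.
#include <arm_sve.h>
static svuint8_t sad_step(svuint8_t acc, svuint8_t a, svuint8_t b) {
  return svaba_u8(acc, a, b);
}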
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
-svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
-svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
-svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
-svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
-svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
-svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
-svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
-svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
-svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
-svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
-svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
-svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
-svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
-svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
-svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
-svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
-svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
-svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
-svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
-svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
-svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
-svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
-svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
-svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t);
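// Usage sketch (illustrative): the widening svabalb/svabalt forms accumulate
// |a - b| of the even ("bottom") and odd ("top") narrow lanes into
// double-width accumulators, so byte-wide SADs run without 8-bit overflow.
#include <arm_sve.h>
static svuint16_t sad_wide(svuint16_t acc, svuint8_t a, svuint8_t b) {
  acc = svabalb_u16(acc, a, b);  // even byte lanes
  return svabalt_u16(acc, a, b); // odd byte lanes
}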
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
-svint32_t svabdlb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
-svint64_t svabdlb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
-svint16_t svabdlb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
-svuint32_t svabdlb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
-svuint64_t svabdlb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
-svuint16_t svabdlb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
-svint32_t svabdlb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
-svint64_t svabdlb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
-svint16_t svabdlb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
-svuint32_t svabdlb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
-svuint64_t svabdlb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
-svuint16_t svabdlb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
-svint32_t svabdlt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
-svint64_t svabdlt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
-svint16_t svabdlt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
-svuint32_t svabdlt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
-svuint64_t svabdlt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
-svuint16_t svabdlt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
-svint32_t svabdlt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
-svint64_t svabdlt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
-svint16_t svabdlt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
-svuint32_t svabdlt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
-svuint64_t svabdlt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
-svuint16_t svabdlt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
-svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
-svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
-svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
-svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
-svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
-svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
-svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
-svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
-svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
-svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
-svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
-svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
-svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
-svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
-svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
-svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
-svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
-svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t);
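// Usage sketch (illustrative): svadalp ("add and accumulate long pairwise")
// adds each pair of adjacent narrow lanes into the wide accumulator lane that
// covers them — a building block for horizontal byte sums.
#include <arm_sve.h>
static svuint16_t acc_pairs(svbool_t pg, svuint16_t acc, svuint8_t v) {
  return svadalp_u16_x(pg, acc, v);
}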
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
-svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
-svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
-svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
-svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
-svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
-svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
-svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
-svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
-svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
-svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
-svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
-svint16_t svaddhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
-svint32_t svaddhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
-svint8_t svaddhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
-svuint16_t svaddhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
-svuint32_t svaddhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
-svuint8_t svaddhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
-svint16_t svaddhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
-svint32_t svaddhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
-svint8_t svaddhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
-svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
-svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
-svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
-svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
-svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
-svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
-svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
-svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
-svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
-svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
-svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
-svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t);
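// Usage sketch (illustrative): the add-high-narrow forms keep only the high
// half of each wide sum; _b fills the even narrow lanes and _t then fills the
// odd lanes, packing two wide vectors' results into one narrow vector.
#include <arm_sve.h>
static svuint8_t add_high_narrow(svuint16_t a, svuint16_t b,
                                 svuint16_t c, svuint16_t d) {
  svuint8_t r = svaddhnb_u16(a, b); // high halves of a+b -> even lanes
  return svaddhnt_u16(r, c, d);     // high halves of c+d -> odd lanes
}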
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
-svint32_t svaddlb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
-svint64_t svaddlb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
-svint16_t svaddlb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
-svuint32_t svaddlb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
-svuint64_t svaddlb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
-svuint16_t svaddlb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
-svint32_t svaddlb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
-svint64_t svaddlb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
-svint16_t svaddlb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
-svuint32_t svaddlb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
-svuint64_t svaddlb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
-svuint16_t svaddlb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
-svint32_t svaddlbt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
-svint64_t svaddlbt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
-svint16_t svaddlbt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
-svint32_t svaddlbt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
-svint64_t svaddlbt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
-svint16_t svaddlbt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
-svint32_t svaddlt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
-svint64_t svaddlt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
-svint16_t svaddlt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
-svuint32_t svaddlt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
-svuint64_t svaddlt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
-svuint16_t svaddlt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
-svint32_t svaddlt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
-svint64_t svaddlt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
-svint16_t svaddlt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
-svuint32_t svaddlt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
-svuint64_t svaddlt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
-svuint16_t svaddlt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
-svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
-svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
-svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
-svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
-svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
-svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
-svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
-svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
-svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
-svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
-svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
-svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
-svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
-svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
-svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
-svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
-svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
-svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
-svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
-svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))
-svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))
-svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t);
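// Usage sketch (illustrative): svaddp sums adjacent lane pairs, interleaving
// the pair-sums of the first operand into the even result lanes and those of
// the second into the odd lanes (a sketch — see the architecture spec for the
// exact lane placement).
#include <arm_sve.h>
static svfloat32_t pair_sums(svbool_t pg, svfloat32_t a, svfloat32_t b) {
  return svaddp_f32_x(pg, a, b);
}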
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))
-svint32_t svaddwb_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))
-svint64_t svaddwb_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))
-svint16_t svaddwb_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))
-svuint32_t svaddwb_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))
-svuint64_t svaddwb_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))
-svuint16_t svaddwb_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))
-svint32_t svaddwb_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))
-svint64_t svaddwb_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))
-svint16_t svaddwb_s16(svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))
-svuint32_t svaddwb_u32(svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))
-svuint64_t svaddwb_u64(svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))
-svuint16_t svaddwb_u16(svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))
-svint32_t svaddwt_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))
-svint64_t svaddwt_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))
-svint16_t svaddwt_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))
-svuint32_t svaddwt_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))
-svuint64_t svaddwt_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))
-svuint16_t svaddwt_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))
-svint32_t svaddwt_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))
-svint64_t svaddwt_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))
-svint16_t svaddwt_s16(svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))
-svuint32_t svaddwt_u32(svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))
-svuint64_t svaddwt_u64(svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))
-svuint16_t svaddwt_u16(svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))
-svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))
-svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))
-svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))
-svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))
-svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))
-svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))
-svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))
-svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))
-svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))
-svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))
-svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))
-svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))
-svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))
-svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))
-svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))
-svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t);
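// Usage sketch (illustrative): BCAX ("bit clear and XOR", from the SHA-3
// instruction group) computes op1 ^ (op2 & ~op3) in one instruction — the
// chi-step pattern of Keccak.
#include <arm_sve.h>
static svuint64_t chi_step(svuint64_t a, svuint64_t b, svuint64_t c) {
  return svbcax_u64(a, b, c); // a ^ (b & ~c)
}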
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))
-svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))
-svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))
-svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))
-svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))
-svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))
-svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))
-svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))
-svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))
-svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))
-svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))
-svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))
-svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))
-svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
-svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
-svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
-svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
-svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
-svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
-svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
-svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
-svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
-svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
-svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
-svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
-svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
-svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
-svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
-svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
-svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
-svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
-svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
-svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
-svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
-svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
-svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
-svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
-svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
-svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
-svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
-svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
-svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
-svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
-svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
-svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
-svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
-svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
-svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
-svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t);
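// Usage sketch (illustrative): svbsl is a bitwise select — each result bit
// comes from op1 where the selector op3 has a 1 and from op2 where it has a 0,
// i.e. (op1 & op3) | (op2 & ~op3).
#include <arm_sve.h>
static svuint32_t blend_bits(svuint32_t a, svuint32_t b, svuint32_t sel) {
  return svbsl_u32(a, b, sel);
}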
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
-svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
-svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
-svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
-svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
-svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
-svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
-svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
-svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t);
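// Usage sketch (illustrative): svcadd treats lanes as interleaved (re,im)
// pairs; the rotation must be the literal 90 or 270, and rot=90 computes
// a + i*b.
#include <arm_sve.h>
static svint16_t complex_add90(svint16_t a, svint16_t b) {
  return svcadd_s16(a, b, 90);
}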
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
-svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
-svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
-svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
-svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
-svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
-svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
-svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
-svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
-svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
-svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
-svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
-svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
-svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
-svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
-svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
-svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
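// Usage sketch (illustrative): a full complex multiply-accumulate on
// interleaved (re,im) pairs is built from two CMLA steps with 0- and
// 90-degree rotations.
#include <arm_sve.h>
static svint16_t complex_mla(svint16_t acc, svint16_t a, svint16_t b) {
  acc = svcmla_s16(acc, a, b, 0);   // real-part partial products
  return svcmla_s16(acc, a, b, 90); // imaginary-part partial products
}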
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
-svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
-svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
-svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
-svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
-svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
-svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
-svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
-svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
-svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
-svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
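// Usage sketch (illustrative): svcvtlt widens the odd-indexed ("top")
// half-width lanes, and svcvtnt narrows results back into the top lanes of an
// existing vector, leaving the even lanes untouched.
#include <arm_sve.h>
static svfloat32_t widen_top_lanes(svbool_t pg, svfloat16_t v) {
  return svcvtlt_f32_f16_x(pg, v);
}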
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
-svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
-svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
-svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
-svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
-svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
-svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
-svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
-svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
-svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
-svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
-svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
-svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
-svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
-svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
-svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
-svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
-svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
-svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
-svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
-svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
-svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
-svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
-svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
-svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
-svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
-svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
-svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
-svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
-svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
-svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
-svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
-svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
-svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
-svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
-svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
-svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
-svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
-svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
-svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
-svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
-svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
-svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
-svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
-svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
-svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
-svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
-svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
-svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t);
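// Usage sketch (illustrative): EOR3 (SHA-3 group) XORs three vectors in a
// single instruction.
#include <arm_sve.h>
static svuint64_t xor3(svuint64_t a, svuint64_t b, svuint64_t c) {
  return sveor3_u64(a, b, c); // a ^ b ^ c
}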
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
-svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
-svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
-svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
-svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
-svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
-svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
-svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
-svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
-svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
-svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
-svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
-svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
-svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
-svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
-svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
-svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
-svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
-svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
-svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
-svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
-svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
-svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
-svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
-svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
-svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
-svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
-svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
-svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
-svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
-svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
-svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
-svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
-svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
-svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
-svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
-svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
-svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
-svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
-svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
-svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
-svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
-svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
-svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
-svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
-svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
-svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
-svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
-svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
-svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
-svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
-svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
-svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
-svuint8_t svhistseg_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
-svuint8_t svhistseg_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
-svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
-svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
-svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
-svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
-svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
-svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
-svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
-svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
-svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
-svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
-svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
-svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
-svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
-svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
-svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
-svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
-svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
-svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
-svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
-svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
-svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
-svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
-svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
-svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
-svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
-svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
-svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
-svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
-svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
-svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
-svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
-svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
-svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
-svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
-svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
-svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
-svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
-svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
-svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
-svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
-svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
-svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
-svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
-svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
-svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
-svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
-svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
-svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
-svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
-svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
-svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
-svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
-svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
-svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
-svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
-svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
-svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
-svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
-svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
-svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
-svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
-svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
-svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
-svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
-svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
-svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
-svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
-svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
-svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
-svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
-svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
-svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
-svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
-svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
-svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
-svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
-svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
-svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
-svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
-svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
-svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
-svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
-svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
-svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
-svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
-svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
-svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
-svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
-svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
-svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
-svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
-svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
-svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
-svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
-svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
-svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
-svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
-svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
-svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
-svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
-svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
-svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
-svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
-svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
-svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
-svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
-svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
-svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
-svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
-svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
-svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
-svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
-svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
-svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
-svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
-svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
-svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
-svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
-svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
-svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
-svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
-svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
-svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
-svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
-svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
-svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
-svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
-svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
-svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
-svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
-svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
-svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
-svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
-svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
-svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
-svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
-svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
-svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
-svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
-svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
-svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
-svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
-svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
-svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
-svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
-svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
-svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
-svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
-svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
-svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
-svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
-svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
-svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
-svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
-svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
-svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
-svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
-svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
-svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
-svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
-svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
-svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
-svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
-svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
-svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
-svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
-svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
-svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
-svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
-svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
-svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
-svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
-svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
-svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
-svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
-svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
-svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
-svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
-svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
-svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
-svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
-svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
-svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
-svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
-svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
-svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
-svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
-svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
-svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
-svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
-svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
-svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
-svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
-svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
-svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
-svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
-svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
-svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
-svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
-svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
-svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
-svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
-svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
-svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
-svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
-svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
-svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
-svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
-svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
-svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
-svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
-svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
-svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
-svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
-svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
-svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
-svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
-svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
-svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
-svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
-svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
-svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
-svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
-svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
-svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
-svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
-svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
-svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
-svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
-svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
-svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
-svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
-svint64_t svlogb_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
-svint32_t svlogb_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
-svint16_t svlogb_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
-svint64_t svlogb_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
-svint32_t svlogb_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
-svint16_t svlogb_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
-svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
-svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
-svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
-svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
-svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
-svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
-svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
-svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
-svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
-svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
-svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
-svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
-svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
-svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
-svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
-svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
-svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
-svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
-svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
-svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
-svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
-svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
-svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
-svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
-svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
-svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
-svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
-svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
-svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
-svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
-svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
-svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
-svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
-svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
-svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
-svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
-svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
-svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
-svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
-svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
-svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
-svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
-svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
-svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
-svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
-svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
-svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
-svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
-svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
-svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
-svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
-svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
-svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
-svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
-svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
-svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
-svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
-svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
-svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
-svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
-svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
-svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
-svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
-svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
-svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
-svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
-svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
-svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
-svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
-svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
-svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
-svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
-svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
-svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
-svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
-svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
-svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
-svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
-svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
-svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
-svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
-svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
-svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
-svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
-svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
-svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
-svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
-svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
-svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
-svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
-svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
-svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
-svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
-svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
-svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
-svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
-svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
-svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
-svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
-svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
-svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
-svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
-svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
-svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
-svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
-svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
-svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
-svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
-svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
-svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
-svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
-svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
-svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
-svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
-svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
-svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
-svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
-svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
-svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
-svint64_t svmlslb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
-svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
-svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
-svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
-svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
-svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
-svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
-svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
-svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
-svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
-svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
-svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
-svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
-svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
-svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
-svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
-svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
-svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
-svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
-svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
-svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
-svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
-svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
-svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
-svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
-svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
-svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
-svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
-svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
-svint32_t svmovlb_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
-svint64_t svmovlb_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
-svint16_t svmovlb_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
-svuint32_t svmovlb_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
-svuint64_t svmovlb_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
-svuint16_t svmovlb_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
-svint32_t svmovlt_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
-svint64_t svmovlt_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
-svint16_t svmovlt_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
-svuint32_t svmovlt_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
-svuint64_t svmovlt_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
-svuint16_t svmovlt_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
-svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
-svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
-svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
-svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
-svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
-svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
-svint32_t svmullb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
-svint64_t svmullb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
-svint16_t svmullb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
-svuint32_t svmullb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
-svuint64_t svmullb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
-svuint16_t svmullb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
-svint32_t svmullb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
-svint64_t svmullb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
-svint16_t svmullb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
-svuint32_t svmullb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
-svuint64_t svmullb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
-svuint16_t svmullb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
-svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
-svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
-svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
-svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
-svint32_t svmullt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
-svint64_t svmullt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
-svint16_t svmullt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
-svuint32_t svmullt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
-svuint64_t svmullt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
-svuint16_t svmullt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
-svint32_t svmullt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
-svint64_t svmullt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
-svint16_t svmullt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
-svuint32_t svmullt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
-svuint64_t svmullt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
-svuint16_t svmullt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
-svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
-svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
-svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
-svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
-svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
-svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
-svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
-svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
-svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
-svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
-svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
-svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
-svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
-svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
-svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
-svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
-svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
-svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
-svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
-svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
-svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
-svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
-svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
-svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
-svuint8_t svpmul_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
-svuint8_t svpmul_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
-svuint64_t svpmullb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
-svuint16_t svpmullb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
-svuint64_t svpmullb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
-svuint16_t svpmullb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
-svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
-svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
-svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
-svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
-svuint64_t svpmullt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
-svuint16_t svpmullt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
-svuint64_t svpmullt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
-svuint16_t svpmullt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
-svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
-svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
-svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
-svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
-svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
-svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
-svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
-svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
-svint8_t svqabs_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
-svint32_t svqabs_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
-svint64_t svqabs_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
-svint16_t svqabs_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
-svint8_t svqabs_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
-svint32_t svqabs_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
-svint64_t svqabs_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
-svint16_t svqabs_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
-svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
-svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
-svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
-svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
-svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
-svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
-svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
-svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
-svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
-svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
-svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
-svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
-svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
-svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
-svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
-svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
-svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
-svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
-svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
-svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
-svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
-svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
-svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
-svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
-svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
-svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
-svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
-svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
-svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
-svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
-svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
-svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
-svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
-svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
-svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
-svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
-svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
-svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
-svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
-svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
-svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
-svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
-svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
-svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
-svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
-svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
-svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
-svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
-svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
-svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
-svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
-svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
-svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
-svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
-svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
-svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
-svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
-svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
-svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
-svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
-svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
-svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
-svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
-svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
-svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
-svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
-svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
-svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
-svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
-svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
-svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
-svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
-svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
-svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
-svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
-svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
-svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
-svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
-svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
-svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
-svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
-svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
-svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
-svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
-svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
-svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
-svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
-svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
-svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
-svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
-svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
-svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
-svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
-svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
-svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
-svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
-svint8_t svqdmulh_n_s8(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
-svint32_t svqdmulh_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
-svint64_t svqdmulh_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
-svint16_t svqdmulh_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
-svint8_t svqdmulh_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
-svint32_t svqdmulh_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
-svint64_t svqdmulh_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
-svint16_t svqdmulh_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
-svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
-svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
-svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
-svint32_t svqdmullb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
-svint64_t svqdmullb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
-svint16_t svqdmullb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
-svint32_t svqdmullb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
-svint64_t svqdmullb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
-svint16_t svqdmullb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
-svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
-svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
-svint32_t svqdmullt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
-svint64_t svqdmullt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
-svint16_t svqdmullt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
-svint32_t svqdmullt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
-svint64_t svqdmullt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
-svint16_t svqdmullt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
-svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
-svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
-svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
-svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
-svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
-svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
-svint8_t svqneg_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
-svint32_t svqneg_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
-svint64_t svqneg_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
-svint16_t svqneg_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
-svint8_t svqneg_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
-svint32_t svqneg_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
-svint64_t svqneg_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
-svint16_t svqneg_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
-svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
-svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
-svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
-svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
-svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
-svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
-svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
-svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
-svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
-svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
-svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
-svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
-svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
-svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
-svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
-svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
-svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
-svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
-svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
-svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
-svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
-svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
-svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
-svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
-svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
-svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
-svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
-svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
-svint8_t svqrdmulh_n_s8(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
-svint32_t svqrdmulh_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
-svint64_t svqrdmulh_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
-svint16_t svqrdmulh_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
-svint8_t svqrdmulh_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
-svint32_t svqrdmulh_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
-svint64_t svqrdmulh_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
-svint16_t svqrdmulh_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
-svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
-svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
-svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
-svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
-svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
-svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
-svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
-svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
-svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
-svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
-svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
-svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
-svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
-svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
-svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
-svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
-svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
-svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
-svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
-svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
-svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
-svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
-svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
-svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
-svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
-svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
-svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
-svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
-svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
-svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
-svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
-svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
-svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
-svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
-svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
-svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
-svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
-svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
-svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
-svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
-svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
-svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
-svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
-svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
-svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
-svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
-svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
-svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
-svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
-svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
-svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
-svint16_t svqrshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
-svint32_t svqrshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
-svint8_t svqrshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
-svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
-svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
-svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
-svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
-svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
-svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
-svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
-svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
-svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
-svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
-svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
-svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
-svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
-svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
-svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
-svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
-svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
-svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
-svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
-svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
-svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
-svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
-svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
-svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
-svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
-svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
-svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
-svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
-svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
-svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
-svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
-svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
-svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
-svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
-svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
-svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
-svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
-svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
-svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
-svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
-svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
-svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
-svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
-svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
-svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
-svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
-svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
-svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
-svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
-svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
-svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
-svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
-svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
-svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
-svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
-svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
-svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
-svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
-svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
-svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
-svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
-svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
-svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
-svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
-svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
-svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
-svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
-svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
-svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
-svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
-svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
-svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
-svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
-svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
-svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
-svint16_t svqshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
-svint32_t svqshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
-svint8_t svqshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
-svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
-svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
-svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
-svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
-svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
-svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
-svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
-svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
-svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
-svuint16_t svqshrunb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
-svuint32_t svqshrunb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
-svuint8_t svqshrunb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
-svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
-svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
-svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
-svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
-svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
-svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
-svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
-svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
-svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
-svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
-svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
-svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
-svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
-svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
-svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
-svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
-svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
-svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
-svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
-svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
-svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
-svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
-svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
-svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
-svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
-svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
-svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
-svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
-svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
-svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
-svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
-svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
-svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
-svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
-svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
-svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
-svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
-svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
-svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
-svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
-svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
-svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
-svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
-svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
-svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
-svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
-svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
-svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
-svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
-svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
-svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
-svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
-svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
-svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
-svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
-svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
-svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
-svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
-svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
-svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
-svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
-svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
-svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
-svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
-svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
-svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
-svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
-svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
-svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
-svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
-svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
-svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
-svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
-svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
-svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
-svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
-svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
-svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
-svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
-svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
-svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
-svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
-svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
-svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
-svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
-svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
-svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
-svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
-svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
-svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
-svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
-svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
-svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
-svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
-svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
-svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
-svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
-svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
-svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
-svint16_t svqxtnb_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
-svint32_t svqxtnb_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
-svint8_t svqxtnb_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
-svuint16_t svqxtnb_u32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
-svuint32_t svqxtnb_u64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
-svuint8_t svqxtnb_u16(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
-svint16_t svqxtnt_s32(svint16_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
-svint32_t svqxtnt_s64(svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
-svint8_t svqxtnt_s16(svint8_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
-svuint16_t svqxtnt_u32(svuint16_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
-svuint32_t svqxtnt_u64(svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
-svuint8_t svqxtnt_u16(svuint8_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
-svuint16_t svqxtunb_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
-svuint32_t svqxtunb_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
-svuint8_t svqxtunb_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
-svuint16_t svqxtunt_s32(svuint16_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
-svuint32_t svqxtunt_s64(svuint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
-svuint8_t svqxtunt_s16(svuint8_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
-svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
-svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
-svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
-svint16_t svraddhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
-svint32_t svraddhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
-svint8_t svraddhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
-svuint16_t svraddhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
-svuint32_t svraddhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
-svuint8_t svraddhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
-svint16_t svraddhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
-svint32_t svraddhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
-svint8_t svraddhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
-svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
-svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
-svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
-svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
-svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
-svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
-svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
-svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
-svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
-svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
-svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
-svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
-svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
-svuint32_t svrecpe_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
-svuint32_t svrecpe_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
-svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
-svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
-svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
-svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
-svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
-svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
-svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
-svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
-svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
-svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
-svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
-svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
-svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
-svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
-svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
-svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
-svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
-svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
-svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
-svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
-svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
-svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
-svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
-svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
-svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
-svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
-svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
-svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
-svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
-svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
-svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
-svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
-svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
-svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
-svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
-svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
-svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
-svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
-svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
-svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
-svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
-svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
-svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
-svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
-svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
-svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
-svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
-svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
-svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
-svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
-svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
-svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
-svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
-svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
-svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
-svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
-svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
-svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
-svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
-svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
-svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
-svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
-svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
-svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
-svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
-svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
-svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
-svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
-svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
-svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
-svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
-svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
-svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
-svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
-svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
-svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
-svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
-svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
-svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
-svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
-svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
-svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
-svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
-svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
-svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
-svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
-svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
-svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
-svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
-svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
-svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
-svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
-svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
-svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
-svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
-svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
-svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
-svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
-svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
-svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
-svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
-svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
-svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
-svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
-svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
-svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
-svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
-svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
-svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
-svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
-svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
-svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
-svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
-svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
-svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
-svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
-svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
-svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
-svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
-svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
-svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
-svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
-svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
-svint16_t svrshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
-svint32_t svrshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
-svint8_t svrshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
-svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
-svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
-svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
-svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
-svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
-svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
-svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
-svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
-svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
-svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
-svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
-svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
-svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
-svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
-svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
-svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
-svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
-svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
-svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
-svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
-svint16_t svrsubhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
-svint32_t svrsubhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
-svint8_t svrsubhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
-svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
-svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
-svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
-svint16_t svrsubhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
-svint32_t svrsubhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
-svint8_t svrsubhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
-svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
-svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
-svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
-svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
-svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
-svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
-svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
-svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
-svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
-svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
-svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
-svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
-svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
-svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
-svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
-svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
-svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
-svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
-svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
-svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
-svint32_t svshllb_n_s32(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
-svint64_t svshllb_n_s64(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
-svint16_t svshllb_n_s16(svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
-svuint32_t svshllb_n_u32(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
-svuint64_t svshllb_n_u64(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
-svuint16_t svshllb_n_u16(svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
-svint32_t svshllt_n_s32(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
-svint64_t svshllt_n_s64(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
-svint16_t svshllt_n_s16(svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
-svuint32_t svshllt_n_u32(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
-svuint64_t svshllt_n_u64(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
-svuint16_t svshllt_n_u16(svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
-svuint16_t svshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
-svuint32_t svshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
-svuint8_t svshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
-svint16_t svshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
-svint32_t svshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
-svint8_t svshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
-svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
-svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
-svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
-svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
-svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
-svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
-svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
-svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
-svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
-svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
-svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
-svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
-svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
-svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
-svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
-svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
-svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
-svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
-svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
-svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
-svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
-svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
-svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
-svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
-svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
-svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
-svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
-svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
-svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
-svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
-svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
-svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
-svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
-svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
-svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
-svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
-svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
-svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
-svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
-svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
-svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
-svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
-svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
-svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
-svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
-svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
-svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
-svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
-svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
-svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
-svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
-svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
-svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
-svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
-void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
-void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
-void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
-void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
-void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
-void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
-void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
-void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
-void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
-void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
-void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
-void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
-void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
-void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
-void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
-void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
-void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
-void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
-void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
-void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
-void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
-void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
-void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
-void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
-void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
-void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
-void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
-void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
-void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
-void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
-void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
-void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
-void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
-void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
-void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
-void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
-void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
-void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
-void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
-void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
-void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
-void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
-void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
-void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
-void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
-void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
-void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
-void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
-void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
-void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
-void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
-void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
-void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
-void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
-void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
-void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
-void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
-void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
-void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
-void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
-void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
-void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
-void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
-void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
-void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
-void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
-void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
-void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
-void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
-void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
-void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
-void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
-void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
-void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
-void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
-void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
-void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
-void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
-void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
-void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
-void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
-void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
-void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
-svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
-svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
-svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
-svint16_t svsubhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
-svint32_t svsubhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
-svint8_t svsubhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
-svuint16_t svsubhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
-svuint32_t svsubhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
-svuint8_t svsubhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
-svint16_t svsubhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
-svint32_t svsubhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
-svint8_t svsubhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
-svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
-svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
-svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
-svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
-svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
-svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
-svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
-svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
-svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
-svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
-svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
-svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
-svint32_t svsublb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
-svint64_t svsublb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
-svint16_t svsublb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
-svuint32_t svsublb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
-svuint64_t svsublb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
-svuint16_t svsublb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
-svint32_t svsublb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
-svint64_t svsublb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
-svint16_t svsublb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
-svuint32_t svsublb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
-svuint64_t svsublb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
-svuint16_t svsublb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
-svint32_t svsublbt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
-svint64_t svsublbt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
-svint16_t svsublbt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
-svint32_t svsublbt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
-svint64_t svsublbt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
-svint16_t svsublbt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
-svint32_t svsublt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
-svint64_t svsublt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
-svint16_t svsublt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
-svuint32_t svsublt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
-svuint64_t svsublt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
-svuint16_t svsublt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
-svint32_t svsublt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
-svint64_t svsublt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
-svint16_t svsublt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
-svuint32_t svsublt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
-svuint64_t svsublt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
-svuint16_t svsublt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
-svint32_t svsubltb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
-svint64_t svsubltb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
-svint16_t svsubltb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
-svint32_t svsubltb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
-svint64_t svsubltb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
-svint16_t svsubltb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
-svint32_t svsubwb_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
-svint64_t svsubwb_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
-svint16_t svsubwb_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
-svuint32_t svsubwb_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
-svuint64_t svsubwb_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
-svuint16_t svsubwb_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
-svint32_t svsubwb_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
-svint64_t svsubwb_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
-svint16_t svsubwb_s16(svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
-svuint32_t svsubwb_u32(svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
-svuint64_t svsubwb_u64(svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
-svuint16_t svsubwb_u16(svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
-svint32_t svsubwt_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
-svint64_t svsubwt_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
-svint16_t svsubwt_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
-svuint32_t svsubwt_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
-svuint64_t svsubwt_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
-svuint16_t svsubwt_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
-svint32_t svsubwt_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
-svint64_t svsubwt_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
-svint16_t svsubwt_s16(svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
-svuint32_t svsubwt_u32(svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
-svuint64_t svsubwt_u64(svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
-svuint16_t svsubwt_u16(svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
-svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
-svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
-svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
-svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
-svint8_t svtbl2_s8(svint8x2_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
-svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
-svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
-svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
-svint32_t svtbl2_s32(svint32x2_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
-svint64_t svtbl2_s64(svint64x2_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
-svint16_t svtbl2_s16(svint16x2_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
-svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
-svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
-svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
-svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
-svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
-svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
-svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
-svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
-svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
-svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
-svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
-svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
-svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
-svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
-svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
-svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
-svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
-svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
-svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
-svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
-svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
-svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
-svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
-svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
-svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
-svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
-svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
-svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
-svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
-svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
-svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
-svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
-svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
-svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
-svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
-svbool_t svwhilege_b8_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
-svbool_t svwhilege_b32_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
-svbool_t svwhilege_b64_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
-svbool_t svwhilege_b16_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
-svbool_t svwhilege_b8_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
-svbool_t svwhilege_b32_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
-svbool_t svwhilege_b64_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
-svbool_t svwhilege_b16_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
-svbool_t svwhilege_b8_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
-svbool_t svwhilege_b32_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
-svbool_t svwhilege_b64_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
-svbool_t svwhilege_b16_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
-svbool_t svwhilege_b8_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
-svbool_t svwhilege_b32_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
-svbool_t svwhilege_b64_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
-svbool_t svwhilege_b16_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
-svbool_t svwhilegt_b8_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
-svbool_t svwhilegt_b32_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
-svbool_t svwhilegt_b64_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
-svbool_t svwhilegt_b16_s32(int32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
-svbool_t svwhilegt_b8_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
-svbool_t svwhilegt_b32_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
-svbool_t svwhilegt_b64_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
-svbool_t svwhilegt_b16_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
-svbool_t svwhilegt_b8_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
-svbool_t svwhilegt_b32_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
-svbool_t svwhilegt_b64_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
-svbool_t svwhilegt_b16_u32(uint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
-svbool_t svwhilegt_b8_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
-svbool_t svwhilegt_b32_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
-svbool_t svwhilegt_b64_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
-svbool_t svwhilegt_b16_u64(uint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
-svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
-svbool_t svwhilerw_s8(int8_t const *, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
-svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
-svbool_t svwhilerw_f64(float64_t const *, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
-svbool_t svwhilerw_s64(int64_t const *, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
-svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
-svbool_t svwhilerw_f16(float16_t const *, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
-svbool_t svwhilerw_s16(int16_t const *, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
-svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
-svbool_t svwhilerw_f32(float32_t const *, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
-svbool_t svwhilerw_s32(int32_t const *, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
-svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
-svbool_t svwhilewr_s8(int8_t const *, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
-svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
-svbool_t svwhilewr_f64(float64_t const *, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
-svbool_t svwhilewr_s64(int64_t const *, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
-svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
-svbool_t svwhilewr_f16(float16_t const *, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
-svbool_t svwhilewr_s16(int16_t const *, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
-svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
-svbool_t svwhilewr_f32(float32_t const *, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
-svbool_t svwhilewr_s32(int32_t const *, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
-svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
-svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
-svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
-svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
-svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
-svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
-svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
-svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
-svint8_t svaba(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
-svint32_t svaba(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
-svint64_t svaba(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
-svint16_t svaba(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
-svuint8_t svaba(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
-svuint32_t svaba(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
-svuint64_t svaba(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
-svuint16_t svaba(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
-svint8_t svaba(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
-svint32_t svaba(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
-svint64_t svaba(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
-svint16_t svaba(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
-svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
-svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
-svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
-svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
-svint32_t svabalb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
-svint64_t svabalb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
-svint16_t svabalb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
-svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
-svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
-svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
-svint32_t svabalb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
-svint64_t svabalb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
-svint16_t svabalb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
-svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
-svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
-svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
-svint32_t svabalt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
-svint64_t svabalt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
-svint16_t svabalt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
-svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
-svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
-svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
-svint32_t svabalt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
-svint64_t svabalt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
-svint16_t svabalt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
-svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
-svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
-svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
-svint32_t svabdlb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
-svint64_t svabdlb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
-svint16_t svabdlb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
-svuint32_t svabdlb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
-svuint64_t svabdlb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
-svuint16_t svabdlb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
-svint32_t svabdlb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
-svint64_t svabdlb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
-svint16_t svabdlb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
-svuint32_t svabdlb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
-svuint64_t svabdlb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
-svuint16_t svabdlb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
-svint32_t svabdlt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
-svint64_t svabdlt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
-svint16_t svabdlt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
-svuint32_t svabdlt(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
-svuint64_t svabdlt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
-svuint16_t svabdlt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
-svint32_t svabdlt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
-svint64_t svabdlt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
-svint16_t svabdlt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
-svuint32_t svabdlt(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
-svuint64_t svabdlt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
-svuint16_t svabdlt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
-svint32_t svadalp_m(svbool_t, svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
-svint64_t svadalp_m(svbool_t, svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
-svint16_t svadalp_m(svbool_t, svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
-svint32_t svadalp_x(svbool_t, svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
-svint64_t svadalp_x(svbool_t, svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
-svint16_t svadalp_x(svbool_t, svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
-svint32_t svadalp_z(svbool_t, svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
-svint64_t svadalp_z(svbool_t, svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
-svint16_t svadalp_z(svbool_t, svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
-svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
-svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
-svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
-svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
-svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
-svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
-svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
-svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
-svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
-svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
-svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
-svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
-svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
-svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
-svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
-svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
-svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
-svuint16_t svaddhnb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
-svuint32_t svaddhnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
-svuint8_t svaddhnb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
-svint16_t svaddhnb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
-svint32_t svaddhnb(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
-svint8_t svaddhnb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
-svuint16_t svaddhnb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
-svuint32_t svaddhnb(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
-svuint8_t svaddhnb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
-svint16_t svaddhnb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
-svint32_t svaddhnb(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
-svint8_t svaddhnb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
-svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
-svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
-svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
-svint16_t svaddhnt(svint16_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
-svint32_t svaddhnt(svint32_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
-svint8_t svaddhnt(svint8_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
-svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
-svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
-svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
-svint16_t svaddhnt(svint16_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
-svint32_t svaddhnt(svint32_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
-svint8_t svaddhnt(svint8_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
-svint32_t svaddlb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
-svint64_t svaddlb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
-svint16_t svaddlb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
-svuint32_t svaddlb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
-svuint64_t svaddlb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
-svuint16_t svaddlb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
-svint32_t svaddlb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
-svint64_t svaddlb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
-svint16_t svaddlb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
-svuint32_t svaddlb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
-svuint64_t svaddlb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
-svuint16_t svaddlb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
-svint32_t svaddlbt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
-svint64_t svaddlbt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
-svint16_t svaddlbt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
-svint32_t svaddlbt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
-svint64_t svaddlbt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
-svint16_t svaddlbt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
-svint32_t svaddlt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
-svint64_t svaddlt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
-svint16_t svaddlt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
-svuint32_t svaddlt(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
-svuint64_t svaddlt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
-svuint16_t svaddlt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
-svint32_t svaddlt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
-svint64_t svaddlt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
-svint16_t svaddlt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
-svuint32_t svaddlt(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
-svuint64_t svaddlt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
-svuint16_t svaddlt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
-svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
-svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
-svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
-svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
-svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
-svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
-svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
-svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
-svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
-svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
-svint8_t svaddp_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
-svint32_t svaddp_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
-svint64_t svaddp_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
-svint16_t svaddp_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
-svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
-svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
-svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
-svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
-svint8_t svaddp_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
-svint32_t svaddp_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))
-svint64_t svaddp_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))
-svint16_t svaddp_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))
-svint32_t svaddwb(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))
-svint64_t svaddwb(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))
-svint16_t svaddwb(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))
-svuint32_t svaddwb(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))
-svuint64_t svaddwb(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))
-svuint16_t svaddwb(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))
-svint32_t svaddwb(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))
-svint64_t svaddwb(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))
-svint16_t svaddwb(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))
-svuint32_t svaddwb(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))
-svuint64_t svaddwb(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))
-svuint16_t svaddwb(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))
-svint32_t svaddwt(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))
-svint64_t svaddwt(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))
-svint16_t svaddwt(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))
-svuint32_t svaddwt(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))
-svuint64_t svaddwt(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))
-svuint16_t svaddwt(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))
-svint32_t svaddwt(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))
-svint64_t svaddwt(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))
-svint16_t svaddwt(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))
-svuint32_t svaddwt(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))
-svuint64_t svaddwt(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))
-svuint16_t svaddwt(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))
-svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))
-svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))
-svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))
-svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))
-svint8_t svbcax(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))
-svint32_t svbcax(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))
-svint64_t svbcax(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))
-svint16_t svbcax(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))
-svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))
-svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))
-svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))
-svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))
-svint8_t svbcax(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))
-svint32_t svbcax(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))
-svint64_t svbcax(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))
-svint16_t svbcax(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))
-svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))
-svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))
-svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))
-svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))
-svint8_t svbsl1n(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))
-svint32_t svbsl1n(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))
-svint64_t svbsl1n(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))
-svint16_t svbsl1n(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))
-svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))
-svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))
-svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))
-svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))
-svint8_t svbsl1n(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
-svint32_t svbsl1n(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
-svint64_t svbsl1n(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
-svint16_t svbsl1n(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
-svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
-svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
-svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
-svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
-svint8_t svbsl2n(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
-svint32_t svbsl2n(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
-svint64_t svbsl2n(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
-svint16_t svbsl2n(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
-svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
-svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
-svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
-svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
-svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
-svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
-svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
-svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
-svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
-svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
-svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
-svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
-svint8_t svbsl(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
-svint32_t svbsl(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
-svint64_t svbsl(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
-svint16_t svbsl(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
-svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
-svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
-svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
-svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
-svint8_t svbsl(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
-svint32_t svbsl(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
-svint64_t svbsl(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
-svint16_t svbsl(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
-svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
-svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
-svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
-svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
-svint8_t svcadd(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
-svint32_t svcadd(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
-svint64_t svcadd(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
-svint16_t svcadd(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
-svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
-svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
-svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
-svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
-svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
-svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
-svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
-svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
-svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
-svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
-svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
-svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
-svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
-svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
-svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
-svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
-svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
-svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
-svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
-svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
-svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
-svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
-svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
-svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
-svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
-svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
-svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
-svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
-svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
-svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
-svint8_t sveor3(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
-svint32_t sveor3(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
-svint64_t sveor3(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
-svint16_t sveor3(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
-svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
-svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
-svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
-svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
-svint8_t sveor3(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
-svint32_t sveor3(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
-svint64_t sveor3(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
-svint16_t sveor3(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
-svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
-svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
-svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
-svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
-svint8_t sveorbt(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
-svint32_t sveorbt(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
-svint64_t sveorbt(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
-svint16_t sveorbt(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
-svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
-svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
-svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
-svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
-svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
-svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
-svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
-svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
-svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
-svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
-svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
-svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
-svint8_t sveortb(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
-svint32_t sveortb(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
-svint64_t sveortb(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
-svint16_t sveortb(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
-svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
-svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
-svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
-svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
-svint8_t sveortb(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
-svint32_t sveortb(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
-svint64_t sveortb(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
-svint16_t sveortb(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
-svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
-svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
-svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
-svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
-svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
-svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
-svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
-svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
-svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
-svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
-svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
-svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
-svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
-svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
-svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
-svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
-svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
-svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
-svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
-svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
-svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
-svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
-svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
-svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
-svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
-svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
-svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
-svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
-svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
-svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
-svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
-svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
-svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
-svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
-svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
-svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
-svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
-svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
-svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
-svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
-svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
-svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
-svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
-svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
-svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
-svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
-svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
-svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
-svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
-svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
-svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
-svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
-svuint8_t svhistseg(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
-svuint8_t svhistseg(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
-svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
-svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
-svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
-svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
-svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
-svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
-svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
-svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
-svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
-svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
-svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
-svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
-svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
-svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
-svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
-svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
-svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
-svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
-svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
-svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
-svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
-svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
-svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
-svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
-svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
-svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
-svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
-svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
-svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
-svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
-svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
-svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
-svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
-svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
-svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
-svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
-svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
-svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
-svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
-svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
-svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
-svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
-svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
-svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
-svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
-svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
-svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
-svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
-svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
-svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
-svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
-svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
-svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
-svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
-svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
-svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
-svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
-svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
-svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
-svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
-svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
-svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
-svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
-svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
-svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
-svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
-svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
-svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
-svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
-svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
-svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
-svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
-svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
-svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
-svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
-svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
-svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
-svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
-svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
-svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
-svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
-svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
-svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
-svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
-svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
-svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
-svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
-svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
-svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
-svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
-svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
-svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
-svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
-svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
-svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
-svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
-svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
-svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
-svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
-svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
-svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
-svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
-svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
-svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
-svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
-svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
-svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
-svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
-svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
-svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
-svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
-svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
-svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
-svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
-svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
-svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
-svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
-svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
-svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
-svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
-svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
-svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
-svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
-svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
-svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
-svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
-svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
-svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
-svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
-svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
-svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
-svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
-svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
-svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
-svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
-svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
-svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
-svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
-svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
-svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
-svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
-svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
-svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
-svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
-svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
-svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
-svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
-svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
-svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
-svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
-svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
-svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
-svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
-svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
-svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
-svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
-svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
-svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
-svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
-svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
-svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
-svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
-svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
-svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
-svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
-svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
-svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
-svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
-svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
-svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
-svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
-svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
-svint64_t svlogb_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
-svint32_t svlogb_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
-svint16_t svlogb_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
-svint64_t svlogb_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
-svint32_t svlogb_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
-svint16_t svlogb_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
-svbool_t svmatch(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
-svbool_t svmatch(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
-svbool_t svmatch(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
-svbool_t svmatch(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
-svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
-svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
-svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
-svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
-svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
-svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
-svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
-svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
-svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
-svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
-svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
-svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
-svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
-svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
-svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
-svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
-svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
-svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
-svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
-svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
-svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
-svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
-svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
-svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
-svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
-svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
-svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
-svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
-svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
-svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
-svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
-svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
-svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
-svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
-svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
-svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
-svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
-svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
-svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
-svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
-svint8_t svminp_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
-svint32_t svminp_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
-svint64_t svminp_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
-svint16_t svminp_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
-svint8_t svminp_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
-svint32_t svminp_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
-svint64_t svminp_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
-svint16_t svminp_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
-svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
-svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
-svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
-svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
-svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
-svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
-svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
-svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
-svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
-svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
-svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
-svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
-svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
-svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
-svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
-svint32_t svmlalb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
-svint64_t svmlalb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
-svint16_t svmlalb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
-svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
-svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
-svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
-svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
-svint32_t svmlalb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
-svint64_t svmlalb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
-svint16_t svmlalb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
-svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
-svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
-svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
-svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
-svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
-svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
-svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
-svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
-svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
-svint32_t svmlalt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
-svint64_t svmlalt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
-svint16_t svmlalt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
-svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
-svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
-svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
-svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
-svint32_t svmlalt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
-svint64_t svmlalt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
-svint16_t svmlalt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
-svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
-svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
-svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
-svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
-svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
-svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
-svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
-svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
-svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
-svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
-svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
-svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
-svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
-svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
-svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
-svint32_t svmlslb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
-svint64_t svmlslb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
-svint16_t svmlslb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
-svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
-svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
-svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
-svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
-svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
-svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
-svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
-svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
-svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
-svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
-svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
-svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
-svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
-svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
-svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
-svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
-svint32_t svmlslt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
-svint64_t svmlslt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
-svint16_t svmlslt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
-svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
-svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
-svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
-svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
-svint32_t svmlslt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
-svint64_t svmlslt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
-svint16_t svmlslt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
-svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
-svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
-svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
-svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
-svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
-svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
-svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
-svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
-svint32_t svmovlb(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
-svint64_t svmovlb(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
-svint16_t svmovlb(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
-svuint32_t svmovlb(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
-svuint64_t svmovlb(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
-svuint16_t svmovlb(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
-svint32_t svmovlt(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
-svint64_t svmovlt(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
-svint16_t svmovlt(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
-svuint32_t svmovlt(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
-svuint64_t svmovlt(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
-svuint16_t svmovlt(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
-svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
-svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
-svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
-svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
-svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
-svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
-svint32_t svmullb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
-svint64_t svmullb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
-svint16_t svmullb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
-svuint32_t svmullb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
-svuint64_t svmullb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
-svuint16_t svmullb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
-svint32_t svmullb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
-svint64_t svmullb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
-svint16_t svmullb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
-svuint32_t svmullb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
-svuint64_t svmullb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
-svuint16_t svmullb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
-svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
-svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
-svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
-svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
-svint32_t svmullt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
-svint64_t svmullt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
-svint16_t svmullt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
-svuint32_t svmullt(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
-svuint64_t svmullt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
-svuint16_t svmullt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
-svint32_t svmullt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
-svint64_t svmullt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
-svint16_t svmullt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
-svuint32_t svmullt(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
-svuint64_t svmullt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
-svuint16_t svmullt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
-svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
-svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
-svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
-svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
-svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
-svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
-svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
-svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
-svint8_t svnbsl(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
-svint32_t svnbsl(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
-svint64_t svnbsl(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
-svint16_t svnbsl(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
-svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
-svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
-svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
-svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
-svint8_t svnbsl(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
-svint32_t svnbsl(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
-svint64_t svnbsl(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
-svint16_t svnbsl(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
-svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
-svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
-svbool_t svnmatch(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
-svbool_t svnmatch(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
-svuint8_t svpmul(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
-svuint8_t svpmul(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
-svuint64_t svpmullb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
-svuint16_t svpmullb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
-svuint64_t svpmullb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
-svuint16_t svpmullb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
-svuint8_t svpmullb_pair(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
-svuint32_t svpmullb_pair(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
-svuint8_t svpmullb_pair(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
-svuint32_t svpmullb_pair(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
-svuint64_t svpmullt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
-svuint16_t svpmullt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
-svuint64_t svpmullt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
-svuint16_t svpmullt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
-svuint8_t svpmullt_pair(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
-svuint32_t svpmullt_pair(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
-svuint8_t svpmullt_pair(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
-svuint32_t svpmullt_pair(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
-svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
-svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
-svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
-svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
-svint8_t svqabs_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
-svint32_t svqabs_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
-svint64_t svqabs_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
-svint16_t svqabs_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
-svint8_t svqabs_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
-svint32_t svqabs_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
-svint64_t svqabs_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
-svint16_t svqabs_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
-svint8_t svqadd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
-svint32_t svqadd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
-svint64_t svqadd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
-svint16_t svqadd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
-svint8_t svqadd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
-svint32_t svqadd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
-svint64_t svqadd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
-svint16_t svqadd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
-svint8_t svqadd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
-svint32_t svqadd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
-svint64_t svqadd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
-svint16_t svqadd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
-svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
-svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
-svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
-svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
-svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
-svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
-svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
-svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
-svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
-svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
-svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
-svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
-svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
-svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
-svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
-svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
-svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
-svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
-svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
-svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
-svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
-svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
-svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
-svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
-svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
-svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
-svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
-svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
-svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
-svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
-svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
-svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
-svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
-svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
-svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
-svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
-svint8_t svqcadd(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
-svint32_t svqcadd(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
-svint64_t svqcadd(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
-svint16_t svqcadd(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
-svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
-svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
-svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
-svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
-svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
-svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
-svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
-svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
-svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
-svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
-svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
-svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
-svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
-svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
-svint32_t svqdmlalt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
-svint64_t svqdmlalt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
-svint16_t svqdmlalt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
-svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
-svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
-svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
-svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
-svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
-svint32_t svqdmlslb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
-svint64_t svqdmlslb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
-svint16_t svqdmlslb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
-svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
-svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
-svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
-svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
-svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
-svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
-svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
-svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
-svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
-svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
-svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
-svint32_t svqdmlslt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
-svint64_t svqdmlslt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
-svint16_t svqdmlslt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
-svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
-svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
-svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
-svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
-svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
-svint8_t svqdmulh(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
-svint32_t svqdmulh(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
-svint64_t svqdmulh(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
-svint16_t svqdmulh(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
-svint8_t svqdmulh(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
-svint32_t svqdmulh(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
-svint64_t svqdmulh(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
-svint16_t svqdmulh(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
-svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
-svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
-svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
-svint32_t svqdmullb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
-svint64_t svqdmullb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
-svint16_t svqdmullb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
-svint32_t svqdmullb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
-svint64_t svqdmullb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
-svint16_t svqdmullb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
-svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
-svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
-svint32_t svqdmullt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
-svint64_t svqdmullt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
-svint16_t svqdmullt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
-svint32_t svqdmullt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
-svint64_t svqdmullt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
-svint16_t svqdmullt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
-svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
-svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
-svint8_t svqneg_m(svint8_t, svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
-svint32_t svqneg_m(svint32_t, svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
-svint64_t svqneg_m(svint64_t, svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
-svint16_t svqneg_m(svint16_t, svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
-svint8_t svqneg_x(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
-svint32_t svqneg_x(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
-svint64_t svqneg_x(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
-svint16_t svqneg_x(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
-svint8_t svqneg_z(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
-svint32_t svqneg_z(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
-svint64_t svqneg_z(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
-svint16_t svqneg_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
-svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
-svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
-svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
-svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
-svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
-svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
-svint8_t svqrdmlah(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
-svint32_t svqrdmlah(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
-svint64_t svqrdmlah(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
-svint16_t svqrdmlah(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
-svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
-svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
-svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
-svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
-svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
-svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
-svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
-svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
-svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
-svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
-svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
-svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
-svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
-svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
-svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
-svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
-svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
-svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
-svint8_t svqrdmulh(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
-svint32_t svqrdmulh(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
-svint64_t svqrdmulh(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
-svint16_t svqrdmulh(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
-svint8_t svqrdmulh(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
-svint32_t svqrdmulh(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
-svint64_t svqrdmulh(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
-svint16_t svqrdmulh(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
-svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
-svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
-svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
-svint8_t svqrshl_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
-svint32_t svqrshl_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
-svint64_t svqrshl_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
-svint16_t svqrshl_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
-svint8_t svqrshl_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
-svint32_t svqrshl_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
-svint64_t svqrshl_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
-svint16_t svqrshl_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
-svint8_t svqrshl_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
-svint32_t svqrshl_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
-svint64_t svqrshl_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
-svint16_t svqrshl_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
-svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
-svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
-svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
-svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
-svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
-svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
-svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
-svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
-svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
-svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
-svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
-svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
-svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
-svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
-svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
-svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
-svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
-svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
-svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
-svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
-svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
-svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
-svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
-svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
-svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
-svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
-svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
-svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
-svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
-svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
-svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
-svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
-svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
-svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
-svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
-svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
-svint16_t svqrshrnb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
-svint32_t svqrshrnb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
-svint8_t svqrshrnb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
-svuint16_t svqrshrnb(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
-svuint32_t svqrshrnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
-svuint8_t svqrshrnb(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
-svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
-svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
-svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
-svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
-svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
-svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
-svuint16_t svqrshrunb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
-svuint32_t svqrshrunb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
-svuint8_t svqrshrunb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
-svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
-svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
-svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
-svint8_t svqshl_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
-svint32_t svqshl_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
-svint64_t svqshl_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
-svint16_t svqshl_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
-svint8_t svqshl_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
-svint32_t svqshl_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
-svint64_t svqshl_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
-svint16_t svqshl_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
-svint8_t svqshl_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
-svint32_t svqshl_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
-svint64_t svqshl_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
-svint16_t svqshl_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
-svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
-svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
-svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
-svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
-svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
-svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
-svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
-svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
-svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
-svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
-svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
-svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
-svint8_t svqshl_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
-svint32_t svqshl_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
-svint64_t svqshl_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
-svint16_t svqshl_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
-svint8_t svqshl_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
-svint32_t svqshl_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
-svint64_t svqshl_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
-svint16_t svqshl_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
-svint8_t svqshl_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
-svint32_t svqshl_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
-svint64_t svqshl_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
-svint16_t svqshl_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
-svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
-svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
-svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
-svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
-svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
-svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
-svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
-svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
-svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
-svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
-svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
-svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
-svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
-svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
-svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
-svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
-svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
-svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
-svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
-svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
-svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
-svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
-svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
-svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
-svint16_t svqshrnb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
-svint32_t svqshrnb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
-svint8_t svqshrnb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
-svuint16_t svqshrnb(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
-svuint32_t svqshrnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
-svuint8_t svqshrnb(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
-svint16_t svqshrnt(svint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
-svint32_t svqshrnt(svint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
-svint8_t svqshrnt(svint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
-svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
-svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
-svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
-svuint16_t svqshrunb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
-svuint32_t svqshrunb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
-svuint8_t svqshrunb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
-svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
-svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
-svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
-svint8_t svqsub_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
-svint32_t svqsub_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
-svint64_t svqsub_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
-svint16_t svqsub_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
-svint8_t svqsub_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
-svint32_t svqsub_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
-svint64_t svqsub_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
-svint16_t svqsub_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
-svint8_t svqsub_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
-svint32_t svqsub_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
-svint64_t svqsub_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
-svint16_t svqsub_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
-svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
-svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
-svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
-svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
-svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
-svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
-svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
-svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
-svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
-svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
-svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
-svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
-svint8_t svqsub_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
-svint32_t svqsub_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
-svint64_t svqsub_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
-svint16_t svqsub_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
-svint8_t svqsub_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
-svint32_t svqsub_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
-svint64_t svqsub_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
-svint16_t svqsub_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
-svint8_t svqsub_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
-svint32_t svqsub_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
-svint64_t svqsub_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
-svint16_t svqsub_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
-svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
-svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
-svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
-svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
-svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
-svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
-svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
-svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
-svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
-svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
-svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
-svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
-svint8_t svqsubr_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
-svint32_t svqsubr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
-svint64_t svqsubr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
-svint16_t svqsubr_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
-svint8_t svqsubr_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
-svint32_t svqsubr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
-svint64_t svqsubr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
-svint16_t svqsubr_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
-svint8_t svqsubr_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
-svint32_t svqsubr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
-svint64_t svqsubr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
-svint16_t svqsubr_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
-svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
-svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
-svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
-svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
-svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
-svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
-svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
-svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
-svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
-svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
-svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
-svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
-svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
-svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
-svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
-svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
-svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
-svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
-svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
-svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
-svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
-svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
-svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
-svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
-svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
-svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
-svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
-svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
-svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
-svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
-svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
-svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
-svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
-svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
-svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
-svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
-svint16_t svqxtnb(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
-svint32_t svqxtnb(svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
-svint8_t svqxtnb(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
-svuint16_t svqxtnb(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
-svuint32_t svqxtnb(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
-svuint8_t svqxtnb(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
-svint16_t svqxtnt(svint16_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
-svint32_t svqxtnt(svint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
-svint8_t svqxtnt(svint8_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
-svuint16_t svqxtnt(svuint16_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
-svuint32_t svqxtnt(svuint32_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
-svuint8_t svqxtnt(svuint8_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
-svuint16_t svqxtunb(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
-svuint32_t svqxtunb(svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
-svuint8_t svqxtunb(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
-svuint16_t svqxtunt(svuint16_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
-svuint32_t svqxtunt(svuint32_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
-svuint8_t svqxtunt(svuint8_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
-svuint16_t svraddhnb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
-svuint32_t svraddhnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
-svuint8_t svraddhnb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
-svint16_t svraddhnb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
-svint32_t svraddhnb(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
-svint8_t svraddhnb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
-svuint16_t svraddhnb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
-svuint32_t svraddhnb(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
-svuint8_t svraddhnb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
-svint16_t svraddhnb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
-svint32_t svraddhnb(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
-svint8_t svraddhnb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
-svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
-svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
-svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
-svint16_t svraddhnt(svint16_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
-svint32_t svraddhnt(svint32_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
-svint8_t svraddhnt(svint8_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
-svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
-svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
-svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
-svint16_t svraddhnt(svint16_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
-svint32_t svraddhnt(svint32_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
-svint8_t svraddhnt(svint8_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
-svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
-svuint32_t svrecpe_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
-svuint32_t svrecpe_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
-svint8_t svrhadd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
-svint32_t svrhadd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
-svint64_t svrhadd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
-svint16_t svrhadd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
-svint8_t svrhadd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
-svint32_t svrhadd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
-svint64_t svrhadd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
-svint16_t svrhadd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
-svint8_t svrhadd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
-svint32_t svrhadd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
-svint64_t svrhadd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
-svint16_t svrhadd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
-svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
-svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
-svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
-svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
-svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
-svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
-svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
-svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
-svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
-svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
-svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
-svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
-svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
-svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
-svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
-svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
-svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
-svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
-svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
-svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
-svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
-svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
-svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
-svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
-svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
-svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
-svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
-svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
-svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
-svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
-svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
-svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
-svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
-svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
-svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
-svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
-svint8_t svrshl_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
-svint32_t svrshl_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
-svint64_t svrshl_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
-svint16_t svrshl_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
-svint8_t svrshl_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
-svint32_t svrshl_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
-svint64_t svrshl_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
-svint16_t svrshl_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
-svint8_t svrshl_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
-svint32_t svrshl_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
-svint64_t svrshl_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
-svint16_t svrshl_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
-svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
-svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
-svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
-svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
-svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
-svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
-svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
-svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
-svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
-svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
-svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
-svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
-svint8_t svrshl_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
-svint32_t svrshl_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
-svint64_t svrshl_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
-svint16_t svrshl_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
-svint8_t svrshl_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
-svint32_t svrshl_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
-svint64_t svrshl_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
-svint16_t svrshl_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
-svint8_t svrshl_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
-svint32_t svrshl_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
-svint64_t svrshl_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
-svint16_t svrshl_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
-svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
-svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
-svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
-svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
-svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
-svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
-svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
-svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
-svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
-svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
-svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
-svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
-svint8_t svrshr_m(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
-svint32_t svrshr_m(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
-svint64_t svrshr_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
-svint16_t svrshr_m(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
-svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
-svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
-svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
-svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
-svint8_t svrshr_x(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
-svint32_t svrshr_x(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
-svint64_t svrshr_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
-svint16_t svrshr_x(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
-svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
-svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
-svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
-svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
-svint8_t svrshr_z(svbool_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
-svint32_t svrshr_z(svbool_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
-svint64_t svrshr_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
-svint16_t svrshr_z(svbool_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
-svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
-svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
-svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
-svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
-svuint16_t svrshrnb(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
-svuint32_t svrshrnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
-svuint8_t svrshrnb(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
-svint16_t svrshrnb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
-svint32_t svrshrnb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
-svint8_t svrshrnb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
-svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
-svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
-svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
-svint16_t svrshrnt(svint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
-svint32_t svrshrnt(svint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
-svint8_t svrshrnt(svint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
-svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
-svuint32_t svrsqrte_x(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
-svuint32_t svrsqrte_z(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
-svint8_t svrsra(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
-svint32_t svrsra(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
-svint64_t svrsra(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
-svint16_t svrsra(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
-svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
-svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
-svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
-svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
-svuint16_t svrsubhnb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
-svuint32_t svrsubhnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
-svuint8_t svrsubhnb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
-svint16_t svrsubhnb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
-svint32_t svrsubhnb(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
-svint8_t svrsubhnb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
-svuint16_t svrsubhnb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
-svuint32_t svrsubhnb(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
-svuint8_t svrsubhnb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
-svint16_t svrsubhnb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
-svint32_t svrsubhnb(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
-svint8_t svrsubhnb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
-svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
-svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
-svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
-svint16_t svrsubhnt(svint16_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
-svint32_t svrsubhnt(svint32_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
-svint8_t svrsubhnt(svint8_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
-svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
-svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
-svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
-svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
-svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
-svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
-svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
-svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
-svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
-svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
-svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
-svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
-svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
-svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
-svint32_t svshllb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
-svint64_t svshllb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
-svint16_t svshllb(svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
-svuint32_t svshllb(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
-svuint64_t svshllb(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
-svuint16_t svshllb(svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
-svint32_t svshllt(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
-svint64_t svshllt(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
-svint16_t svshllt(svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
-svuint32_t svshllt(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
-svuint64_t svshllt(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
-svuint16_t svshllt(svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
-svuint16_t svshrnb(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
-svuint32_t svshrnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
-svuint8_t svshrnb(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
-svint16_t svshrnb(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
-svint32_t svshrnb(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
-svint8_t svshrnb(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
-svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
-svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
-svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
-svint16_t svshrnt(svint16_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
-svint32_t svshrnt(svint32_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
-svint8_t svshrnt(svint8_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
-svuint8_t svsli(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
-svuint32_t svsli(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
-svuint64_t svsli(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
-svuint16_t svsli(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
-svint8_t svsli(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
-svint32_t svsli(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
-svint64_t svsli(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
-svint16_t svsli(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
-svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
-svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
-svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
-svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
-svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
-svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
-svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
-svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
-svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
-svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
-svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
-svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
-svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
-svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
-svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
-svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
-svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
-svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
-svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
-svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
-svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
-svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
-svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
-svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
-svint8_t svsra(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
-svint32_t svsra(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
-svint64_t svsra(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
-svint16_t svsra(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
-svuint8_t svsra(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
-svuint32_t svsra(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
-svuint64_t svsra(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
-svuint16_t svsra(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
-svuint8_t svsri(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
-svuint32_t svsri(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
-svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
-svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
-svint8_t svsri(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
-svint32_t svsri(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
-svint64_t svsri(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
-svint16_t svsri(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
-void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
-void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
-void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
-void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
-void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
-void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
-void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
-void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
-void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
-void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
-void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
-void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
-void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
-void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
-void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
-void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
-void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
-void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
-void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
-void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
-void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
-void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
-void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
-void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
-void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
-void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
-void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
-void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
-void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
-void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
-void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
-void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
-void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
-void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
-void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
-void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
-void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
-void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
-void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
-void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
-void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
-void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
-void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
-svuint16_t svsubhnb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
-svuint32_t svsubhnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
-svuint8_t svsubhnb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
-svint16_t svsubhnb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
-svint32_t svsubhnb(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
-svint8_t svsubhnb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
-svuint16_t svsubhnb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
-svuint32_t svsubhnb(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
-svuint8_t svsubhnb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
-svint16_t svsubhnb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
-svint32_t svsubhnb(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
-svint8_t svsubhnb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
-svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
-svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
-svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
-svint16_t svsubhnt(svint16_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
-svint32_t svsubhnt(svint32_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
-svint8_t svsubhnt(svint8_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
-svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
-svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
-svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
-svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
-svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
-svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
-svint32_t svsublb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
-svint64_t svsublb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
-svint16_t svsublb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
-svuint32_t svsublb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
-svuint64_t svsublb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
-svuint16_t svsublb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
-svint32_t svsublb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
-svint64_t svsublb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
-svint16_t svsublb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
-svuint32_t svsublb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
-svuint64_t svsublb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
-svuint16_t svsublb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
-svint32_t svsublbt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
-svint64_t svsublbt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
-svint16_t svsublbt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
-svint32_t svsublbt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
-svint64_t svsublbt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
-svint16_t svsublbt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
-svint32_t svsublt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
-svint64_t svsublt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
-svint16_t svsublt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
-svuint32_t svsublt(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
-svuint64_t svsublt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
-svuint16_t svsublt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
-svint32_t svsublt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
-svint64_t svsublt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
-svint16_t svsublt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
-svuint32_t svsublt(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
-svuint64_t svsublt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
-svuint16_t svsublt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
-svint32_t svsubltb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
-svint64_t svsubltb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
-svint16_t svsubltb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
-svint32_t svsubltb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
-svint64_t svsubltb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
-svint16_t svsubltb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
-svint32_t svsubwb(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
-svint64_t svsubwb(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
-svint16_t svsubwb(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
-svuint32_t svsubwb(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
-svuint64_t svsubwb(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
-svuint16_t svsubwb(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
-svint32_t svsubwb(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
-svint64_t svsubwb(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
-svint16_t svsubwb(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
-svuint32_t svsubwb(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
-svuint64_t svsubwb(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
-svuint16_t svsubwb(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
-svint32_t svsubwt(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
-svint64_t svsubwt(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
-svint16_t svsubwt(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
-svuint32_t svsubwt(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
-svuint64_t svsubwt(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
-svuint16_t svsubwt(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
-svint32_t svsubwt(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
-svint64_t svsubwt(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
-svint16_t svsubwt(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
-svuint32_t svsubwt(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
-svuint64_t svsubwt(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
-svuint16_t svsubwt(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
-svuint8_t svtbl2(svuint8x2_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
-svuint32_t svtbl2(svuint32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
-svuint64_t svtbl2(svuint64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
-svuint16_t svtbl2(svuint16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
-svint8_t svtbl2(svint8x2_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
-svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
-svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
-svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
-svint32_t svtbl2(svint32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
-svint64_t svtbl2(svint64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
-svint16_t svtbl2(svint16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
-svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
-svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
-svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
-svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
-svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
-svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
-svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
-svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
-svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
-svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
-svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
-svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
-svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
-svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
-svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
-svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
-svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
-svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
-svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
-svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
-svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
-svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
-svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
-svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
-svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
-svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
-svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
-svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
-svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
-svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
-svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
-svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
-svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
-svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
-svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
-svbool_t svwhilege_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
-svbool_t svwhilege_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
-svbool_t svwhilege_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
-svbool_t svwhilege_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
-svbool_t svwhilege_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
-svbool_t svwhilege_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
-svbool_t svwhilege_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
-svbool_t svwhilege_b16(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
-svbool_t svwhilege_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
-svbool_t svwhilege_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
-svbool_t svwhilege_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
-svbool_t svwhilege_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
-svbool_t svwhilege_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
-svbool_t svwhilege_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
-svbool_t svwhilege_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
-svbool_t svwhilege_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
-svbool_t svwhilegt_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
-svbool_t svwhilegt_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
-svbool_t svwhilegt_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
-svbool_t svwhilegt_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
-svbool_t svwhilegt_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
-svbool_t svwhilegt_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
-svbool_t svwhilegt_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
-svbool_t svwhilegt_b16(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
-svbool_t svwhilegt_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
-svbool_t svwhilegt_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
-svbool_t svwhilegt_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
-svbool_t svwhilegt_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
-svbool_t svwhilegt_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
-svbool_t svwhilegt_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
-svbool_t svwhilegt_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
-svbool_t svwhilegt_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
-svbool_t svwhilerw(uint8_t const *, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
-svbool_t svwhilerw(int8_t const *, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
-svbool_t svwhilerw(uint64_t const *, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
-svbool_t svwhilerw(float64_t const *, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
-svbool_t svwhilerw(int64_t const *, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
-svbool_t svwhilerw(uint16_t const *, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
-svbool_t svwhilerw(float16_t const *, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
-svbool_t svwhilerw(int16_t const *, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
-svbool_t svwhilerw(uint32_t const *, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
-svbool_t svwhilerw(float32_t const *, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
-svbool_t svwhilerw(int32_t const *, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
-svbool_t svwhilewr(uint8_t const *, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
-svbool_t svwhilewr(int8_t const *, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
-svbool_t svwhilewr(uint64_t const *, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
-svbool_t svwhilewr(float64_t const *, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
-svbool_t svwhilewr(int64_t const *, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
-svbool_t svwhilewr(uint16_t const *, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
-svbool_t svwhilewr(float16_t const *, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
-svbool_t svwhilewr(int16_t const *, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
-svbool_t svwhilewr(uint32_t const *, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
-svbool_t svwhilewr(float32_t const *, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
-svbool_t svwhilewr(int32_t const *, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
-svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
-svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
-svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
-svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
-svint8_t svxar(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
-svint32_t svxar(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
-svint64_t svxar(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
-svint16_t svxar(svint16_t, svint16_t, uint64_t);
-#endif  //defined(__ARM_FEATURE_SVE2)
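/* Illustrative sketch, not part of the original header: the overloaded
   svwhilewr/svwhilelt intrinsics declared above support alias-aware loops.
   The helper name and loop structure below are our own assumptions; the
   sketch copies n floats, masking each iteration to lanes that avoid a
   write-after-read hazard between dst and src (dst == src not handled). */
void copy_f32_sketch(float *dst, const float *src, int64_t n) {
  int64_t i = 0;
  while (i < n) {
    svbool_t p  = svwhilelt_b32(i, n);                        /* bounds     */
    svbool_t pa = svand_z(p, p, svwhilewr(src + i, dst + i)); /* alias-safe */
    svst1(pa, dst + i, svld1(pa, src + i));
    i += (int64_t)svcntp_b32(pa, pa);                         /* lanes done */
  }
}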
-
-#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
-svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
-svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
-svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
-svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);
-#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
-
-#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
-svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
-svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
-svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
-svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);
-#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
-
-#if defined(__ARM_FEATURE_SVE2_AES)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
-svuint8_t svaesd_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
-svuint8_t svaese_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
-svuint8_t svaesimc_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
-svuint8_t svaesmc_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
-svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
-svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
-svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
-svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
-svuint8_t svaesd(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
-svuint8_t svaese(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
-svuint8_t svaesimc(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
-svuint8_t svaesmc(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
-svuint64_t svpmullb_pair(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
-svuint64_t svpmullb_pair(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
-svuint64_t svpmullt_pair(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
-svuint64_t svpmullt_pair(svuint64_t, svuint64_t);
-#endif  //defined(__ARM_FEATURE_SVE2_AES)
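/* Illustrative sketch, not part of the original header: with SVE2-AES, one
   full AES encryption round per 128-bit vector segment combines the svaese
   and svaesmc aliases above (on Arm, AESE folds in AddRoundKey, i.e.
   state ^ key, before SubBytes/ShiftRows). Helper name is ours. */
svuint8_t aes_enc_round_sketch(svuint8_t state, svuint8_t round_key) {
  return svaesmc(svaese(state, round_key)); /* ...then MixColumns */
}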
-
-#if defined(__ARM_FEATURE_SVE2_SHA3)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
-svuint64_t svrax1_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
-svint64_t svrax1_s64(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
-svuint64_t svrax1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
-svint64_t svrax1(svint64_t, svint64_t);
-#endif  //defined(__ARM_FEATURE_SVE2_SHA3)
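/* Illustrative sketch, not part of the original header: svrax1 computes
   op1 ^ rol64(op2, 1) per lane, the rotate-and-XOR used by the SHA-3
   theta step. Helper name and usage are our own assumption. */
svuint64_t sha3_theta_d_sketch(svuint64_t c_prev, svuint64_t c_next) {
  return svrax1(c_prev, c_next); /* D[x] = C[x-1] ^ rol64(C[x+1], 1) */
}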
-
-#if defined(__ARM_FEATURE_SVE2_SM4)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
-svuint32_t svsm4e_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
-svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
-svuint32_t svsm4e(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
-svuint32_t svsm4ekey(svuint32_t, svuint32_t);
-#endif  //defined(__ARM_FEATURE_SVE2_SM4)
-
-#if defined(__ARM_FEATURE_SVE_BF16)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
-svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
-svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
-svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
-svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
-svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
-svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
-svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
-svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
-svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
-svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
-bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
-svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
-bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
-svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
-svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
-svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
-svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
-svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
-svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
-svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
-svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
-svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
-svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
-svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
-svbfloat16_t svdup_n_bf16(bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
-svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
-svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
-svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
-svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
-svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
-svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
-svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
-svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
-svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
-svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
-svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
-bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
-bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
-svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
-svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
-svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
-svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
-svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
-svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
-svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
-svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
-svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
-svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
-svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
-svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
-svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
-svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
-svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
-uint64_t svlen_bf16(svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
-svbfloat16_t svrev_bf16(svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
-svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
-svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
-svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
-svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
-svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
-void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
-void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
-void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
-void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
-void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
-void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
-void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
-void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
-void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
-void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
-svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
-svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
-svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))
-svbfloat16x2_t svundef2_bf16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))
-svbfloat16x3_t svundef3_bf16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))
-svbfloat16x4_t svundef4_bf16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))
-svbfloat16_t svundef_bf16();
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
-svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
-svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
-svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
-svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
-svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
-svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
-svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
-svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
-svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
-svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
-svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
-svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
-svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
-svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
-bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
-svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
-bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
-svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
-svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
-svuint16_t svcnt_x(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
-svuint16_t svcnt_z(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
-svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
-svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
-svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
-svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
-svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
-svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
-svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
-svbfloat16_t svdup_bf16(bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
-svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
-svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
-svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
-svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
-svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
-svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
-svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
-svbfloat16_t svget2(svbfloat16x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
-svbfloat16_t svget3(svbfloat16x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
-svbfloat16_t svget4(svbfloat16x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
-svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
-bfloat16_t svlasta(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
-bfloat16_t svlastb(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
-svbfloat16_t svld1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
-svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
-svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
-svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
-svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
-svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
-svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
-svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
-svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
-svbfloat16_t svldff1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
-svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
-svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
-svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
-svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
-svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
-uint64_t svlen(svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
-svbfloat16_t svrev(svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
-svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
-svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
-svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
-svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
-svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
-void svst1(svbool_t, bfloat16_t *, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
-void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
-void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
-void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
-void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
-void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
-void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
-void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
-void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
-void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
-svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
-svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
-svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
-svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
-svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
-svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
-svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
-#endif  //defined(__ARM_FEATURE_SVE_BF16)
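/* Illustrative sketch, not part of the original header: svbfdot widens
   adjacent bfloat16 pairs and accumulates their products into each f32
   lane, a natural inner-loop primitive for dot products. Helper name is
   ours; the semantics follow the BFDOT instruction. */
svfloat32_t bf16_dot_step_sketch(svfloat32_t acc, svbfloat16_t a, svbfloat16_t b) {
  return svbfdot(acc, a, b); /* acc[i] += a[2i]*b[2i] + a[2i+1]*b[2i+1] */
}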
-
-#if defined(__ARM_FEATURE_SVE_MATMUL_FP32)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
-svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
-svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
-#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP32)
-
-#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
-svuint8_t svld1ro_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
-svuint32_t svld1ro_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
-svuint64_t svld1ro_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
-svuint16_t svld1ro_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
-svint8_t svld1ro_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
-svfloat64_t svld1ro_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
-svfloat32_t svld1ro_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
-svfloat16_t svld1ro_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
-svint32_t svld1ro_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
-svint64_t svld1ro_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
-svint16_t svld1ro_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
-svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
-svuint8_t svtrn1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
-svuint32_t svtrn1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
-svuint64_t svtrn1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
-svuint16_t svtrn1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
-svint8_t svtrn1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
-svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
-svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
-svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
-svint32_t svtrn1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
-svint64_t svtrn1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
-svint16_t svtrn1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
-svuint8_t svtrn2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
-svuint32_t svtrn2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
-svuint64_t svtrn2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
-svuint16_t svtrn2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
-svint8_t svtrn2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
-svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
-svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
-svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
-svint32_t svtrn2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
-svint64_t svtrn2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
-svint16_t svtrn2q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
-svuint8_t svuzp1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
-svuint32_t svuzp1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
-svuint64_t svuzp1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
-svuint16_t svuzp1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
-svint8_t svuzp1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
-svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
-svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
-svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
-svint32_t svuzp1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
-svint64_t svuzp1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
-svint16_t svuzp1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
-svuint8_t svuzp2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
-svuint32_t svuzp2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
-svuint64_t svuzp2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
-svuint16_t svuzp2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
-svint8_t svuzp2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
-svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
-svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
-svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
-svint32_t svuzp2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
-svint64_t svuzp2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
-svint16_t svuzp2q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
-svuint8_t svzip1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
-svuint32_t svzip1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
-svuint64_t svzip1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
-svuint16_t svzip1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
-svint8_t svzip1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
-svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
-svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
-svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
-svint32_t svzip1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
-svint64_t svzip1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
-svint16_t svzip1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
-svuint8_t svzip2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
-svuint32_t svzip2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
-svuint64_t svzip2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
-svuint16_t svzip2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
-svint8_t svzip2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
-svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
-svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
-svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
-svint32_t svzip2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
-svint64_t svzip2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
-svint16_t svzip2q_s16(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
-svuint8_t svld1ro(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
-svuint32_t svld1ro(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
-svuint64_t svld1ro(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
-svuint16_t svld1ro(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
-svint8_t svld1ro(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
-svfloat64_t svld1ro(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
-svfloat32_t svld1ro(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
-svfloat16_t svld1ro(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
-svint32_t svld1ro(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
-svint64_t svld1ro(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
-svint16_t svld1ro(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
-svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
-svuint8_t svtrn1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
-svuint32_t svtrn1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
-svuint64_t svtrn1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
-svuint16_t svtrn1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
-svint8_t svtrn1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
-svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
-svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
-svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
-svint32_t svtrn1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
-svint64_t svtrn1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
-svint16_t svtrn1q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
-svuint8_t svtrn2q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
-svuint32_t svtrn2q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
-svuint64_t svtrn2q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
-svuint16_t svtrn2q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
-svint8_t svtrn2q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
-svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
-svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
-svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
-svint32_t svtrn2q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
-svint64_t svtrn2q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
-svint16_t svtrn2q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
-svuint8_t svuzp1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
-svuint32_t svuzp1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
-svuint64_t svuzp1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
-svuint16_t svuzp1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
-svint8_t svuzp1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
-svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
-svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
-svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
-svint32_t svuzp1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
-svint64_t svuzp1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
-svint16_t svuzp1q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
-svuint8_t svuzp2q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
-svuint32_t svuzp2q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
-svuint64_t svuzp2q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
-svuint16_t svuzp2q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
-svint8_t svuzp2q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
-svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
-svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
-svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
-svint32_t svuzp2q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
-svint64_t svuzp2q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
-svint16_t svuzp2q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
-svuint8_t svzip1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
-svuint32_t svzip1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
-svuint64_t svzip1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
-svuint16_t svzip1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
-svint8_t svzip1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
-svfloat64_t svzip1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
-svfloat32_t svzip1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
-svfloat16_t svzip1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
-svint32_t svzip1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
-svint64_t svzip1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
-svint16_t svzip1q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
-svuint8_t svzip2q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
-svuint32_t svzip2q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
-svuint64_t svzip2q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
-svuint16_t svzip2q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
-svint8_t svzip2q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
-svfloat64_t svzip2q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
-svfloat32_t svzip2q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
-svfloat16_t svzip2q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
-svint32_t svzip2q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
-svint64_t svzip2q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
-svint16_t svzip2q(svint16_t, svint16_t);
-#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64)
-
-#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
-svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
-svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
-svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
-svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
-svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
-svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
-svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
-svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
-svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
-svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
-svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
-svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
-svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
-svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);
-#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
-
-#if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
-svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
-svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
-svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
-svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
-svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
-svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
-svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
-svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
-svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
-svint32_t svmmla(svint32_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
-svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
-svint32_t svsudot(svint32_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
-svint32_t svsudot(svint32_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
-svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
-svint32_t svusdot(svint32_t, svuint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
-svint32_t svusdot(svint32_t, svuint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
-svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
-svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);
-#endif  //defined(__ARM_FEATURE_SVE_MATMUL_INT8)
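/* Illustrative sketch, not part of the original header: the I8MM dot
   products above accumulate four 8-bit products into each 32-bit lane,
   which suits mixed-signedness int8 GEMM micro-kernels. Helper name is
   ours. */
svint32_t usdot_step_sketch(svint32_t acc, svuint8_t u, svint8_t s) {
  return svusdot(acc, u, s); /* acc[i] += sum of four u8*s8 products */
}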
-#if defined(__ARM_FEATURE_SVE_BF16)
-#define svcvtnt_bf16_x      svcvtnt_bf16_m
-#define svcvtnt_bf16_f32_x  svcvtnt_bf16_f32_m
-#endif /*__ARM_FEATURE_SVE_BF16 */
-
-#if defined(__ARM_FEATURE_SVE2)
-#define svcvtnt_f16_x      svcvtnt_f16_m
-#define svcvtnt_f16_f32_x  svcvtnt_f16_f32_m
-#define svcvtnt_f32_x      svcvtnt_f32_m
-#define svcvtnt_f32_f64_x  svcvtnt_f32_f64_m
-
-#define svcvtxnt_f32_x     svcvtxnt_f32_m
-#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m
-
-#endif /*__ARM_FEATURE_SVE2 */
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif /*__ARM_FEATURE_SVE */
-
-#endif /* __ARM_SVE_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx2intrin.h b/linux-x86/lib64/clang/14.0.2/include/avx2intrin.h
deleted file mode 100644
index 5064c87..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx2intrin.h
+++ /dev/null
@@ -1,1148 +0,0 @@
-/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX2INTRIN_H
-#define __AVX2INTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
-
-/* SSE4 Multiple Packed Sums of Absolute Difference.  */
-#define _mm256_mpsadbw_epu8(X, Y, M) \
-  ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
-                                      (__v32qi)(__m256i)(Y), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi8(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi16(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi32(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi32(__m256i __V1, __m256i __V2)
-{
-  return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qu)__a + (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a + (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a + (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a + (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
-}
-
-#define _mm256_alignr_epi8(a, b, n) \
-  ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
-                                      (__v32qi)(__m256i)(b), (n)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)(~(__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
-                                              (__v32qi)__M);
-}
-
-#define _mm256_blend_epi16(V1, V2, M) \
-  ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
-                                      (__v16hi)(__m256i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qi)__a == (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hi)__a == (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8si)__a == (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4di)__a == (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
-{
-  /* This function always performs a signed comparison, but __v32qi is a char
-     which may be signed or unsigned, so use __v32qs. */
-  return (__m256i)((__v32qs)__a > (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hi)__a > (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8si)__a > (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4di)__a > (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi32(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadds_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi32(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsubs_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maddubs_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_madd_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
-  return __builtin_ia32_pmovmskb256((__v32qi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi16(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi32(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi64(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi16(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
-}
-
-static __inline__  __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a * (__v16hu)__b);
-}
-
-static __inline__  __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi32 (__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a * (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epu32(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a | (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sad_epu8(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shuffle_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
-}
-
-#define _mm256_shuffle_epi32(a, imm) \
-  ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
-
-#define _mm256_shufflehi_epi16(a, imm) \
-  ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
-
-#define _mm256_shufflelo_epi16(a, imm) \
-  ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi8(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi32(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_slli_si256(a, imm) \
-  ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
-
-#define _mm256_bslli_epi128(a, imm) \
-  ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi64(__m256i __a, int __count)
-{
-  return __builtin_ia32_psllqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi64(__m256i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
-}
-
-#define _mm256_srli_si256(a, imm) \
-  ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
-
-#define _mm256_bsrli_epi128(a, imm) \
-  ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi64(__m256i __a, int __count)
-{
-  return __builtin_ia32_psrlqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi64(__m256i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qu)__a - (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a - (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a - (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a - (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a ^ (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_stream_load_si256(__m256i const *__V)
-{
-  typedef __v4di __v4di_aligned __attribute__((aligned(32)));
-  return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_broadcastss_ps(__m128 __X)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_broadcastsd_pd(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcastss_ps(__m128 __X)
-{
-  return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_broadcastsd_pd(__m128d __X)
-{
-  return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastsi128_si256(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
-}
-
-#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
-
-#define _mm_blend_epi32(V1, V2, M) \
-  ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
-                                      (__v4si)(__m128i)(V2), (int)(M)))
-
-#define _mm256_blend_epi32(V1, V2, M) \
-  ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
-                                      (__v8si)(__m256i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastb_epi8(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastw_epi16(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastd_epi32(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastq_epi64(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastb_epi8(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastw_epi16(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastd_epi32(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastq_epi64(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_pd(V, M) \
-  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
-{
-  return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_epi64(V, M) \
-  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
-
-#define _mm256_permute2x128_si256(V1, V2, M) \
-  ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
-
-#define _mm256_extracti128_si256(V, M) \
-  ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
-
-#define _mm256_inserti128_si256(V1, V2, M) \
-  ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
-                                         (__v2di)(__m128i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi32(int const *__X, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi64(long long const *__X, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi32(int const *__X, __m128i __M)
-{
-  return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi64(long long const *__X, __m128i __M)
-{
-  return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
-{
-  __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
-{
-  __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
-{
-  __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
-{
-  __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
-}
-
-#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
-  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
-                                      (double const *)(m), \
-                                      (__v4si)(__m128i)(i), \
-                                      (__v2df)(__m128d)(mask), (s)))
-
-#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
-  ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
-                                         (double const *)(m), \
-                                         (__v4si)(__m128i)(i), \
-                                         (__v4df)(__m256d)(mask), (s)))
-
-#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
-  ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
-                                      (double const *)(m), \
-                                      (__v2di)(__m128i)(i), \
-                                      (__v2df)(__m128d)(mask), (s)))
-
-#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
-  ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
-                                         (double const *)(m), \
-                                         (__v4di)(__m256i)(i), \
-                                         (__v4df)(__m256d)(mask), (s)))
-
-#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
-  ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
-                                     (float const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v4sf)(__m128)(mask), (s)))
-
-#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
-  ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
-                                        (float const *)(m), \
-                                        (__v8si)(__m256i)(i), \
-                                        (__v8sf)(__m256)(mask), (s)))
-
-#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
-  ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
-                                     (float const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v4sf)(__m128)(mask), (s)))
-
-#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
-  ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
-                                        (float const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4sf)(__m128)(mask), (s)))
-
-#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
-  ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
-                                     (int const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v4si)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
-  ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
-                                        (int const *)(m), \
-                                        (__v8si)(__m256i)(i), \
-                                        (__v8si)(__m256i)(mask), (s)))
-
-#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
-  ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
-                                     (int const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v4si)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
-  ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
-                                        (int const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4si)(__m128i)(mask), (s)))
-
-#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
-  ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
-                                     (long long const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v2di)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
-  ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
-                                        (long long const *)(m), \
-                                        (__v4si)(__m128i)(i), \
-                                        (__v4di)(__m256i)(mask), (s)))
-
-#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
-  ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
-                                     (long long const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v2di)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
-  ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
-                                        (long long const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4di)(__m256i)(mask), (s)))
-
-#define _mm_i32gather_pd(m, i, s) \
-  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
-                                      (double const *)(m), \
-                                      (__v4si)(__m128i)(i), \
-                                      (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
-                                                           _mm_setzero_pd()), \
-                                      (s)))
-
-#define _mm256_i32gather_pd(m, i, s) \
-  ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
-                                         (double const *)(m), \
-                                         (__v4si)(__m128i)(i), \
-                                         (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
-                                                               _mm256_setzero_pd(), \
-                                                               _CMP_EQ_OQ), \
-                                         (s)))
-
-#define _mm_i64gather_pd(m, i, s) \
-  ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
-                                      (double const *)(m), \
-                                      (__v2di)(__m128i)(i), \
-                                      (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
-                                                           _mm_setzero_pd()), \
-                                      (s)))
-
-#define _mm256_i64gather_pd(m, i, s) \
-  ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
-                                         (double const *)(m), \
-                                         (__v4di)(__m256i)(i), \
-                                         (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
-                                                               _mm256_setzero_pd(), \
-                                                               _CMP_EQ_OQ), \
-                                         (s)))
-
-#define _mm_i32gather_ps(m, i, s) \
-  ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
-                                     (float const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                          _mm_setzero_ps()), \
-                                     (s)))
-
-#define _mm256_i32gather_ps(m, i, s) \
-  ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
-                                        (float const *)(m), \
-                                        (__v8si)(__m256i)(i), \
-                                        (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
-                                                              _mm256_setzero_ps(), \
-                                                              _CMP_EQ_OQ), \
-                                        (s)))
-
-#define _mm_i64gather_ps(m, i, s) \
-  ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
-                                     (float const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                          _mm_setzero_ps()), \
-                                     (s)))
-
-#define _mm256_i64gather_ps(m, i, s) \
-  ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
-                                        (float const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                             _mm_setzero_ps()), \
-                                        (s)))
-
-#define _mm_i32gather_epi32(m, i, s) \
-  ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
-                                     (int const *)(m), (__v4si)(__m128i)(i), \
-                                     (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm256_i32gather_epi32(m, i, s) \
-  ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
-                                        (int const *)(m), (__v8si)(__m256i)(i), \
-                                        (__v8si)_mm256_set1_epi32(-1), (s)))
-
-#define _mm_i64gather_epi32(m, i, s) \
-  ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
-                                     (int const *)(m), (__v2di)(__m128i)(i), \
-                                     (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm256_i64gather_epi32(m, i, s) \
-  ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
-                                        (int const *)(m), (__v4di)(__m256i)(i), \
-                                        (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm_i32gather_epi64(m, i, s) \
-  ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
-                                     (long long const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v2di)_mm_set1_epi64x(-1), (s)))
-
-#define _mm256_i32gather_epi64(m, i, s) \
-  ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
-                                        (long long const *)(m), \
-                                        (__v4si)(__m128i)(i), \
-                                        (__v4di)_mm256_set1_epi64x(-1), (s)))
-
-#define _mm_i64gather_epi64(m, i, s) \
-  ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
-                                     (long long const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v2di)_mm_set1_epi64x(-1), (s)))
-
-#define _mm256_i64gather_epi64(m, i, s) \
-  ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
-                                        (long long const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4di)_mm256_set1_epi64x(-1), (s)))
-
-#undef __DEFAULT_FN_ATTRS256
-#undef __DEFAULT_FN_ATTRS128
-
-#endif /* __AVX2INTRIN_H */
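A similar aside for the AVX2 header removed above: its intrinsics keep the declared signatures, and the #error guard at the top of the file forbids including avx2intrin.h directly, so a sketch goes through <immintrin.h>. A minimal example using two intrinsics declared above, assuming an -mavx2 build:

#include <immintrin.h>

/* Minimal sketch, not part of the deleted header: signed saturating
 * byte add (_mm256_adds_epi8) followed by a per-lane signed byte max
 * (_mm256_max_epi8), both on 256-bit integer vectors. */
__m256i adds_then_max(__m256i a, __m256i b, __m256i c) {
  __m256i sum = _mm256_adds_epi8(a, b);
  return _mm256_max_epi8(sum, c);
}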
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
deleted file mode 100644
index 6aee8ae..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ /dev/null
@@ -1,2024 +0,0 @@
-/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512BWINTRIN_H
-#define __AVX512BWINTRIN_H
-
-typedef unsigned int __mmask32;
-typedef unsigned long long __mmask64;
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
-
-static __inline __mmask32 __DEFAULT_FN_ATTRS
-_knot_mask32(__mmask32 __M)
-{
-  return __builtin_ia32_knotsi(__M);
-}
-
-static __inline __mmask64 __DEFAULT_FN_ATTRS
-_knot_mask64(__mmask64 __M)
-{
-  return __builtin_ia32_knotdi(__M);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kand_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kand_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kandn_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kandn_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxnor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxnor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kadd_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kadd_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
-}
-
-#define _kshiftli_mask32(A, I) \
-  ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask32(A, I) \
-  ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I)))
-
-#define _kshiftli_mask64(A, I) \
-  ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask64(A, I) \
-  ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask32_u32(__mmask32 __A) {
-  return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_cvtmask64_u64(__mmask64 __A) {
-  return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_cvtu32_mask32(unsigned int __A) {
-  return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_cvtu64_mask64(unsigned long long __A) {
-  return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_load_mask32(__mmask32 *__A) {
-  return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_load_mask64(__mmask64 *__A) {
-  return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask32(__mmask32 *__A, __mmask32 __B) {
-  *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask64(__mmask64 *__A, __mmask64 __B) {
-  *(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B);
-}
-
-/* Integer compare */
-
-#define _mm512_cmp_epi8_mask(a, b, p) \
-  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)-1))
-
-#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
-  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)(m)))
-
-#define _mm512_cmp_epu8_mask(a, b, p) \
-  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                           (__v64qi)(__m512i)(b), (int)(p), \
-                                           (__mmask64)-1))
-
-#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
-  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                           (__v64qi)(__m512i)(b), (int)(p), \
-                                           (__mmask64)(m)))
-
-#define _mm512_cmp_epi16_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)-1))
-
-#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)(m)))
-
-#define _mm512_cmp_epu16_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                           (__v32hi)(__m512i)(b), (int)(p), \
-                                           (__mmask32)-1))
-
-#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                           (__v32hi)(__m512i)(b), (int)(p), \
-                                           (__mmask32)(m)))
-
-#define _mm512_cmpeq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A + (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A - (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A + (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A - (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A * (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-              (__v64qi) __W,
-              (__v64qi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-              (__v32hi) __W,
-              (__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi8 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi16 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
-          __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_shuffle_epi8(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
-                                                 (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-       __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epu16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
-                          __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_madd_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)_mm256_setzero_si256(),
-               (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)__O,
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi) _mm256_setzero_si256(),
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_undefined_si256(),
-              (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) __O,
-              __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_setzero_si256(),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          8,  64+8,   9, 64+9,
-                                          10, 64+10, 11, 64+11,
-                                          12, 64+12, 13, 64+13,
-                                          14, 64+14, 15, 64+15,
-                                          24, 64+24, 25, 64+25,
-                                          26, 64+26, 27, 64+27,
-                                          28, 64+28, 29, 64+29,
-                                          30, 64+30, 31, 64+31,
-                                          40, 64+40, 41, 64+41,
-                                          42, 64+42, 43, 64+43,
-                                          44, 64+44, 45, 64+45,
-                                          46, 64+46, 47, 64+47,
-                                          56, 64+56, 57, 64+57,
-                                          58, 64+58, 59, 64+59,
-                                          60, 64+60, 61, 64+61,
-                                          62, 64+62, 63, 64+63);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          4,  32+4,   5, 32+5,
-                                          6,  32+6,   7, 32+7,
-                                          12, 32+12, 13, 32+13,
-                                          14, 32+14, 15, 32+15,
-                                          20, 32+20, 21, 32+21,
-                                          22, 32+22, 23, 32+23,
-                                          28, 32+28, 29, 32+29,
-                                          30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          0,  64+0,   1, 64+1,
-                                          2,  64+2,   3, 64+3,
-                                          4,  64+4,   5, 64+5,
-                                          6,  64+6,   7, 64+7,
-                                          16, 64+16, 17, 64+17,
-                                          18, 64+18, 19, 64+19,
-                                          20, 64+20, 21, 64+21,
-                                          22, 64+22, 23, 64+23,
-                                          32, 64+32, 33, 64+33,
-                                          34, 64+34, 35, 64+35,
-                                          36, 64+36, 37, 64+37,
-                                          38, 64+38, 39, 64+39,
-                                          48, 64+48, 49, 64+49,
-                                          50, 64+50, 51, 64+51,
-                                          52, 64+52, 53, 64+53,
-                                          54, 64+54, 55, 64+55);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          0,  32+0,   1, 32+1,
-                                          2,  32+2,   3, 32+3,
-                                          8,  32+8,   9, 32+9,
-                                          10, 32+10, 11, 32+11,
-                                          16, 32+16, 17, 32+17,
-                                          18, 32+18, 19, 32+19,
-                                          24, 32+24, 25, 32+25,
-                                          26, 32+26, 27, 32+27);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi16(__m256i __A)
-{
-  /* This function always performs a signed extension, but __v32qi is a
-     vector of char, which may be signed or unsigned, so use the explicitly
-     signed __v32qs. */
-  return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi16(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-
-#define _mm512_shufflehi_epi16(A, imm) \
-  ((__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm)))
-
-#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)(__m512i)(W)))
-
-#define _mm512_maskz_shufflehi_epi16(U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)_mm512_setzero_si512()))
-
-#define _mm512_shufflelo_epi16(A, imm) \
-  ((__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm)))
-
-
-#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)(__m512i)(W)))
-
-
-#define _mm512_maskz_shufflelo_epi16(U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bslli_epi128(a, imm) \
-  ((__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bsrli_epi128(a, imm) \
-  ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi)_mm512_set1_epi8(__A),
-                                              (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi) _mm512_set1_epi8(__A),
-                                              (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
-                (__mmask64) __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
-                (__mmask32) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi16 (void const *__P)
-{
-  struct __loadu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi16*)__P)->__v;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi) __W,
-                 (__mmask32) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi)
-                 _mm512_setzero_si512 (),
-                 (__mmask32) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi8 (void const *__P)
-{
-  struct __loadu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi8*)__P)->__v;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi) __W,
-                 (__mmask64) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi)
-                 _mm512_setzero_si512 (),
-                 (__mmask64) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi16 (void *__P, __m512i __A)
-{
-  struct __storeu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi16*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
-{
-  __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
-             (__v32hi) __A,
-             (__mmask32) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi8 (void *__P, __m512i __A)
-{
-  struct __storeu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi8*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
-{
-  __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
-             (__v64qi) __A,
-             (__mmask64) __U);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_test_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_test_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                      _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
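
/* Sketch of what the test/testn pair computes: test sets mask bit i when
 * (__A[i] & __B[i]) != 0 and testn when it == 0, so the two masks are bitwise
 * complements. Illustrative helper, assumes -mavx512bw: */
#include <immintrin.h>

static inline int bytes_disjoint(__m512i a, __m512i b) /* illustrative */
{
  /* every byte pair ANDs to zero exactly when testn yields all ones */
  return _mm512_testn_epi8_mask(a, b) == ~(__mmask64)0;
}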
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_movepi8_mask (__m512i __A)
-{
-  return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_movepi16_mask (__m512i __A)
-{
-  return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi8 (__mmask64 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi16 (__mmask32 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
-}
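
/* The movepi/movm pair converts between vectors and masks: movepi8_mask packs
 * each byte's sign bit into a __mmask64, and movm_epi8 expands a mask back to
 * 0x00/0xFF bytes, so they round-trip. Illustrative sketch, assumes
 * -mavx512bw: */
#include <immintrin.h>

static inline __m512i mask_to_bytes_and_back(__mmask64 m) /* illustrative */
{
  __m512i bytes = _mm512_movm_epi8(m);          /* 0xFF where a bit is set */
  __mmask64 back = _mm512_movepi8_mask(bytes);  /* recovers m exactly */
  (void)back;
  return bytes;
}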
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastb_epi8 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastw_epi16 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
-        __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-             __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)__W);
-}
-
-#define _mm512_alignr_epi8(A, B, N) \
-  ((__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
-                                      (__v64qi)(__m512i)(B), (int)(N)))
-
-#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
-  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi8(U, A, B, N) \
-  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)_mm512_setzero_si512()))
-
-#define _mm512_dbsad_epu8(A, B, imm) \
-  ((__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
-                                       (__v64qi)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)(__m512i)(W)))
-
-#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sad_epu8 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
-               (__v64qi) __B);
-}
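
/* _mm512_sad_epu8 sums |__A[i] - __B[i]| over each group of eight bytes and
 * stores each sum in the low 16 bits of the corresponding 64-bit lane. A small
 * worked sketch (illustrative, assumes -mavx512bw): */
#include <immintrin.h>

static inline long long sad_example(void) /* illustrative */
{
  __m512i a = _mm512_set1_epi8(5);
  __m512i b = _mm512_set1_epi8(2);
  __m512i s = _mm512_sad_epu8(a, b);
  return s[0];                       /* 8 * |5 - 2| = 24 in every lane */
}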
-
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h
deleted file mode 100644
index df29864..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ /dev/null
@@ -1,9758 +0,0 @@
-/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512FINTRIN_H
-#define __AVX512FINTRIN_H
-
-typedef char __v64qi __attribute__((__vector_size__(64)));
-typedef short __v32hi __attribute__((__vector_size__(64)));
-typedef double __v8df __attribute__((__vector_size__(64)));
-typedef float __v16sf __attribute__((__vector_size__(64)));
-typedef long long __v8di __attribute__((__vector_size__(64)));
-typedef int __v16si __attribute__((__vector_size__(64)));
-
-/* Unsigned types */
-typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
-typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
-typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
-typedef unsigned int __v16su __attribute__((__vector_size__(64)));
-
-typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
-typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
-typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
-
-typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
-
-typedef unsigned char __mmask8;
-typedef unsigned short __mmask16;
-
-/* Rounding mode macros.  */
-#define _MM_FROUND_TO_NEAREST_INT   0x00
-#define _MM_FROUND_TO_NEG_INF       0x01
-#define _MM_FROUND_TO_POS_INF       0x02
-#define _MM_FROUND_TO_ZERO          0x03
-#define _MM_FROUND_CUR_DIRECTION    0x04
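
/* The rounding-mode macros combine with _MM_FROUND_NO_EXC (0x08, defined with
 * the SSE4.1 rounding support) to override MXCSR for a single instruction in
 * the *_round_* intrinsics. Illustrative sketch of round-toward-zero addition,
 * assumes -mavx512f: */
#include <immintrin.h>

static inline __m512d add_round_to_zero(__m512d a, __m512d b) /* illustrative */
{
  return _mm512_add_round_pd(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}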
-
-/* Constants for integer comparison predicates */
-typedef enum {
-    _MM_CMPINT_EQ,      /* Equal */
-    _MM_CMPINT_LT,      /* Less than */
-    _MM_CMPINT_LE,      /* Less than or Equal */
-    _MM_CMPINT_UNUSED,
-    _MM_CMPINT_NE,      /* Not Equal */
-    _MM_CMPINT_NLT,     /* Not Less than */
-#define _MM_CMPINT_GE   _MM_CMPINT_NLT  /* Greater than or Equal */
-    _MM_CMPINT_NLE      /* Not Less than or Equal */
-#define _MM_CMPINT_GT   _MM_CMPINT_NLE  /* Greater than */
-} _MM_CMPINT_ENUM;
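
/* These predicates feed the _mm512_cmp_ep*_mask intrinsics; note that
 * _MM_CMPINT_GE is literally the NLT encoding. Illustrative sketch, assumes
 * -mavx512f: */
#include <immintrin.h>

static inline __mmask16 lanes_ge(__m512i a, __m512i b) /* illustrative */
{
  return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_GE);  /* bit i: a[i] >= b[i] */
}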
-
-typedef enum
-{
-  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
-  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
-  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
-  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
-  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
-  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
-  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
-  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
-  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
-  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
-  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
-  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
-  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
-  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
-  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
-  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
-  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
-  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
-  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
-  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
-  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
-  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
-  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
-  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
-  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
-  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
-  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
-  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
-  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
-  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
-  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
-  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
-  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
-  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
-  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
-  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
-  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
-  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
-  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
-  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
-  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
-  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
-  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
-  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
-  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
-  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
-  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
-  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
-  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
-  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
-  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
-  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
-  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
-  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
-  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
-  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
-  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
-  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
-  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
-  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
-  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
-  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
-  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
-  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
-  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
-  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
-  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
-  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
-  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
-  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
-  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
-  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
-  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
-  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
-  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
-  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
-  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
-  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
-  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
-  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
-  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
-  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
-  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
-  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
-  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
-  _MM_PERM_DDDD = 0xFF
-} _MM_PERM_ENUM;
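
/* The four letters in each _MM_PERM name select source positions D,C,B,A from
 * high element to low, so _MM_PERM_DCBA (0xE4) is the identity and
 * _MM_PERM_AAAA broadcasts element 0 of each 128-bit block. Illustrative
 * sketch with _mm512_shuffle_epi32, assumes -mavx512f: */
#include <immintrin.h>

static inline __m512i dup_low_dwords(__m512i a) /* illustrative */
{
  return _mm512_shuffle_epi32(a, _MM_PERM_AAAA);
}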
-
-typedef enum
-{
-  _MM_MANT_NORM_1_2,    /* interval [1, 2)      */
-  _MM_MANT_NORM_p5_2,   /* interval [0.5, 2)    */
-  _MM_MANT_NORM_p5_1,   /* interval [0.5, 1)    */
-  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
-} _MM_MANTISSA_NORM_ENUM;
-
-typedef enum
-{
-  _MM_MANT_SIGN_src,    /* sign = sign(SRC)     */
-  _MM_MANT_SIGN_zero,   /* sign = 0             */
-  _MM_MANT_SIGN_nan   /* DEST = NaN if sign(SRC) = 1 */
-} _MM_MANTISSA_SIGN_ENUM;
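
/* The two mantissa enums parameterize the getmant intrinsics: one picks the
 * normalization interval, the other the sign of the result. Illustrative
 * sketch extracting mantissas into [1, 2) with the source sign preserved,
 * assumes -mavx512f: */
#include <immintrin.h>

static inline __m512d mantissas_1_2(__m512d x) /* illustrative */
{
  return _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
}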
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
-
-/* Create vectors with repeated elements */
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_setzero_si512(void)
-{
-  return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-#define _mm512_setzero_epi32 _mm512_setzero_si512
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_undefined_pd(void)
-{
-  return (__m512d)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined_ps(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_undefined_epi32(void)
-{
-  return (__m512i)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastd_epi32 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastq_epi64 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) _mm512_setzero_si512());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_setzero_ps(void)
-{
-  return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-#define _mm512_setzero _mm512_setzero_ps
-
-static  __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_setzero_pd(void)
-{
-  return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set1_ps(float __w)
-{
-  return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
-                                 __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set1_pd(double __w)
-{
-  return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi8(char __w)
-{
-  return __extension__ (__m512i)(__v64qi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi16(short __w)
-{
-  return __extension__ (__m512i)(__v32hi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi32(int __s)
-{
-  return __extension__ (__m512i)(__v16si){
-    __s, __s, __s, __s, __s, __s, __s, __s,
-    __s, __s, __s, __s, __s, __s, __s, __s };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si)_mm512_set1_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi64(long long __d)
-{
-  return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di)_mm512_set1_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcastss_ps(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
-                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
-{
-  return __extension__ (__m512i)(__v16si)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi64 (long long __A, long long __B, long long __C,
-       long long __D)
-{
-  return __extension__ (__m512i) (__v8di)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set4_pd (double __A, double __B, double __C, double __D)
-{
-  return __extension__ (__m512d)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set4_ps (float __A, float __B, float __C, float __D)
-{
-  return __extension__ (__m512)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-#define _mm512_setr4_epi32(e0,e1,e2,e3)               \
-  _mm512_set4_epi32((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_epi64(e0,e1,e2,e3)               \
-  _mm512_set4_epi64((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_pd(e0,e1,e2,e3)                \
-  _mm512_set4_pd((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_ps(e0,e1,e2,e3)                \
-  _mm512_set4_ps((e3),(e2),(e1),(e0))
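
/* Element-order note: set4 places its *last* argument in element 0 and
 * repeats the pattern, while the setr4 wrappers reverse the arguments so e0
 * lands in element 0. Illustrative sketch, assumes -mavx512f: */
#include <immintrin.h>

static inline int set4_order_check(void) /* illustrative */
{
  __m512i v = _mm512_set4_epi32(1, 2, 3, 4);   /* lanes: 4,3,2,1,4,3,2,1,... */
  __m512i r = _mm512_setr4_epi32(1, 2, 3, 4);  /* lanes: 1,2,3,4,1,2,3,4,... */
  return ((__v16si)v)[0] == 4 && ((__v16si)r)[0] == 1;  /* both hold */
}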
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcastsd_pd(__m128d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-/* Cast between vector types */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0,  1,  2,  3,  4,  5,  6,  7,
-                                          -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m128d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd128(__m512d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-static __inline __m256d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd256 (__m512d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline __m128 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps128(__m512 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
-}
-
-static __inline __m256 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps256 (__m512 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castpd_ps (__m512d __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castpd_si512 (__m512d __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd128_pd512 (__m128d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castps_pd (__m512 __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castps_si512 (__m512 __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps128_ps512 (__m128 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi128_si512 (__m128i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi256_si512 (__m256i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castsi512_ps (__m512i __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castsi512_pd (__m512i __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si128 (__m512i __A)
-{
-  return (__m128i)__builtin_shufflevector(__A, __A, 0, 1);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si256 (__m512i __A)
-{
-  return (__m256i)__builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_int2mask(int __a)
-{
-  return (__mmask16)__a;
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask2int(__mmask16 __a)
-{
-  return (int)__a;
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    128-bit floating-point vector of [2 x double]. The lower 128 bits
-///    contain the value of the source vector. The upper 384 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd128_pd512(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
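
/* The shuffle above draws the upper six lanes from the zero vector
 * (concatenated indices 2 and 3), so the zero extension is a pure shuffle
 * with no extra instruction. Illustrative usage, assumes -mavx512f: */
#include <immintrin.h>

static inline __m512d widen_pd128(__m128d lo) /* illustrative */
{
  return _mm512_zextpd128_pd512(lo);  /* lanes 0-1 = lo, lanes 2-7 = 0.0 */
}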
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    256-bit floating-point vector of [4 x double]. The lower 256 bits
-///    contain the value of the source vector. The upper 256 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    128-bit floating-point vector of [4 x float]. The lower 128 bits contain
-///    the value of the source vector. The upper 384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps128_ps512(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
-}
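
/* Contrast with _mm512_castps128_ps512 above: the cast's -1 shuffle indices
 * leave the upper 384 bits undefined, while zextps128 draws them from
 * _mm_setzero_ps and is fully specified. Illustrative sketch, assumes
 * -mavx512f: */
#include <immintrin.h>

static inline __m512 widen_defined(__m128 lo)   /* upper 12 floats are 0.0f */
{
  return _mm512_zextps128_ps512(lo);
}

static inline __m512 widen_undefined(__m128 lo) /* upper 12 floats: garbage */
{
  return _mm512_castps128_ps512(lo);
}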
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    256-bit floating-point vector of [8 x float]. The lower 256 bits contain
-///    the value of the source vector. The upper 256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit integer vector from a 128-bit integer vector.
-///    The lower 128 bits contain the value of the source vector. The upper
-///    384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The upper 384 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi128_si512(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit integer vector from a 256-bit integer vector.
-///    The lower 256 bits contain the value of the source vector. The upper
-///    256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
-///    the parameter. The upper 256 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi256_si512(__m256i __a)
-{
-  return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
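
/* Typical use of the integer zext casts: widen a narrower partial vector
 * before full-width arithmetic, relying on the zeroed upper lanes to
 * contribute nothing. Illustrative sketch, assumes -mavx512f: */
#include <immintrin.h>

static inline __m512i widen_and_double(__m256i part) /* illustrative */
{
  __m512i wide = _mm512_zextsi256_si512(part);  /* upper 256 bits are zero */
  return _mm512_add_epi64(wide, wide);          /* upper lanes stay zero */
}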
-
-/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a & (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                (__v16si) _mm512_and_epi32(__a, __b),
-                (__v16si) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-    return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
-                (__v8di) _mm512_and_epi64(__a, __b),
-                (__v8di) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_si512 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v16su)__A & (__v16su)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_andnot_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_andnot_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a | (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                             (__v16si)_mm512_or_epi32(__a, __b),
-                                             (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_or_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a ^ (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                            (__v16si)_mm512_xor_epi32(__a, __b),
-                                            (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_xor_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
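
/* Operand-order note for the andnot family: the *first* operand is the one
 * complemented, i.e. _mm512_andnot_si512(A, B) == (~A) & B. Illustrative
 * sketch clearing the bits of B selected by A, assumes -mavx512f: */
#include <immintrin.h>

static inline __m512i clear_selected_bits(__m512i sel, __m512i b) /* illustrative */
{
  return _mm512_andnot_si512(sel, b);  /* b with sel's set bits cleared */
}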
-
-/* Arithmetic */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a + (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a + (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a * (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a * (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a - (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a - (__v16sf)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A + (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A - (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A + (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A - (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-#define _mm512_max_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_max_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)(W)))
-
-#define _mm512_maskz_max_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_max_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_max_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_max_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W)))
-
-#define _mm512_maskz_max_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_max_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_max_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_max_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_max_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_max_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_min_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_min_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)(W)))
-
-#define _mm512_maskz_min_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_min_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_min_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_min_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W)))
-
-#define _mm512_maskz_min_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_min_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_min_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_min_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_min_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_min_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epu32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A * (__v16su) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v8du) __A * (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_mullox_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-#define _mm512_sqrt_round_pd(A, R) \
-  ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))
-
-#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_sqrt_round_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_sqrt_pd(__m512d __A)
-{
-  return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_sqrt_round_ps(A, R) \
-  ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))
-
-#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_sqrt_round_ps(U, A, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_sqrt_ps(__m512 __A)
-{
-  return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                 (__v8df)
-                 _mm512_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-             (__v4sf) __B,
-             (__v4sf)
-             _mm_setzero_ps (),
-             (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-              (__v2df) __B,
-              (__v2df)
-              _mm_setzero_pd (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rcp14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-               (__v8df)
-               _mm512_setzero_pd (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rcp14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-              (__v16sf)
-              _mm512_setzero_ps (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf)
-                   _mm512_setzero_ps (),
-                   (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-                 (__v4sf) __B,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-            (__v2df) __B,
-            (__v2df)
-            _mm_setzero_pd (),
-            (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_floor_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_FLOOR,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_floor_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_FLOOR,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_CEIL,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_ceil_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_ceil_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_CEIL,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi64(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi32(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_add_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_add_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_add_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-#define _mm_add_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_add_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_add_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_add_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_add_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_add_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_add_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_add_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_add_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-#define _mm_sub_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_sub_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_sub_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_sub_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_sub_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_sub_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_sub_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_sub_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-#define _mm_mul_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_mul_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_mul_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_mul_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_mul_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_mul_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_mul_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_mul_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_mul_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_mul_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_div_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_div_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_div_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_div_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_div_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_div_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a/(__v8df)__b);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a/(__v16sf)__b);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_div_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_div_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_div_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_div_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_div_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_div_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_roundscale_ps(A, B) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_ps(A, B, C, imm) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_ps(A, B, imm) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(A), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         (int)(R)))
-
-#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(A), (int)(R)))
-
-#define _mm512_roundscale_round_ps(A, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R)))
-
-#define _mm512_roundscale_pd(A, B) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_pd(A, B, C, imm) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_pd(A, B, imm) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(A), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          (int)(R)))
-
-#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(A), (int)(R)))
-
-#define _mm512_roundscale_round_pd(A, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm512_fmadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fmsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             -(__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fnmadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fnmsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             -(__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fmsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            -(__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fnmadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fnmsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            -(__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
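[Editor's note: not part of the upstream header or of this diff. The FMA
intrinsics above come in four masking flavours that differ only in what
happens to lanes whose mask bit is clear; a minimal sketch, assuming an
AVX-512F target (-mavx512f) and a hypothetical function name:]

    #include <immintrin.h>

    /* Unmasked: all 16 lanes compute a*b + c.
       _mask:    lanes with a clear bit in k keep the value of a.
       _mask3:   lanes with a clear bit in k keep the value of c.
       _maskz:   lanes with a clear bit in k are zeroed. */
    static __m512 fma_masking_demo(__m512 a, __m512 b, __m512 c, __mmask16 k)
    {
        __m512 full  = _mm512_fmadd_ps(a, b, c);
        __m512 keepa = _mm512_mask_fmadd_ps(a, k, b, c);
        __m512 keepc = _mm512_mask3_fmadd_ps(a, b, c, k);
        __m512 zeros = _mm512_maskz_fmadd_ps(k, a, b, c);
        return _mm512_add_ps(_mm512_add_ps(full, keepa),
                             _mm512_add_ps(keepc, zeros));
    }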
-
-#define _mm512_fmaddsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fmsubadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                -(__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) -1,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                        (__v8df) __B,
-                                                        -(__v8df) __C,
-                                                        (__mmask8) __U,
-                                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fmsubadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               -(__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       -(__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
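[Editor's note: a sketch, not part of this diff. fmaddsub/fmsubadd interleave
an add and a subtract across lanes; the name below is hypothetical and
AVX-512F support is assumed:]

    #include <immintrin.h>

    /* fmaddsub: even-indexed lanes compute a*b - c, odd-indexed lanes
       a*b + c. fmsubadd is the mirror image (even lanes add, odd lanes
       subtract), implemented above by negating c and reusing the
       fmaddsub builtin. */
    static __m512d fmaddsub_demo(__m512d a, __m512d b, __m512d c)
    {
        return _mm512_fmaddsub_pd(a, b, c);
    }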
-
-#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            -(__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            -(__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-/* Vector permutations */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I,
-                                                (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I,
-                                                (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)_mm512_setzero_si512());
-}
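[Editor's note: a sketch, not part of this diff. permutex2var is a full
two-source shuffle: each destination lane is selected from the concatenation
of the two data operands by the corresponding index in __I (for epi32,
indices 16..31 pick from the second source). The helper name is hypothetical;
AVX-512F assumed:]

    #include <immintrin.h>

    /* Reverse the 16 dwords of v: lane i gets element (15 - i) of the
       concatenated pair {v, v}. */
    static __m512i reverse_epi32(__m512i v)
    {
        const __m512i idx = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                             8, 9, 10, 11, 12, 13, 14, 15);
        return _mm512_permutex2var_epi32(v, idx, v);
    }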
-
-#define _mm512_alignr_epi64(A, B, I) \
-  ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(I)))
-
-#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_alignr_epi32(A, B, I) \
-  ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(I)))
-
-#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512()))
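[Editor's note: a sketch, not part of this diff. alignr concatenates the two
sources (A high, B low), shifts the pair right by I elements, and keeps the
low 512 bits; the helper name is hypothetical, AVX-512F assumed:]

    #include <immintrin.h>

    /* dst[i] = lo[i+1] for i = 0..14 and dst[15] = hi[0]: a whole-register
       shift right by one 32-bit element across the {hi, lo} pair. */
    static __m512i shift_in_one_dword(__m512i hi, __m512i lo)
    {
        return _mm512_alignr_epi32(hi, lo, 1);
    }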
-/* Vector Extract */
-
-#define _mm512_extractf64x4_pd(A, I) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                             (__v4df)_mm256_undefined_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__v4df)(__m256d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_extractf32x4_ps(A, I) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v4sf)_mm_undefined_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                            (__v4sf)(__m128)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U)))
-
-/* Vector Blend */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-                 (__v8df) __W,
-                 (__v8df) __A);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-                (__v16sf) __W,
-                (__v16sf) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                (__v8di) __W,
-                (__v8di) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                (__v16si) __W,
-                (__v16si) __A);
-}
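[Editor's note: a sketch, not part of this diff. The blend intrinsics are
thin wrappers over the same per-lane select builtins that implement the
_mask/_maskz variants throughout this header: set bits in __U take __W,
clear bits keep __A. Hypothetical helper name:]

    #include <immintrin.h>

    /* Zero every lane of v whose mask bit is set. */
    static __m512d zero_selected_lanes(__m512d v, __mmask8 k)
    {
        return _mm512_mask_blend_pd(k, v, _mm512_setzero_pd());
    }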
-
-/* Compare */
-
-#define _mm512_cmp_round_ps_mask(A, B, P, R) \
-  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), (int)(P), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
-  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), (int)(P), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_cmp_ps_mask(A, B, P) \
-  _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
-
-#define _mm512_cmp_round_pd_mask(A, B, P, R) \
-  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(P), \
-                                          (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
-  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(P), \
-                                          (__mmask8)(U), (int)(R)))
-
-#define _mm512_cmp_pd_mask(A, B, P) \
-  _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q)
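[Editor's note: a sketch, not part of this diff. The cmp macros return a
bit-mask rather than a vector, so results compose with plain integer bitwise
operations (or the k-register intrinsics). Hypothetical helper name;
AVX-512F assumed:]

    #include <immintrin.h>

    /* One bit per double: set when lo <= v[i] <= hi. Both comparisons use
       ordered predicates, so lanes involving NaN are excluded. */
    static __mmask8 in_range_pd(__m512d v, __m512d lo, __m512d hi)
    {
        return _mm512_cmple_pd_mask(lo, v) & _mm512_cmple_pd_mask(v, hi);
    }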
-
-/* Conversion */
-
-#define _mm512_cvtt_roundps_epu32(A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)_mm512_undefined_epi32(), \
-                                              (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)(__m512i)(W), \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)_mm512_setzero_si512(), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epu32(__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) __W,
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) _mm512_setzero_si512 (),
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
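[Editor's note: a sketch, not part of this diff. The cvtt* forms truncate
toward zero regardless of MXCSR, while the plain cvt* forms below honour the
current (or explicitly supplied) rounding mode. Hypothetical helper name:]

    #include <immintrin.h>

    /* C-style (unsigned)f semantics per lane: truncation, e.g. 3.9f -> 3. */
    static __m512i trunc_to_u32(__m512 v)
    {
        return _mm512_cvttps_epu32(v);
    }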
-
-#define _mm512_cvt_roundepi32_ps(A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_cvt_roundepu32_ps(A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-#define _mm512_cvt_roundpd_ps(A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_ps (__m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_undefined_ps (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_pslo (__m512d __A)
-{
-  return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
-{
-  return (__m512) __builtin_shufflevector (
-                (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W),
-                                               __U, __A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-#define _mm512_cvt_roundps_ph(A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)_mm256_undefined_si256(), \
-                                             (__mmask16)-1))
-
-#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)(__m256i)(U), \
-                                             (__mmask16)(W)))
-
-#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)_mm256_setzero_si256(), \
-                                             (__mmask16)(W)))
-
-#define _mm512_cvtps_ph       _mm512_cvt_roundps_ph
-#define _mm512_mask_cvtps_ph  _mm512_mask_cvt_roundps_ph
-#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
-
-#define _mm512_cvt_roundph_ps(A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static  __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtph_ps(__m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                (__v16sf) _mm512_setzero_ps (),
-                (__mmask16) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) _mm512_setzero_ps (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
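[Editor's note: a sketch, not part of this diff. cvtps_ph/cvtph_ps convert
between fp32 and storage-only fp16, widened here to 512 bits. A round-trip
under a hypothetical name; _MM_FROUND_TO_NEAREST_INT selects
round-to-nearest-even for the narrowing step:]

    #include <immintrin.h>

    /* Quantize 16 floats through half precision and back. */
    static __m512 roundtrip_fp16(__m512 v)
    {
        __m256i half = _mm512_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT);
        return _mm512_cvtph_ps(half);
    }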
-
-#define _mm512_cvtt_roundpd_epi32(A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epi32(__m512d __a)
-{
-  return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
-                                                   (__v8si)_mm256_setzero_si256(),
-                                                   (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundps_epi32(A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epi32(__m512 __a)
-{
-  return (__m512i)
-    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
-                                     (__v16si) _mm512_setzero_si512 (),
-                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epi32(A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epi32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) _mm512_undefined_epi32 (),
-                 (__mmask16) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) _mm512_setzero_si512 (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epi32(A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epi32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) _mm256_undefined_si256 (),
-                 (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) _mm256_setzero_si256 (),
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epu32(A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epu32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_undefined_epi32 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epu32(A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_cvtsd_f64(__m512d __a)
-{
-  return __a[0];
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_cvtss_f32(__m512 __a)
-{
-  return __a[0];
-}
-
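Taken together, the cvt*/cvtt* families above differ only in rounding: the cvtt forms truncate toward zero, the plain cvt forms use the current rounding mode, and the *_round macros take an explicit rounding argument. A minimal usage sketch (buffer names and mask value are illustrative; assumes a compiler targeting AVX-512F, e.g. -mavx512f):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  float in[16];
  int out[16];
  for (int i = 0; i < 16; ++i)
    in[i] = i + 0.75f;                     /* fractional part exercises rounding */
  __m512 v = _mm512_loadu_ps(in);
  /* Truncate toward zero; zero-masked lanes (bits 8..15 clear) become 0. */
  __m512i t = _mm512_maskz_cvttps_epi32((__mmask16)0x00FF, v);
  _mm512_storeu_si512(out, t);
  for (int i = 0; i < 16; ++i)
    printf("%d ", out[i]);                 /* 0 1 2 ... 7 0 0 ... 0 */
  printf("\n");
  return 0;
}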
-/* Unpack and Interleave */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         2,    18,    3,    19,
-                                         2+4,  18+4,  3+4,  19+4,
-                                         2+8,  18+8,  3+8,  19+8,
-                                         2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         0,    16,    1,    17,
-                                         0+4,  16+4,  1+4,  17+4,
-                                         0+8,  16+8,  1+8,  17+8,
-                                         0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          2,    18,    3,    19,
-                                          2+4,  18+4,  3+4,  19+4,
-                                          2+8,  18+8,  3+8,  19+8,
-                                          2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          0,    16,    1,    17,
-                                          0+4,  16+4,  1+4,  17+4,
-                                          0+8,  16+8,  1+8,  17+8,
-                                          0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-
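Note that the 512-bit unpack operations above interleave within each 128-bit lane, not across the full register, as the shuffle indices show. A small sketch of _mm512_unpackhi_pd (values illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  double a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  double b[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  double r[8];
  /* High element of each 128-bit lane, interleaved: 1 11 3 13 5 15 7 17 */
  _mm512_storeu_pd(r, _mm512_unpackhi_pd(_mm512_loadu_pd(a),
                                         _mm512_loadu_pd(b)));
  for (int i = 0; i < 8; ++i)
    printf("%.0f ", r[i]);
  printf("\n");
  return 0;
}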
-/* SIMD load ops */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_si512 (void const *__P)
-{
-  struct __loadu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si512*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
-                                                     (__v16si)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
-                                                     (__v8di)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_loadu_pd(void const *__p)
-{
-  struct __loadu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_loadu_ps(void const *__p)
-{
-  struct __loadu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_load_ps(void const *__p)
-{
-  return *(const __m512*)__p;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_load_pd(void const *__p)
-{
-  return *(const __m512d*)__p;
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
-                          (__v8df) __W,
-                          (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_si512 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi32 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi64 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
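The __packed__/__may_alias__ wrapper structs above are what let the loadu forms read from any address without undefined behavior, while the load forms compile to aligned moves and require 64-byte alignment. A sketch of the distinction (assumes C11 _Alignas):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  _Alignas(64) float buf[17];
  for (int i = 0; i < 17; ++i)
    buf[i] = (float)i;
  __m512 a = _mm512_load_ps(buf);       /* OK: buf is 64-byte aligned */
  __m512 u = _mm512_loadu_ps(buf + 1);  /* OK: loadu tolerates any alignment */
  /* _mm512_load_ps(buf + 1) would be undefined behavior (misaligned). */
  printf("%.0f %.0f\n", _mm512_cvtss_f32(a), _mm512_cvtss_f32(u)); /* 0 1 */
  return 0;
}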
-/* SIMD store ops */
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi64 (void *__P, __m512i __A)
-{
-  struct __storeu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
-                                     (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_si512 (void *__P, __m512i __A)
-{
-  struct __storeu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si512*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi32 (void *__P, __m512i __A)
-{
-  struct __storeu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
-                                     (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_pd(void *__P, __m512d __A)
-{
-  struct __storeu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_ps(void *__P, __m512 __A)
-{
-  struct __storeu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_pd(void *__P, __m512d __A)
-{
-  *(__m512d*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_ps(void *__P, __m512 __A)
-{
-  *(__m512*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_si512 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi32 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi64 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
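The mask_store/mask_storeu forms write only the lanes whose mask bit is set and leave the remaining memory untouched, which is what makes them safe for loop epilogues. A minimal sketch (mask value illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int dst[16];
  for (int i = 0; i < 16; ++i)
    dst[i] = -1;
  /* Only lanes 0..3 and 8..11 are written; the rest keep their -1. */
  _mm512_mask_storeu_epi32(dst, (__mmask16)0x0F0F, _mm512_set1_epi32(7));
  for (int i = 0; i < 16; ++i)
    printf("%d ", dst[i]);
  printf("\n");
  return 0;
}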
-/* Mask ops */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS
-_mm512_knot(__mmask16 __M)
-{
-  return __builtin_ia32_knothi(__M);
-}
-
-/* Integer compare */
-
-#define _mm512_cmpeq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
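Each comparison above produces one mask bit per element rather than a vector of all-ones/all-zeros lanes; the resulting __mmask16/__mmask8 feeds directly into the masked forms of other intrinsics. For example (values illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int vals[16];
  for (int i = 0; i < 16; ++i)
    vals[i] = i;
  __m512i b = _mm512_loadu_si512(vals);
  /* Signed compare: bit i is set where 5 > vals[i], i.e. lanes 0..4. */
  __mmask16 k = _mm512_cmpgt_epi32_mask(_mm512_set1_epi32(5), b);
  printf("mask = 0x%04x\n", (unsigned)k);   /* mask = 0x001f */
  return 0;
}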
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi32(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char,
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi64(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char,
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector(
-      __builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7),
-      __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi32(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector(
-      __builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7),
-      __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
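The cvtepi8/cvtepu8 pairs above differ only in sign- versus zero-extension, which is why the signed forms go out of their way to cast through __v16qs. A sketch of the difference on a negative byte (values illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  signed char bytes[16] = {-1, 2, -3, 4};   /* remaining lanes are 0 */
  __m128i src = _mm_loadu_si128((const __m128i *)bytes);
  int s[16], u[16];
  _mm512_storeu_si512(s, _mm512_cvtepi8_epi32(src)); /* sign-extends */
  _mm512_storeu_si512(u, _mm512_cvtepu8_epi32(src)); /* zero-extends */
  printf("%d %d\n", s[0], u[0]);            /* -1 255 */
  return 0;
}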
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-
-
-#define _mm512_cmp_epi32_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)-1))
-
-#define _mm512_cmp_epu32_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                           (__v16si)(__m512i)(b), (int)(p), \
-                                           (__mmask16)-1))
-
-#define _mm512_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm512_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                          (__v8di)(__m512i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)(m)))
-
-#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                           (__v16si)(__m512i)(b), (int)(p), \
-                                           (__mmask16)(m)))
-
-#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                          (__v8di)(__m512i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm512_rol_epi32(a, b) \
-  ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)))
-
-#define _mm512_mask_rol_epi32(W, U, a, b) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_rol_epi32((a), (b)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_rol_epi32(U, a, b) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_rol_epi32((a), (b)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_rol_epi64(a, b) \
-  ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)))
-
-#define _mm512_mask_rol_epi64(W, U, a, b) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_rol_epi64((a), (b)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_rol_epi64(U, a, b) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_rol_epi64((a), (b)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ror_epi32(A, B) \
-  ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)))
-
-#define _mm512_mask_ror_epi32(W, U, A, B) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_ror_epi32((A), (B)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_ror_epi32(U, A, B) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_ror_epi32((A), (B)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_ror_epi64(A, B) \
-  ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)))
-
-#define _mm512_mask_ror_epi64(W, U, A, B) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_ror_epi64((A), (B)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_ror_epi64(U, A, B) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_ror_epi64((A), (B)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
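The rol/ror forms are macros because the rotate count must be a compile-time immediate; the rolv/rorv intrinsics above take a per-lane variable count instead. A small sketch of the immediate form (value illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned int out[16];
  __m512i v = _mm512_set1_epi32((int)0x80000001u);
  /* Rotate left by 1: the top bit wraps around to bit 0. */
  _mm512_storeu_si512(out, _mm512_rol_epi32(v, 1));
  printf("0x%08x\n", out[0]);               /* 0x00000003 */
  return 0;
}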
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
-                        unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
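Unlike scalar C shifts, the slli/srli intrinsics above are well defined for any count: a count of at least the element width simply yields zero. For instance (values illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int out[16];
  _mm512_storeu_si512(out, _mm512_slli_epi32(_mm512_set1_epi32(0xF0), 4));
  printf("0x%x\n", out[0]);                 /* 0xf00 */
  _mm512_storeu_si512(out, _mm512_slli_epi32(_mm512_set1_epi32(0xF0), 40));
  printf("0x%x\n", out[0]);                 /* 0x0: count >= 32 zeroes the lane */
  return 0;
}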
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si) _mm512_setzero_si512 (),
-              (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
-          (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) _mm512_setzero_si512 ());
-}
-
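mask_mov is the primitive lane-wise blend: where the mask bit is set the result comes from __A, otherwise from __W (or zero in the maskz form); the selectd/selectq builtins used throughout this header implement exactly this. A sketch (mask value illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int out[16];
  __m512i w = _mm512_set1_epi32(1);
  __m512i a = _mm512_set1_epi32(2);
  /* Even lanes (mask bits set) take 2 from a; odd lanes keep 1 from w. */
  _mm512_storeu_si512(out, _mm512_mask_mov_epi32(w, (__mmask16)0x5555, a));
  for (int i = 0; i < 16; ++i)
    printf("%d", out[i]);                   /* 2121212121212121 */
  printf("\n");
  return 0;
}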
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di) _mm512_setzero_si512 (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_movedup_pd (__m512d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
-                                          0, 0, 2, 2, 4, 4, 6, 6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
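movedup_pd broadcasts the even-indexed element of each 64-bit pair, matching the shuffle indices 0,0,2,2,... above. A sketch (values illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  double in[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  double out[8];
  _mm512_storeu_pd(out, _mm512_movedup_pd(_mm512_loadu_pd(in)));
  for (int i = 0; i < 8; ++i)
    printf("%.0f ", out[i]);                /* 1 1 3 3 5 5 7 7 */
  printf("\n");
  return 0;
}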
-#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_fixupimm_pd(A, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)-1, \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8di)(__m512i)(C), \
-                                               (int)(imm), (__mmask8)(U), \
-                                               (int)(R)))
-
-#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8di)(__m512i)(C), \
-                                               (int)(imm), (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_fixupimm_ps(A, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1, \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U), \
-                                              (int)(R)))
-
-#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U), \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm_fixupimm_sd(A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)-1, \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R)))
-
-#define _mm_fixupimm_ss(A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4si)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4si)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
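-/* Usage sketch (an illustrative addition, not part of the upstream
- * header): fixupimm classifies each input as QNaN, zero, one, infinity,
- * etc., and uses that class to index a 4-bit response token in the
- * integer table operand; the imm8 selects which exceptions to signal.
- * A table of all zeros leaves the destination lane unchanged, so real
- * uses build the table per special case. */
-static __inline__ __m128d
-__fixupimm_identity_sd_sketch(__m128d __x)
-{
-  /* Token 0 in every table slot passes the destination operand through,
-   * and imm8 0 requests no extra exception reporting. */
-  return _mm_fixupimm_sd(__x, __x, _mm_setzero_si128(), 0);
-}
-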
-#define _mm_getexp_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)_mm_setzero_pd(), \
-                                                  (__mmask8)-1, (int)(R)))
-
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B,
-                 (__v2df) __W,
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)(__m128d)(W), \
-                                                  (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B,
-                 (__v2df) _mm_setzero_pd (),
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)_mm_setzero_pd(), \
-                                                  (__mmask8)(U), (int)(R)))
-
-#define _mm_getexp_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)_mm_setzero_ps(), \
-                                                 (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)(__m128)(W), \
-                                                 (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)_mm_setzero_ps(), \
-                                                 (__mmask8)(U), (int)(R)))
-
-#define _mm_getmant_round_sd(A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(R)))
-
-#define _mm_getmant_sd(A, B, C, D)  \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_getmant_sd(U, A, B, C, D) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(R)))
-
-#define _mm_getmant_round_ss(A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(R)))
-
-#define _mm_getmant_ss(A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_getmant_ss(U, A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(R)))
-
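-/* Usage sketch (illustrative addition, not part of the upstream header):
- * getexp/getmant together give a vectorised frexp.  For finite nonzero x,
- * getexp returns floor(log2(|x|)) as a floating-point value and getmant
- * returns the mantissa in the interval chosen by the norm enum, so
- * x == mant * 2^exp. */
-static __inline__ __m128d
-__getmant_1_2_sd_sketch(__m128d __x)
-{
-  /* Normalise the low double into [1, 2), preserving its sign; the
-   * matching exponent would come from _mm_getexp_sd(__x, __x). */
-  return _mm_getmant_sd(__x, __x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
-}
-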
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kmov (__mmask16 __A)
-{
-  return __A;
-}
-
-#define _mm_comi_round_sd(A, B, P, R) \
-  ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
-                               (int)(P), (int)(R)))
-
-#define _mm_comi_round_ss(A, B, P, R) \
-  ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
-                               (int)(P), (int)(R)))
-
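-/* Usage sketch (illustrative addition, not part of the upstream header):
- * unlike the legacy comisd/ucomisd intrinsics, the comi_round forms take
- * an explicit AVX comparison predicate plus an SAE control. */
-static __inline__ int
-__ordered_gt_sd_sketch(__m128d __a, __m128d __b)
-{
-  /* _CMP_GT_OQ is the ordered, non-signalling greater-than predicate;
-   * _MM_FROUND_NO_EXC suppresses exceptions for the comparison. */
-  return _mm_comi_round_sd(__a, __b, _CMP_GT_OQ, _MM_FROUND_NO_EXC);
-}
-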
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_si64(A, R) \
-  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sll_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sll_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
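-/* Usage sketch (illustrative addition, not part of the upstream header):
- * the sll/srl/sra forms shift every lane by the scalar count in the low
- * 64 bits of a __m128i, while the sllv/srlv/srav forms take a full vector
- * of per-lane counts. */
-static __inline__ __m512i
-__shift_low_lanes_sketch(__m512i __v)
-{
-  /* Merge masking: lanes 0-7 are shifted left by one, lanes 8-15 keep
-   * their original contents from __v. */
-  return _mm512_mask_sll_epi32(__v, (__mmask16)0x00FF, __v,
-                               _mm_cvtsi32_si128(1));
-}
-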
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1))
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U)))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                              (__v16si)(__m512i)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U)))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                              (__v8di)(__m512i)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
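-/* Usage sketch (illustrative addition, not part of the upstream header):
- * the imm8 of ternarylogic is the full truth table of a three-input
- * boolean function, indexed by the bit triple (a, b, c) with a as the
- * most significant index bit. */
-static __inline__ __m512i
-__bitwise_select_sketch(__m512i __m, __m512i __b, __m512i __c)
-{
-  /* 0xCA is the table of (m & b) | (~m & c): a per-bit select that takes
-   * b where the mask bit is 1 and c where it is 0. */
-  return _mm512_ternarylogic_epi32(__m, __b, __c, 0xCA);
-}
-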
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_i64(A, R) \
-  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
-#endif
-
-#define _mm_cvt_roundsd_si32(A, R) \
-  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvt_roundsd_i32(A, R) \
-  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvt_roundsd_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
-                                                   (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvt_roundss_si32(A, R) \
-  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvt_roundss_i32(A, R) \
-  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_si64(A, R) \
-  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvt_roundss_i64(A, R) \
-  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
-#endif
-
-#define _mm_cvt_roundss_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
-                                                   (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_i32(A, R) \
-  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvtt_roundsd_si32(A, R) \
-  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i32 (__m128d __A)
-{
-  return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_si64(A, R) \
-  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvtt_roundsd_i64(A, R) \
-  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i64 (__m128d __A)
-{
-  return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
-                                                    (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_i32(A, R) \
-  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvtt_roundss_si32(A, R) \
-  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttss_i32 (__m128 __A)
-{
-  return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_i64(A, R) \
-  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvtt_roundss_si64(A, R) \
-  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_i64 (__m128 __A)
-{
-  return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
-                                                    (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
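-/* Usage sketch (illustrative addition, not part of the upstream header):
- * the cvtt forms always truncate, whereas the cvt_round forms take the
- * rounding mode as an immediate instead of reading MXCSR. */
-static __inline__ int
-__round_nearest_i32_sketch(__m128 __x)
-{
-  /* Round-to-nearest-even with exceptions suppressed, independent of the
-   * current MXCSR rounding mode. */
-  return _mm_cvt_roundss_si32(__x, _MM_FROUND_TO_NEAREST_INT |
-                                       _MM_FROUND_NO_EXC);
-}
-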
-#define _mm512_permute_pd(X, C) \
-  ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)))
-
-#define _mm512_mask_permute_pd(W, U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permute_pd((X), (C)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_permute_pd(U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permute_pd((X), (C)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_permute_ps(X, C) \
-  ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)))
-
-#define _mm512_mask_permute_ps(W, U, X, C) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_permute_ps((X), (C)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_permute_ps(U, X, C) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_permute_ps((X), (C)), \
-                                       (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutevar_pd(__m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutevar_ps(__m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I,
-                                                 (__v8df)__B);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)(__m512d)__I);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I,
-                                                (__v16sf) __B);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)(__m512)__I);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)_mm512_setzero_ps());
-}
-
-
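-/* Usage sketch (illustrative addition, not part of the upstream header):
- * permutex2var is a two-table shuffle; each index selects a lane with its
- * low bits and one of the two source vectors with the next bit up. */
-static __inline__ __m512d
-__concat_low_halves_sketch(__m512d __a, __m512d __b)
-{
-  /* Indices 0-7 address __a and 8-15 address __b, so this gathers the
-   * low four doubles of each input. */
-  const __m512i __idx = _mm512_set_epi64(11, 10, 9, 8, 3, 2, 1, 0);
-  return _mm512_permutex2var_pd(__a, __idx, __b);
-}
-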
-#define _mm512_cvtt_roundpd_epu32(A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)_mm256_undefined_si256(), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)(__m256i)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)_mm256_setzero_si256(), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
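-/* Usage sketch (illustrative addition, not part of the upstream header):
- * with maskz, inactive lanes are zeroed instead of merged, so no stale
- * destination data survives. */
-static __inline__ __m256i
-__cvtt_active_lanes_sketch(__m512d __x, __mmask8 __live)
-{
-  /* Truncate only the lanes whose mask bit is set; the rest become 0. */
-  return _mm512_maskz_cvttpd_epu32(__live, __x);
-}
-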
-#define _mm_roundscale_round_sd(A, B, imm, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(imm), \
-                                                 (int)(R)))
-
-#define _mm_roundscale_sd(A, B, imm) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(imm), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(imm), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 (int)(R)))
-
-#define _mm_maskz_roundscale_sd(U, A, B, I) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 (int)(R)))
-
-#define _mm_roundscale_round_ss(A, B, imm, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                (int)(R)))
-
-#define _mm_roundscale_ss(A, B, imm) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_ss(W, U, A, B, I) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R)))
-
-#define _mm_maskz_roundscale_ss(U, A, B, I) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R)))
-
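-/* Usage sketch (illustrative addition, not part of the upstream header):
- * roundscale rounds to a fixed number of fraction bits.  imm8 bits [7:4]
- * give the scale M (round to a multiple of 2^-M) and bits [1:0] the
- * rounding mode, 0 being round-to-nearest-even. */
-static __inline__ __m128d
-__round_to_quarters_sketch(__m128d __x)
-{
-  /* M = 2: round the low double to the nearest multiple of 0.25; the
-   * upper lane is taken from the first operand. */
-  return _mm_roundscale_sd(__x, __x, (2 << 4) | 0x00);
-}
-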
-#define _mm512_scalef_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_scalef_pd (__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_scalef_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_scalef_ps (__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_scalef_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-              (__v2df) __B, (__v2df) _mm_setzero_pd(),
-              (__mmask8) -1,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_scalef_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-             (__v4sf) __B, (__v4sf) _mm_setzero_ps(),
-             (__mmask8) -1,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-                (__v4sf) _mm_setzero_ps (),
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), \
-                                              (int)(R)))
-
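-/* Usage sketch (illustrative addition, not part of the upstream header):
- * scalef computes x * 2^floor(y) per lane, the vector counterpart of
- * ldexp with the exponent supplied as a floating-point vector. */
-static __inline__ __m512d
-__ldexp_like_sketch(__m512d __x, __m512d __n)
-{
-  /* Scale every double in __x by two to the power floor of the matching
-   * lane of __n. */
-  return _mm512_scalef_pd(__x, __n);
-}
-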
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
-                        unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
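-/* Usage sketch (illustrative addition, not part of the upstream header):
- * srai is an arithmetic shift, so it replicates the sign bit instead of
- * shifting in zeros. */
-static __inline__ __m512i
-__sign_mask_sketch(__m512i __v)
-{
-  /* All-ones in negative 32-bit lanes, all-zeros in non-negative ones. */
-  return _mm512_srai_epi32(__v, 31);
-}
-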
-#define _mm512_shuffle_f32x4(A, B, imm) \
-  ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_shuffle_f64x2(A, B, imm) \
-  ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_shuffle_i32x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
-                                      (__v16si)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_shuffle_i64x2(A, B, imm) \
-  ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
-                                      (__v8di)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_shuffle_pd(A, B, M) \
-  ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
-                                     (__v8df)(__m512d)(B), (int)(M)))
-
-#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_shuffle_pd(U, A, B, M) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_shuffle_ps(A, B, M) \
-  ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
-                                    (__v16sf)(__m512)(B), (int)(M)))
-
-#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_shuffle_ps(U, A, B, M) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                       (__v16sf)_mm512_setzero_ps()))
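-
-// Editorial note: in the lane shuffles above, the 8-bit immediate is read
-// as four 2-bit fields: result 128-bit lanes 0-1 come from A and lanes 2-3
-// from B, each field selecting one of that operand's four lanes. In
-// _mm512_shuffle_pd/_mm512_shuffle_ps the immediate instead selects
-// elements within each 128-bit lane, as in the SSE shufpd/shufps
-// encodings. A hypothetical sketch (names `a`, `b` are illustrative):
-//
-//   __m512 lo = _mm512_shuffle_f32x4(a, b, 0x44); // lanes a0, a1, b0, b1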
-
-#define _mm_sqrt_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
-                                                     (__v2df) __B,
-                                                     (__v2df) __W,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)(__m128d)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
-                                                     (__v2df) __B,
-                                                     (__v2df) _mm_setzero_pd (),
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm_sqrt_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
-                                                    (__v4sf) __B,
-                                                    (__v4sf) __W,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                            (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
-                                                    (__v4sf) __B,
-                                                    (__v4sf) _mm_setzero_ps (),
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U), (int)(R)))
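-
-// Editorial note: the scalar square roots above compute sqrt of the low
-// element of B and copy the remaining element(s) from A. In the *_round_*
-// forms, R is either _MM_FROUND_CUR_DIRECTION (use the current MXCSR
-// rounding mode, as the non-round wrappers do) or a static rounding mode
-// combined with _MM_FROUND_NO_EXC. A hypothetical sketch (`a`, `b` are
-// illustrative):
-//
-//   __m128d r = _mm_sqrt_round_sd(a, b, _MM_FROUND_TO_NEAREST_INT |
-//                                       _MM_FROUND_NO_EXC);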
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x4(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)__O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x4(__m256d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)__O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x4(__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x4(__m256i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) __O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) _mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) __O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) _mm512_setzero_ps());
-}
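-
-// Editorial note: the mask/maskz pattern above is uniform across these
-// broadcasts: _mm512_mask_* keeps elements of the passthrough operand
-// (__O/__W) where the mask bit is 0, while _mm512_maskz_* zeroes them. A
-// hypothetical sketch (mask value and `x` are illustrative):
-//
-//   // elements 0-7 receive x's four floats repeated; 8-15 become 0.0f
-//   __m512 r = _mm512_maskz_broadcast_f32x4((__mmask16)0x00FF, x);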
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_undefined_si256 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_undefined_si256 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_undefined_si256 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_undefined_si256 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_undefined_si256 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_undefined_si256 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
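-
-// Editorial note: the three down-convert families above differ only in how
-// out-of-range values are handled: cvtsepi* saturates as signed (epi32 ->
-// epi8 clamps to [-128, 127]), cvtusepi* treats elements as unsigned and
-// saturates to the unsigned maximum (clamps to [0, 255]), and cvtepi*
-// truncates, keeping only the low bits. The *_storeu_* forms write the
-// converted elements directly to unaligned memory under the mask.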
-
-#define _mm512_extracti32x4_epi32(A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)_mm_undefined_si128(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)(__m128i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)_mm_setzero_si128(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_extracti64x4_epi64(A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)_mm256_undefined_si256(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)(__m256i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)_mm256_setzero_si256(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_insertf64x4(A, B, imm) \
-  ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
-                                       (__v4df)(__m256d)(B), (int)(imm)))
-
-#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_insertf64x4(U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_inserti64x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
-                                       (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                   (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_inserti64x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                   (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_insertf32x4(A, B, imm) \
-  ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
-                                      (__v4sf)(__m128)(B), (int)(imm)))
-
-#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_insertf32x4(U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_inserti32x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
-                                       (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                  (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_inserti32x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                  (__v16si)_mm512_setzero_si512()))
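-
-// Editorial note: for the extract/insert macros above, imm selects which
-// chunk of the 512-bit vector is read or replaced: one of four 128-bit
-// chunks (imm 0-3) in the 32x4 forms, one of two 256-bit halves (imm 0-1)
-// in the 64x4 forms. A hypothetical sketch (`v` is illustrative):
-//
-//   __m256i hi = _mm512_extracti64x4_epi64(v, 1); // upper 256 bits of v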
-
-#define _mm512_getmant_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_undefined_pd(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)(__m512d)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_getmant_pd(A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)-1, \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_pd(W, U, A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)(__m512d)(W), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_getmant_pd(U, A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_getmant_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_getmant_ps(A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_ps(W, U, A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_getmant_ps(U, A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
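-
-// Editorial note: getmant extracts a normalized mantissa. B selects the
-// target interval (_MM_MANT_NORM_1_2, _MM_MANT_NORM_p5_2, _MM_MANT_NORM_p5_1
-// or _MM_MANT_NORM_p75_1p5) and C the sign treatment (_MM_MANT_SIGN_src,
-// _MM_MANT_SIGN_zero or _MM_MANT_SIGN_nan); the builtin takes one packed
-// operand, which is why the macros combine the two as ((C)<<2) | (B).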
-
-#define _mm512_getexp_round_pd(A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_getexp_round_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_getexp_round_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_getexp_pd (__m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_getexp_round_ps(A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_getexp_round_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_getexp_round_ps(U, A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_getexp_ps (__m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
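-
-// Editorial note: getexp returns floor(log2(|x|)) of each element as a
-// floating-point value, i.e. the unbiased exponent, so for normal inputs
-// getmant(x) * 2^getexp(x) reconstructs |x|. For example, getexp maps 8.0
-// to 3.0 and 0.5 to -1.0.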
-
-#define _mm512_i64gather_ps(index, addr, scale) \
-  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
-  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
-                                         (void const *)(addr), \
-                                         (__v8di)(__m512i)(index), \
-                                         (__mmask8)-1, (int)(scale)))
-
-#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v8di)(__m512i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_pd(index, addr, scale) \
-  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_epi64(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i32gather_ps(index, addr, scale) \
-  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)-1, (int)(scale)))
-
-#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)(mask), (int)(scale)))
-
-#define _mm512_i32gather_epi32(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
-                                         (void const *)(addr), \
-                                         (__v16si)(__m512i)(index), \
-                                         (__mmask16)-1, (int)(scale)))
-
-#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v16si)(__m512i)(index), \
-                                         (__mmask16)(mask), (int)(scale)))
-
-#define _mm512_i32gather_pd(index, addr, scale) \
-  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i32gather_epi64(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
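-
-// Editorial note: in the gathers above, element i is loaded from
-// (char const *)addr + index[i] * scale, where scale must be 1, 2, 4 or 8.
-// The masked forms load only where the mask bit is 1 and take the other
-// elements from v1_old. A hypothetical sketch (`table`, `idx` are
-// illustrative):
-//
-//   __m512 vals = _mm512_i32gather_ps(idx, table, sizeof(float));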
-
-#define _mm512_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
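-
-// Editorial note: the scatters mirror the gathers: element i of v1 is
-// stored to (char *)addr + index[i] * scale wherever the mask bit is 1.
-// Stores are issued from the lowest element upward, so when two active
-// indices collide, the highest-numbered element's value is the one that
-// lands in memory.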
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                          (__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
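-
-// Editorial note: the scalar FMA wrappers follow three masking
-// conventions: _mm_mask_* blends and takes upper elements from the first
-// source (__W), _mm_maskz_* zeroes the low element when the mask bit is
-// clear, and _mm_mask3_* blends and takes upper elements from the addend
-// (__Y). The fmsub/fnmadd/fnmsub variants below reuse the fmadd/fmsub
-// builtins, negating the multiplier and/or addend operands as needed.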
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                          (__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          -(__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                          -(__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          -(__v4sf)(__m128)(B), \
-                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                          -(__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                           (__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           -(__v2df)(__m128d)(C), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                           (__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           -(__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                           -(__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           -(__v2df)(__m128d)(B), \
-                                           -(__v2df)(__m128d)(C), \
-                                           (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                           -(__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm512_permutex_pd(X, C) \
-  ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)))
-
-#define _mm512_mask_permutex_pd(W, U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permutex_pd((X), (C)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_permutex_pd(U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permutex_pd((X), (C)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_permutex_epi64(X, C) \
-  ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)))
-
-#define _mm512_mask_permutex_epi64(W, U, X, C) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_permutex_epi64(U, X, C) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X);
-}
-
-#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)__W);
-}
-
-#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kand (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kandn (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestc (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestz (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxnor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-#define _kand_mask16 _mm512_kand
-#define _kandn_mask16 _mm512_kandn
-#define _knot_mask16 _mm512_knot
-#define _kor_mask16 _mm512_kor
-#define _kxnor_mask16 _mm512_kxnor
-#define _kxor_mask16 _mm512_kxor
-
-#define _kshiftli_mask16(A, I) \
-  ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask16(A, I) \
-  ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask16_u32(__mmask16 __A) {
-  return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_cvtu32_mask16(unsigned int __A) {
-  return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_load_mask16(__mmask16 *__A) {
-  return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask16(__mmask16 *__A, __mmask16 __B) {
-  *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_si512 (void * __P, __m512i __A)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_stream_load_si512 (void const *__P)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_pd (void *__P, __m512d __A)
-{
-  typedef __v8df __v8df_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_ps (void *__P, __m512 __A)
-{
-  typedef __v16sf __v16sf_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di)
-                  _mm512_setzero_si512 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U);
-}
-
-#define _mm_cmp_round_ss_mask(X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)(M), (int)(R)))
-
-#define _mm_cmp_ss_mask(X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)-1, \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)(M), \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_cmp_round_sd_mask(X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)(M), (int)(R)))
-
-#define _mm_cmp_sd_mask(X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)-1, \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)(M), \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-/* Bit Test */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_test_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_test_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_movehdup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_moveldup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B),
-                                     _mm_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B),
-                                     _mm_setzero_pd());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
-{
-  __m128 __src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
-                                                  (__v4sf)_mm_setzero_ps(),
-                                                  0, 4, 4, 4);
-
-  return (__m128) __builtin_ia32_loadss128_mask ((const __v4sf *) __A, __src, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ss (__mmask8 __U, const float* __A)
-{
-  return (__m128)__builtin_ia32_loadss128_mask ((const __v4sf *) __A,
-                                                (__v4sf) _mm_setzero_ps(),
-                                                __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
-{
-  __m128d __src = (__v2df) __builtin_shufflevector((__v2df) __W,
-                                                   (__v2df)_mm_setzero_pd(),
-                                                   0, 2);
-
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, __src, __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_sd (__mmask8 __U, const double* __A)
-{
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A,
-                                                  (__v2df) _mm_setzero_pd(),
-                                                  __U & 1);
-}
-
-#define _mm512_shuffle_epi32(A, I) \
-  ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)))
-
-#define _mm512_mask_shuffle_epi32(W, U, A, I) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_epi32(U, A, I) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) _mm512_setzero_si512 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) _mm512_setzero_pd(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) _mm512_setzero_si512(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) _mm512_setzero_ps(),
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) _mm512_setzero_si512(),
-              (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps(),
-               (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) __W,
-                (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) _mm512_setzero_si512(),
-                (__mmask16) __U);
-}
-
-#define _mm512_cvt_roundps_pd(A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtps_pd (__m256 __A)
-{
-  return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtpslo_pd (__m512 __A)
-{
-  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
-{
-  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) __W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) _mm512_setzero_pd ());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) __W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) _mm512_setzero_ps ());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
-            (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
-            (__mmask16) __U);
-}
-
-#define _mm_cvt_roundsd_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)_mm_undefined_ps(), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)__W,
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)_mm_setzero_ps(),
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti32_ss _mm_cvtsi32_ss
-#ifdef __x86_64__
-#define _mm_cvtss_i64 _mm_cvtss_si64
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
-#define _mm_cvti64_sd _mm_cvtsi64_sd
-#define _mm_cvti64_ss _mm_cvtsi64_ss
-#endif
-
-#ifdef __x86_64__
-#define _mm_cvt_roundi64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                      (int)(R)))
-
-#define _mm_cvt_roundsi64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                      (int)(R)))
-#endif
-
-#define _mm_cvt_roundsi32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
-
-#define _mm_cvt_roundi32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsi64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                     (int)(R)))
-
-#define _mm_cvt_roundi64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                     (int)(R)))
-#endif
-
-#define _mm_cvt_roundss_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)_mm_undefined_pd(), \
-                                               (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)__W,
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)_mm_setzero_pd(),
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu32_sd (__m128d __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
-                                       (unsigned long long)(B), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvt_roundu32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
-                                      (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu32_ss (__m128 __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
-                                      (unsigned long long)(B), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512(__M,
-                                              (__v16si) _mm512_set1_epi32(__A),
-                                              (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512(__M,
-                                              (__v8di) _mm512_set1_epi64(__A),
-                                              (__v8di) __O);
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
-    char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
-    char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
-    char __e46, char __e45, char __e44, char __e43, char __e42, char __e41,
-    char __e40, char __e39, char __e38, char __e37, char __e36, char __e35,
-    char __e34, char __e33, char __e32, char __e31, char __e30, char __e29,
-    char __e28, char __e27, char __e26, char __e25, char __e24, char __e23,
-    char __e22, char __e21, char __e20, char __e19, char __e18, char __e17,
-    char __e16, char __e15, char __e14, char __e13, char __e12, char __e11,
-    char __e10, char __e9, char __e8, char __e7, char __e6, char __e5,
-    char __e4, char __e3, char __e2, char __e1, char __e0) {
-
-  return __extension__ (__m512i)(__v64qi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31,
-     __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39,
-     __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47,
-     __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55,
-     __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
-    short __e27, short __e26, short __e25, short __e24, short __e23,
-    short __e22, short __e21, short __e20, short __e19, short __e18,
-    short __e17, short __e16, short __e15, short __e14, short __e13,
-    short __e12, short __e11, short __e10, short __e9, short __e8,
-    short __e7, short __e6, short __e5, short __e4, short __e3,
-    short __e2, short __e1, short __e0) {
-  return __extension__ (__m512i)(__v32hi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi32 (int __A, int __B, int __C, int __D,
-     int __E, int __F, int __G, int __H,
-     int __I, int __J, int __K, int __L,
-     int __M, int __N, int __O, int __P)
-{
-  return __extension__ (__m512i)(__v16si)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7,           \
-       e8,e9,e10,e11,e12,e13,e14,e15)          \
-  _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
-                   (e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi64 (long long __A, long long __B, long long __C,
-     long long __D, long long __E, long long __F,
-     long long __G, long long __H)
-{
-  return __extension__ (__m512i) (__v8di)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7)           \
-  _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_set_pd (double __A, double __B, double __C, double __D,
-        double __E, double __F, double __G, double __H)
-{
-  return __extension__ (__m512d)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7)              \
-  _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_set_ps (float __A, float __B, float __C, float __D,
-        float __E, float __F, float __G, float __H,
-        float __I, float __J, float __K, float __L,
-        float __M, float __N, float __O, float __P)
-{
-  return __extension__ (__m512)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
-  _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
-                (e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_abs_ps(__m512 __A)
-{
-  return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
-{
-  return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_abs_pd(__m512d __A)
-{
-  return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
-{
-  return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
-
-/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars as
- * outputs. This class of vector operation forms the basis of many scientific
- * computations. In vector-reduction arithmetic, the result is
- * independent of the evaluation order of the input elements of V.
-
- * For floating-point intrinsics:
- * 1. When using fadd/fmul intrinsics, the order of operations within the
- * vector is unspecified (associative math).
- * 2. When using fmin/fmax intrinsics, NaN or -0.0 elements within the vector
- * produce unspecified results.
-
- * The reductions use a bisection method: at each step, the vector from the
- * previous step is split in half and the operation is applied to the two
- * halves. This takes log2(n) steps, where n is the number of elements in
- * the vector.
- */
-
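-/* A minimal sketch of the bisection scheme described above, assuming plain
- * C99 (the helper and its names are hypothetical, not part of this API): an
- * 8-element add folds the upper half onto the lower half at each step, so it
- * finishes in log2(8) = 3 steps.
- *
- *   static long long __reduce_add8_sketch(const long long __v[8]) {
- *     long long __t[8];
- *     for (int __i = 0; __i < 8; ++__i)
- *       __t[__i] = __v[__i];
- *     for (int __n = 8; __n > 1; __n /= 2)      // 3 halving steps
- *       for (int __i = 0; __i < __n / 2; ++__i)
- *         __t[__i] += __t[__i + __n / 2];       // combine the two halves
- *     return __t[0];
- *   }
- */
-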
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_add_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_mul_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_add_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  return __builtin_ia32_reduce_mul_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
-}
-
-// -0.0 is used to ignore the start value since it is the neutral value of
-// floating point addition. For more information, please refer to
-// https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fadd-intrinsic
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
-  return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
-}
-
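-/* A minimal worked example of why -0.0 (rather than +0.0) is the neutral
- * start value: IEEE-754 addition gives -0.0 + x == x for every x, including
- * -0.0 + -0.0 == -0.0, whereas a +0.0 start would turn an input of all -0.0
- * elements into +0.0 + -0.0 == +0.0 and flip the sign of the result. */
-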
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
-  return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_maskz_mov_pd(__M, __W);
-  return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
-  return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_ps(__m512 __W) {
-  return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_ps(__m512 __W) {
-  return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_maskz_mov_ps(__M, __W);
-  return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
-  return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_pd(__m512d __V) {
-  return __builtin_ia32_reduce_fmax_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_pd(__m512d __V) {
-  return __builtin_ia32_reduce_fmin_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
-  return __builtin_ia32_reduce_fmax_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
-  return __builtin_ia32_reduce_fmin_pd512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_ps(__m512 __V) {
-  return __builtin_ia32_reduce_fmax_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_ps(__m512 __V) {
-  return __builtin_ia32_reduce_fmin_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
-  return __builtin_ia32_reduce_fmax_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
-  return __builtin_ia32_reduce_fmin_ps512(__V);
-}
-
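-// Illustrative usage (hypothetical, not from the upstream header): in the
-// masked reductions above, excluded lanes are first replaced by the
-// operation's identity (1.0f for multiply, -0.0f for add), so only the
-// selected lanes contribute to the result.
-//
-//   __m512 v = _mm512_set1_ps(2.0f);
-//   float p = _mm512_mask_reduce_mul_ps(0x000F, v);  // 2*2*2*2 == 16.0f
-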
-/// Moves the least significant 32 bits of a vector of [16 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __A
-///    A vector of [16 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_cvtsi512_si32(__m512i __A) {
-  __v16si __b = (__v16si)__A;
-  return __b[0];
-}
-
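-// Illustrative usage (hypothetical, not from the upstream header):
-//
-//   int lo = _mm512_cvtsi512_si32(_mm512_set1_epi32(7));  // lo == 7
-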
-/// Loads 8 double-precision (64-bit) floating-point elements stored at memory
-/// locations starting at location \a base_addr at packed 32-bit integer indices
-/// stored in the lower half of \a vindex, scaled by \a scale, and stores them
-/// in dst.
-///
-/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   dst[i+63:i] := MEM[addr+63:addr]
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_i32logather_pd(vindex, base_addr, scale)                        \
-  _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
-
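-// Illustrative usage (hypothetical, not from the upstream header): `table`
-// is an assumed array. Only the low 256 bits of `idx` supply indices, and
-// scale must be an immediate 1, 2, 4, or 8 (8 == sizeof(double)).
-//
-//   double table[256];
-//   __m512i idx = _mm512_set1_epi32(3);
-//   __m512d g = _mm512_i32logather_pd(idx, table, 8);  // 8 copies of table[3]
-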
-/// Loads 8 double-precision (64-bit) floating-point elements from memory
-/// starting at location \a base_addr at packed 32-bit integer indices stored in
-/// the lower half of \a vindex scaled by \a scale into dst using writemask
-/// \a mask (elements are copied from \a src when the corresponding mask bit is
-/// not set).
-///
-/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     dst[i+63:i] := MEM[addr+63:addr]
-///   ELSE
-///     dst[i+63:i] := src[i+63:i]
-///   FI
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale)        \
-  _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex),      \
-                           (base_addr), (scale))
-
-/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
-/// at packed 32-bit integer indices stored in the lower half of \a vindex
-/// scaled by \a scale and stores them in dst.
-///
-/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   dst[i+63:i] := MEM[addr+63:addr]
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_i32logather_epi64(vindex, base_addr, scale)                     \
-  _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
-
-/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
-/// at packed 32-bit integer indices stored in the lower half of \a vindex
-/// scaled by \a scale and stores them in dst using writemask \a mask (elements
-/// are copied from \a src when the corresponding mask bit is not set).
-///
-/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     dst[i+63:i] := MEM[addr+63:addr]
-///   ELSE
-///     dst[i+63:i] := src[i+63:i]
-///   FI
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale)     \
-  _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex),   \
-                              (base_addr), (scale))
-
-/// Stores 8 packed double-precision (64-bit) floating-point elements from
-/// \a v1 to memory locations starting at location \a base_addr at packed
-/// 32-bit integer indices stored in \a vindex scaled by \a scale.
-///
-/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   MEM[addr+63:addr] := v1[i+63:i]
-/// ENDFOR
-/// \endoperation
-#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale)                   \
-  _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1
-/// to memory locations starting at location \a base_addr at packed 32-bit
-/// integer indices stored in \a vindex scaled by \a scale. Only those elements
-/// whose corresponding mask bit is set in writemask \a mask are written to
-/// memory.
-///
-/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     MEM[addr+63:addr] := v1[i+63:i]
-///   FI
-/// ENDFOR
-/// \endoperation
-#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale)        \
-  _mm512_mask_i32scatter_pd((base_addr), (mask),                               \
-                            _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores 8 packed 64-bit integer elements from \a v1 to memory locations
-/// starting at location \a base_addr at packed 32-bit integer indices stored
-/// in \a vindex scaled by \a scale.
-///
-/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   MEM[addr+63:addr] := v1[i+63:i]
-/// ENDFOR
-/// \endoperation
-#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale)                \
-  _mm512_i32scatter_epi64((base_addr),                                         \
-                          _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores 8 packed 64-bit integer elements from \a v1 to memory locations
-/// starting at location \a base_addr at packed 32-bit integer indices stored
-/// in \a vindex scaled by \a scale, using writemask \a mask (elements whose
-/// corresponding mask bit is not set are not written to memory).
-///
-/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     MEM[addr+63:addr] := v1[i+63:i]
-///   FI
-/// ENDFOR
-/// \endoperation
-#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale)     \
-  _mm512_mask_i32scatter_epi64((base_addr), (mask),                            \
-                               _mm512_castsi512_si256(vindex), (v1), (scale))
-
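-// Illustrative usage (hypothetical, not from the upstream header): writes 42
-// to dst[0..7]; the low half of `idx` holds the indices 0..7 and scale 8
-// matches sizeof(long long).
-//
-//   long long dst[8];
-//   __m512i idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
-//                                  7, 6, 5, 4, 3, 2, 1, 0);
-//   _mm512_i32loscatter_epi64(dst, idx, _mm512_set1_epi64(42), 8);
-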
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __AVX512FINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
deleted file mode 100644
index 99409a3..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
+++ /dev/null
@@ -1,3349 +0,0 @@
-/*===----------- avx512fp16intrin.h - AVX512-FP16 intrinsics ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512fp16intrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512FP16INTRIN_H
-#define __AVX512FP16INTRIN_H
-
-/* Define the half-precision vector types used by the functions in this file. */
-typedef _Float16 __v32hf __attribute__((__vector_size__(64), __aligned__(64)));
-typedef _Float16 __m512h __attribute__((__vector_size__(64), __aligned__(64)));
-typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16)));
-typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16)));
-typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32)));
-typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32)));
-typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512                                                  \
-  __attribute__((__always_inline__, __nodebug__, __target__("avx512fp16"),     \
-                 __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS256                                                  \
-  __attribute__((__always_inline__, __nodebug__, __target__("avx512fp16"),     \
-                 __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128                                                  \
-  __attribute__((__always_inline__, __nodebug__, __target__("avx512fp16"),     \
-                 __min_vector_width__(128)))
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_cvtsh_h(__m512h __a) {
-  return __a[0];
-}
-
-static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_setzero_ph(void) {
-  return (__m128h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
-}
-
-static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_setzero_ph(void) {
-  return (__m256h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_undefined_ph(void) {
-  return (__m256h)__builtin_ia32_undef256();
-}
-
-static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_setzero_ph(void) {
-  return (__m512h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_undefined_ph(void) {
-  return (__m128h)__builtin_ia32_undef128();
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_undefined_ph(void) {
-  return (__m512h)__builtin_ia32_undef512();
-}
-
-static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_set1_ph(_Float16 __h) {
-  return (__m512h)(__v32hf){__h, __h, __h, __h, __h, __h, __h, __h,
-                            __h, __h, __h, __h, __h, __h, __h, __h,
-                            __h, __h, __h, __h, __h, __h, __h, __h,
-                            __h, __h, __h, __h, __h, __h, __h, __h};
-}
-
-static __inline __m512h __DEFAULT_FN_ATTRS512
-_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
-              _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
-              _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
-              _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16,
-              _Float16 __h17, _Float16 __h18, _Float16 __h19, _Float16 __h20,
-              _Float16 __h21, _Float16 __h22, _Float16 __h23, _Float16 __h24,
-              _Float16 __h25, _Float16 __h26, _Float16 __h27, _Float16 __h28,
-              _Float16 __h29, _Float16 __h30, _Float16 __h31, _Float16 __h32) {
-  return (__m512h)(__v32hf){__h32, __h31, __h30, __h29, __h28, __h27, __h26,
-                            __h25, __h24, __h23, __h22, __h21, __h20, __h19,
-                            __h18, __h17, __h16, __h15, __h14, __h13, __h12,
-                            __h11, __h10, __h9,  __h8,  __h7,  __h6,  __h5,
-                            __h4,  __h3,  __h2,  __h1};
-}
-
-#define _mm512_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \
-                       h14, h15, h16, h17, h18, h19, h20, h21, h22, h23, h24,  \
-                       h25, h26, h27, h28, h29, h30, h31, h32)                 \
-  _mm512_set_ph((h32), (h31), (h30), (h29), (h28), (h27), (h26), (h25), (h24), \
-                (h23), (h22), (h21), (h20), (h19), (h18), (h17), (h16), (h15), \
-                (h14), (h13), (h12), (h11), (h10), (h9), (h8), (h7), (h6),     \
-                (h5), (h4), (h3), (h2), (h1))
-
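-// Illustrative note (hypothetical, not from the upstream header):
-// _mm512_set_ph lists arguments from the highest element down, so the last
-// argument lands in element 0; _mm512_setr_ph reverses that, matching
-// memory order.
-//
-//   __m512h a = _mm512_setr_ph(1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-//                              0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-//   // a[0] == 1.0, a[1] == 2.0
-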
-static __inline __m512h __DEFAULT_FN_ATTRS512
-_mm512_set1_pch(_Float16 _Complex h) {
-  return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, h));
-}
-
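-// Illustrative note (hypothetical, not from the upstream header): a
-// _Float16 _Complex value occupies 32 bits (real part in the low half), so
-// bit-casting it to float and broadcasting with set1_ps replicates the
-// complex value into all 16 complex lanes.
-//
-//   _Float16 _Complex c = 1.0;         // 1.0 + 0.0i
-//   __m512h v = _mm512_set1_pch(c);    // 16 copies of (1.0 + 0.0i)
-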
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) {
-  return (__m128)__a;
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_castph_ps(__m256h __a) {
-  return (__m256)__a;
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_castph_ps(__m512h __a) {
-  return (__m512)__a;
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_castph_pd(__m128h __a) {
-  return (__m128d)__a;
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_castph_pd(__m256h __a) {
-  return (__m256d)__a;
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_castph_pd(__m512h __a) {
-  return (__m512d)__a;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_castph_si128(__m128h __a) {
-  return (__m128i)__a;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_castph_si256(__m256h __a) {
-  return (__m256i)__a;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castph_si512(__m512h __a) {
-  return (__m512i)__a;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castps_ph(__m128 __a) {
-  return (__m128h)__a;
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castps_ph(__m256 __a) {
-  return (__m256h)__a;
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castps_ph(__m512 __a) {
-  return (__m512h)__a;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castpd_ph(__m128d __a) {
-  return (__m128h)__a;
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castpd_ph(__m256d __a) {
-  return (__m256h)__a;
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castpd_ph(__m512d __a) {
-  return (__m512h)__a;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castsi128_ph(__m128i __a) {
-  return (__m128h)__a;
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_castsi256_ph(__m256i __a) {
-  return (__m256h)__a;
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_castsi512_ph(__m512i __a) {
-  return (__m512h)__a;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_castph256_ph128(__m256h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_castph512_ph128(__m512h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_castph512_ph256(__m512h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-                                 12, 13, 14, 15);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_castph128_ph256(__m128h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
-                                 -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_castph128_ph512(__m128h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_castph256_ph512(__m256h __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-                                 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-/// Constructs a 256-bit floating-point vector of [16 x half] from a
-///    128-bit floating-point vector of [8 x half]. The lower 128 bits
-///    contain the value of the source vector. The upper 128 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x half].
-/// \returns A 256-bit floating-point vector of [16 x half]. The lower 128 bits
-///    contain the value of the parameter. The upper 128 bits are set to zero.
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_zextph128_ph256(__m128h __a) {
-  return __builtin_shufflevector(__a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4,
-                                 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit floating-point vector of [32 x half] from a
-///    128-bit floating-point vector of [8 x half]. The lower 128 bits
-///    contain the value of the source vector. The upper 384 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x half].
-/// \returns A 512-bit floating-point vector of [32 x half]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_zextph128_ph512(__m128h __a) {
-  return __builtin_shufflevector(
-      __a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
-      13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit floating-point vector of [32 x half] from a
-///    256-bit floating-point vector of [16 x half]. The lower 256 bits
-///    contain the value of the source vector. The upper 256 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [16 x half].
-/// \returns A 512-bit floating-point vector of [32 x half]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_zextph256_ph512(__m256h __a) {
-  return __builtin_shufflevector(__a, (__v16hf)_mm256_setzero_ph(), 0, 1, 2, 3,
-                                 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-                                 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
-                                 29, 30, 31);
-}
-
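-// Illustrative note (hypothetical, not from the upstream header): the
-// castph*_ph* helpers above leave any widened upper elements undefined,
-// while the zextph*_ph* helpers guarantee zeros; prefer zext when the upper
-// lanes are observed later.
-//
-//   __m128h lo = (__m128h){1, 2, 3, 4, 5, 6, 7, 8};
-//   __m512h z = _mm512_zextph128_ph512(lo);   // elements 8..31 are 0.0
-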
-#define _mm_comi_round_sh(A, B, P, R)                                          \
-  __builtin_ia32_vcomish((__v8hf)(A), (__v8hf)(B), (int)(P), (int)(R))
-
-#define _mm_comi_sh(A, B, pred)                                                \
-  _mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION)
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h A,
-                                                          __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OS,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h A,
-                                                          __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OS,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h A,
-                                                          __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OS,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h A,
-                                                          __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OS,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h A,
-                                                          __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OS,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_US,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h A,
-                                                           __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h A,
-                                                            __m128h B) {
-  return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_UQ,
-                                _MM_FROUND_CUR_DIRECTION);
-}
-
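-// Illustrative note (hypothetical, not from the upstream header): the comi*
-// forms use signaling predicates (_CMP_*_OS / _CMP_NEQ_US), which raise the
-// invalid exception for any NaN operand; the ucomi* forms use the quiet
-// variants (_CMP_*_OQ / _CMP_NEQ_UQ), which signal only for SNaN. All of
-// them compare element 0 only.
-//
-//   __m128h a = (__m128h){1, 0, 0, 0, 0, 0, 0, 0};
-//   __m128h b = (__m128h){2, 0, 0, 0, 0, 0, 0, 0};
-//   int lt = _mm_comilt_sh(a, b);  // lt == 1
-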
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_add_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)((__v32hf)__A + (__v32hf)__B);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_add_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_add_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_add_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_add_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_addph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_add_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_add_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
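-// Illustrative usage (hypothetical, not from the upstream header): in the
-// masked forms, mask_ keeps the corresponding lane of __W where the mask bit
-// is clear, and maskz_ zeroes it. `a` and `b` are assumed __m512h values.
-//
-//   __m512h r = _mm512_maskz_add_ph(0x00000001, a, b);
-//   // r[0] == a[0] + b[0]; r[1..31] == 0.0
-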
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sub_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)((__v32hf)__A - (__v32hf)__B);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_sub_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_sub_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_sub_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_subph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_sub_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_sub_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_mul_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)((__v32hf)__A * (__v32hf)__B);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_mul_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_mul_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_mul_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_mulph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_mul_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_mul_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_div_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)((__v32hf)__A / (__v32hf)__B);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_div_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_div_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_div_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_div_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_divph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_div_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_div_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_min_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)__builtin_ia32_minph512((__v32hf)__A, (__v32hf)__B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_min_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_min_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_min_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_min_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_minph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_min_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_min_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_max_ph(__m512h __A,
-                                                              __m512h __B) {
-  return (__m512h)__builtin_ia32_maxph512((__v32hf)__A, (__v32hf)__B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_max_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U, (__v32hf)_mm512_max_ph(__A, __B), (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
-                                              (__v32hf)_mm512_max_ph(__A, __B),
-                                              (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm512_max_round_ph(A, B, R)                                           \
-  ((__m512h)__builtin_ia32_maxph512((__v32hf)(__m512h)(A),                     \
-                                    (__v32hf)(__m512h)(B), (int)(R)))
-
-#define _mm512_mask_max_round_ph(W, U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)),             \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_max_round_ph(U, A, B, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)),             \
-      (__v32hf)_mm512_setzero_ph()))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) {
-  return (__m512h)_mm512_and_epi32(_mm512_set1_epi32(0x7FFF7FFF), (__m512i)__A);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_conj_pch(__m512h __A) {
-  return (__m512h)_mm512_xor_ps((__m512)__A, _mm512_set1_ps(-0.0f));
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_conj_pch(__m512h __W, __mmask16 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_selectps_512(
-      (__mmask16)__U, (__v16sf)_mm512_conj_pch(__A), (__v16sf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_conj_pch(__mmask16 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_selectps_512((__mmask16)__U,
-                                              (__v16sf)_mm512_conj_pch(__A),
-                                              (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_sh(__m128h __A,
-                                                           __m128h __B) {
-  __A[0] += __B[0];
-  return __A;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  __A = _mm_add_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, __W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  __A = _mm_add_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
-}
-
-#define _mm_add_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_addsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_add_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_addsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_add_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_addsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
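-// Illustrative note (hypothetical, not from the upstream header): the scalar
-// *_sh operations compute element 0 only; elements 1..7 of the result are
-// carried over from the first operand.
-//
-//   __m128h a = (__m128h){1, 9, 9, 9, 9, 9, 9, 9};
-//   __m128h b = (__m128h){2, 0, 0, 0, 0, 0, 0, 0};
-//   __m128h r = _mm_add_sh(a, b);  // r == {3, 9, 9, 9, 9, 9, 9, 9}
-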
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_sh(__m128h __A,
-                                                           __m128h __B) {
-  __A[0] -= __B[0];
-  return __A;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  __A = _mm_sub_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, __W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  __A = _mm_sub_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
-}
-
-#define _mm_sub_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_subsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sub_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_subsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_sub_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_subsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_sh(__m128h __A,
-                                                           __m128h __B) {
-  __A[0] *= __B[0];
-  return __A;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  __A = _mm_mul_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, __W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  __A = _mm_mul_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
-}
-
-#define _mm_mul_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_mulsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_mul_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_mulsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_mul_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_mulsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_sh(__m128h __A,
-                                                           __m128h __B) {
-  __A[0] /= __B[0];
-  return __A;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  __A = _mm_div_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, __W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  __A = _mm_div_sh(__A, __B);
-  return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
-}
-
-#define _mm_div_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_divsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_div_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_divsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_div_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_divsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_sh(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)__builtin_ia32_minsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_minsh_round_mask((__v8hf)__A, (__v8hf)__B,
-                                                  (__v8hf)__W, (__mmask8)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_minsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_minsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_min_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_minsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_min_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_minsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_sh(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)__builtin_ia32_maxsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_maxsh_round_mask((__v8hf)__A, (__v8hf)__B,
-                                                  (__v8hf)__W, (__mmask8)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_maxsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_sh(A, B, R)                                              \
-  ((__m128h)__builtin_ia32_maxsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_max_round_sh(W, U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_maxsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_max_round_sh(U, A, B, R)                                     \
-  ((__m128h)__builtin_ia32_maxsh_round_mask(                                   \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm512_cmp_round_ph_mask(A, B, P, R)                                   \
-  ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A),              \
-                                           (__v32hf)(__m512h)(B), (int)(P),    \
-                                           (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_cmp_round_ph_mask(U, A, B, P, R)                           \
-  ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A),              \
-                                           (__v32hf)(__m512h)(B), (int)(P),    \
-                                           (__mmask32)(U), (int)(R)))
-
-#define _mm512_cmp_ph_mask(A, B, P)                                            \
-  _mm512_cmp_round_ph_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_cmp_ph_mask(U, A, B, P)                                    \
-  _mm512_mask_cmp_round_ph_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_cmp_round_sh_mask(X, Y, P, R)                                      \
-  ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X),                   \
-                                       (__v8hf)(__m128h)(Y), (int)(P),         \
-                                       (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cmp_round_sh_mask(M, X, Y, P, R)                              \
-  ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X),                   \
-                                       (__v8hf)(__m128h)(Y), (int)(P),         \
-                                       (__mmask8)(M), (int)(R)))
-
-#define _mm_cmp_sh_mask(X, Y, P)                                               \
-  ((__mmask8)__builtin_ia32_cmpsh_mask(                                        \
-      (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)-1,      \
-      _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_sh_mask(M, X, Y, P)                                       \
-  ((__mmask8)__builtin_ia32_cmpsh_mask(                                        \
-      (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)(M),     \
-      _MM_FROUND_CUR_DIRECTION))
-// loads with vmovsh:
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_sh(void const *__dp) {
-  struct __mm_load_sh_struct {
-    _Float16 __u;
-  } __attribute__((__packed__, __may_alias__));
-  _Float16 __u = ((struct __mm_load_sh_struct *)__dp)->__u;
-  return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0};
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_load_sh(__m128h __W, __mmask8 __U, const void *__A) {
-  __m128h src = (__v8hf)__builtin_shufflevector(
-      (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8);
-
-  return (__m128h)__builtin_ia32_loadsh128_mask((__v8hf *)__A, src, __U & 1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_load_sh(__mmask8 __U, const void *__A) {
-  return (__m128h)__builtin_ia32_loadsh128_mask(
-      (__v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_load_ph(void const *__p) {
-  return *(const __m512h *)__p;
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_load_ph(void const *__p) {
-  return *(const __m256h *)__p;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_ph(void const *__p) {
-  return *(const __m128h *)__p;
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_loadu_ph(void const *__p) {
-  struct __loadu_ph {
-    __m512h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ph *)__p)->__v;
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_loadu_ph(void const *__p) {
-  struct __loadu_ph {
-    __m256h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ph *)__p)->__v;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_loadu_ph(void const *__p) {
-  struct __loadu_ph {
-    __m128h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ph *)__p)->__v;
-}
-
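-// Illustrative note (hypothetical, not from the upstream header): load_ph
-// requires the pointer to be aligned to the full vector width, while
-// loadu_ph goes through a packed, may_alias struct and accepts any
-// alignment.
-//
-//   _Float16 buf[40] __attribute__((aligned(64)));
-//   __m512h u = _mm512_loadu_ph(buf + 1);  // unaligned access: OK
-//   __m512h v = _mm512_load_ph(buf);       // 64-byte-aligned access
-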
-// stores with vmovsh:
-static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_sh(void *__dp,
-                                                          __m128h __a) {
-  struct __mm_store_sh_struct {
-    _Float16 __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sh_struct *)__dp)->__u = __a[0];
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_sh(void *__W,
-                                                               __mmask8 __U,
-                                                               __m128h __A) {
-  __builtin_ia32_storesh128_mask((__v8hf *)__W, __A, __U & 1);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_store_ph(void *__P,
-                                                             __m512h __A) {
-  *(__m512h *)__P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_store_ph(void *__P,
-                                                             __m256h __A) {
-  *(__m256h *)__P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_ph(void *__P,
-                                                          __m128h __A) {
-  *(__m128h *)__P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_storeu_ph(void *__P,
-                                                              __m512h __A) {
-  struct __storeu_ph {
-    __m512h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ph *)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_storeu_ph(void *__P,
-                                                              __m256h __A) {
-  struct __storeu_ph {
-    __m256h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ph *)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128 _mm_storeu_ph(void *__P,
-                                                           __m128h __A) {
-  struct __storeu_ph {
-    __m128h_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ph *)__P)->__v = __A;
-}
-
-// moves with vmovsh:
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_move_sh(__m128h __a,
-                                                            __m128h __b) {
-  __a[0] = __b[0];
-  return __a;
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_move_sh(__m128h __W,
-                                                                 __mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B), __W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_move_sh(__mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B),
-                                     _mm_setzero_ph());
-}
-
-// vmovw:
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsi16_si128(short __a) {
-  return (__m128i)(__v8hi){__a, 0, 0, 0, 0, 0, 0, 0};
-}
-
-static __inline__ short __DEFAULT_FN_ATTRS128 _mm_cvtsi128_si16(__m128i __a) {
-  __v8hi __b = (__v8hi)__a;
-  return __b[0];
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rcp_ph(__m512h __A) {
-  return (__m512h)__builtin_ia32_rcpph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_rcpph512_mask((__v32hf)__A, (__v32hf)__W,
-                                               (__mmask32)__U);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp_ph(__mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_rcpph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rsqrt_ph(__m512h __A) {
-  return (__m512h)__builtin_ia32_rsqrtph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_rsqrtph512_mask((__v32hf)__A, (__v32hf)__W,
-                                                 (__mmask32)__U);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt_ph(__mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_rsqrtph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U);
-}
-
-#define _mm512_getmant_ph(A, B, C)                                             \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v32hf)_mm512_undefined_ph(), (__mmask32)-1,                           \
-      _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_ph(W, U, A, B, C)                                  \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W),   \
-      (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_getmant_ph(U, A, B, C)                                    \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_getmant_round_ph(A, B, C, R)                                    \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_getmant_round_ph(W, U, A, B, C, R)                         \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W),   \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_getmant_round_ph(U, A, B, C, R)                           \
-  ((__m512h)__builtin_ia32_getmantph512_mask(                                  \
-      (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
-
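-// Illustrative usage (hypothetical, not from the upstream header): the
-// immediate packs the sign control C (_MM_MANT_SIGN_*) into bits 3:2 and the
-// normalization interval B (_MM_MANT_NORM_*) into bits 1:0, hence
-// ((C) << 2) | (B). `x` is an assumed __m512h value.
-//
-//   __m512h m = _mm512_getmant_ph(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
-//   // mantissas normalized to [1, 2), signs taken from x
-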
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_getexp_ph(__m512h __A) {
-  return (__m512h)__builtin_ia32_getexpph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_getexpph512_mask(
-      (__v32hf)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_ph(__mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_getexpph512_mask(
-      (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_getexp_round_ph(A, R)                                           \
-  ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A),             \
-                                            (__v32hf)_mm512_undefined_ph(),    \
-                                            (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_getexp_round_ph(W, U, A, R)                                \
-  ((__m512h)__builtin_ia32_getexpph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(W), (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_getexp_round_ph(U, A, R)                                  \
-  ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A),             \
-                                            (__v32hf)_mm512_setzero_ph(),      \
-                                            (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_scalef_ph(__m512h __A,
-                                                                 __m512h __B) {
-  return (__m512h)__builtin_ia32_scalefph512_mask(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_scalefph512_mask((__v32hf)__A, (__v32hf)__B,
-                                                  (__v32hf)__W, (__mmask32)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_ph(__mmask32 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_scalefph512_mask(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_scalef_round_ph(A, B, R)                                        \
-  ((__m512h)__builtin_ia32_scalefph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B),                            \
-      (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_scalef_round_ph(W, U, A, B, R)                             \
-  ((__m512h)__builtin_ia32_scalefph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(W),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_scalef_round_ph(U, A, B, R)                               \
-  ((__m512h)__builtin_ia32_scalefph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B),                            \
-      (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
-
-#define _mm512_roundscale_ph(A, B)                                             \
-  ((__m512h)__builtin_ia32_rndscaleph_mask(                                    \
-      (__v32hf)(__m512h)(A), (int)(B), (__v32hf)(__m512h)(A), (__mmask32)-1,   \
-      _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_ph(A, B, C, imm)                                \
-  ((__m512h)__builtin_ia32_rndscaleph_mask(                                    \
-      (__v32hf)(__m512h)(C), (int)(imm), (__v32hf)(__m512h)(A),                \
-      (__mmask32)(B), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_ph(A, B, imm)                                  \
-  ((__m512h)__builtin_ia32_rndscaleph_mask(                                    \
-      (__v32hf)(__m512h)(B), (int)(imm), (__v32hf)_mm512_setzero_ph(),         \
-      (__mmask32)(A), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_round_ph(A, B, C, imm, R)                       \
-  ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(C), (int)(imm),  \
-                                           (__v32hf)(__m512h)(A),              \
-                                           (__mmask32)(B), (int)(R)))
-
-#define _mm512_maskz_roundscale_round_ph(A, B, imm, R)                         \
-  ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(B), (int)(imm),  \
-                                           (__v32hf)_mm512_setzero_ph(),       \
-                                           (__mmask32)(A), (int)(R)))
-
-#define _mm512_roundscale_round_ph(A, imm, R)                                  \
-  ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(A), (int)(imm),  \
-                                           (__v32hf)_mm512_undefined_ph(),     \
-                                           (__mmask32)-1, (int)(R)))
-
-#define _mm512_reduce_ph(A, imm)                                               \
-  ((__m512h)__builtin_ia32_reduceph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_undefined_ph(),       \
-      (__mmask32)-1, _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_reduce_ph(W, U, A, imm)                                    \
-  ((__m512h)__builtin_ia32_reduceph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)(__m512h)(W),                \
-      (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_reduce_ph(U, A, imm)                                      \
-  ((__m512h)__builtin_ia32_reduceph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_setzero_ph(),         \
-      (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_reduce_round_ph(W, U, A, imm, R)                           \
-  ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
-                                            (__v32hf)(__m512h)(W),             \
-                                            (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_reduce_round_ph(U, A, imm, R)                             \
-  ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
-                                            (__v32hf)_mm512_setzero_ph(),      \
-                                            (__mmask32)(U), (int)(R)))
-
-#define _mm512_reduce_round_ph(A, imm, R)                                      \
-  ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
-                                            (__v32hf)_mm512_undefined_ph(),    \
-                                            (__mmask32)-1, (int)(R)))
-
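-// Editor's note (illustrative): the scalar _sh intrinsics below follow the
-// usual AVX-512 scalar convention: only element 0 of the result is computed
-// (from element 0 of __B), while elements 7:1 are copied from __A. So
-// _mm_rcp_sh(a, b) should yield roughly { 1/b[0], a[1], ..., a[7] }.
-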
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_sh(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)__builtin_ia32_rcpsh_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_sh(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_rcpsh_mask((__v8hf)__A, (__v8hf)__B,
-                                            (__v8hf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_sh(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_rcpsh_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_sh(__m128h __A,
-                                                             __m128h __B) {
-  return (__m128h)__builtin_ia32_rsqrtsh_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_sh(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return (__m128h)__builtin_ia32_rsqrtsh_mask((__v8hf)__A, (__v8hf)__B,
-                                              (__v8hf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt_sh(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_rsqrtsh_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-#define _mm_getmant_round_sh(A, B, C, D, R)                                    \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)_mm_setzero_ph(), (__mmask8)-1, (int)(R)))
-
-#define _mm_getmant_sh(A, B, C, D)                                             \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)_mm_setzero_ph(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_sh(W, U, A, B, C, D)                                  \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)(__m128h)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_round_sh(W, U, A, B, C, D, R)                         \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)(__m128h)(W), (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_getmant_sh(U, A, B, C, D)                                    \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)_mm_setzero_ph(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_round_sh(U, A, B, C, D, R)                           \
-  ((__m128h)__builtin_ia32_getmantsh_round_mask(                               \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)),     \
-      (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-#define _mm_getexp_round_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_getexpsh128_round_mask(                             \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_sh(__m128h __A,
-                                                              __m128h __B) {
-  return (__m128h)__builtin_ia32_getexpsh128_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_getexpsh128_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)__W, (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_sh(W, U, A, B, R)                                \
-  ((__m128h)__builtin_ia32_getexpsh128_round_mask(                             \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_sh(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_getexpsh128_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_sh(U, A, B, R)                                  \
-  ((__m128h)__builtin_ia32_getexpsh128_round_mask(                             \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_scalef_round_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_scalefsh_round_mask(                                \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_sh(__m128h __A,
-                                                              __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefsh_round_mask((__v8hf)__A, (__v8hf)__B,
-                                                     (__v8hf)__W, (__mmask8)__U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_sh(W, U, A, B, R)                                \
-  ((__m128h)__builtin_ia32_scalefsh_round_mask(                                \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_sh(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefsh_round_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_sh(U, A, B, R)                                  \
-  ((__m128h)__builtin_ia32_scalefsh_round_mask(                                \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_roundscale_round_sh(A, B, imm, R)                                  \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(imm), (int)(R)))
-
-#define _mm_roundscale_sh(A, B, imm)                                           \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(imm), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_sh(W, U, A, B, I)                                  \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_round_sh(W, U, A, B, I, R)                         \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(I), (int)(R)))
-
-#define _mm_maskz_roundscale_sh(U, A, B, I)                                    \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_roundscale_round_sh(U, A, B, I, R)                           \
-  ((__m128h)__builtin_ia32_rndscalesh_round_mask(                              \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(I), (int)(R)))
-
-#define _mm_reduce_sh(A, B, C)                                                 \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(C), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_reduce_sh(W, U, A, B, C)                                      \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_reduce_sh(U, A, B, C)                                        \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_reduce_round_sh(A, B, C, R)                                        \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(C), (int)(R)))
-
-#define _mm_mask_reduce_round_sh(W, U, A, B, C, R)                             \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(C), (int)(R)))
-
-#define _mm_maskz_reduce_round_sh(U, A, B, C, R)                               \
-  ((__m128h)__builtin_ia32_reducesh_mask(                                      \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(C), (int)(R)))
-
-#define _mm512_sqrt_round_ph(A, R)                                             \
-  ((__m512h)__builtin_ia32_sqrtph512((__v32hf)(__m512h)(A), (int)(R)))
-
-#define _mm512_mask_sqrt_round_ph(W, U, A, R)                                  \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)),                 \
-      (__v32hf)(__m512h)(W)))
-
-#define _mm512_maskz_sqrt_round_ph(U, A, R)                                    \
-  ((__m512h)__builtin_ia32_selectph_512(                                       \
-      (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)),                 \
-      (__v32hf)_mm512_setzero_ph()))
-
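-// Editor's note (illustrative): unlike the families above, which pass the
-// mask into a masked builtin, the sqrt forms compute an unmasked
-// __builtin_ia32_sqrtph512 and then blend with __builtin_ia32_selectph_512
-// against __W or zero; the observable masking semantics are the same.
-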
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sqrt_ph(__m512h __A) {
-  return (__m512h)__builtin_ia32_sqrtph512((__v32hf)__A,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U,
-      (__v32hf)__builtin_ia32_sqrtph512((__v32hf)__A,
-                                        _MM_FROUND_CUR_DIRECTION),
-      (__v32hf)__W);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_ph(__mmask32 __U, __m512h __A) {
-  return (__m512h)__builtin_ia32_selectph_512(
-      (__mmask32)__U,
-      (__v32hf)__builtin_ia32_sqrtph512((__v32hf)__A,
-                                        _MM_FROUND_CUR_DIRECTION),
-      (__v32hf)_mm512_setzero_ph());
-}
-
-#define _mm_sqrt_round_sh(A, B, R)                                             \
-  ((__m128h)__builtin_ia32_sqrtsh_round_mask(                                  \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sqrt_round_sh(W, U, A, B, R)                                  \
-  ((__m128h)__builtin_ia32_sqrtsh_round_mask(                                  \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_sqrt_round_sh(U, A, B, R)                                    \
-  ((__m128h)__builtin_ia32_sqrtsh_round_mask(                                  \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(),    \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_sh(__m128h __A,
-                                                            __m128h __B) {
-  return (__m128h)__builtin_ia32_sqrtsh_round_mask(
-      (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(),
-      (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_sh(__m128h __W,
-                                                                 __mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_sqrtsh_round_mask(
-      (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)(__m128h)(__W),
-      (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_sh(__mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return (__m128h)__builtin_ia32_sqrtsh_round_mask(
-      (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(),
-      (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fpclass_ph_mask(U, A, imm)                                 \
-  ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A),          \
-                                               (int)(imm), (__mmask32)(U)))
-
-#define _mm512_fpclass_ph_mask(A, imm)                                         \
-  ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A),          \
-                                               (int)(imm), (__mmask32)-1))
-
-#define _mm_fpclass_sh_mask(A, imm)                                            \
-  ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm),   \
-                                           (__mmask8)-1))
-
-#define _mm_mask_fpclass_sh_mask(U, A, imm)                                    \
-  ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm),   \
-                                           (__mmask8)(U)))
-
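-// Editor's note (illustrative): the fpclass immediate is a bitmask of the
-// categories to test; per the Intel SDM the bits are, from bit 0: QNaN, +0,
-// -0, +inf, -inf, denormal, finite negative, SNaN. Testing lanes for either
-// infinity might therefore look like:
-//
-//   __mmask32 k = _mm512_fpclass_ph_mask(x, 0x08 | 0x10); // +inf or -inf
-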
-#define _mm512_cvt_roundpd_ph(A, R)                                            \
-  ((__m128h)__builtin_ia32_vcvtpd2ph512_mask(                                  \
-      (__v8df)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_ph(W, U, A, R)                                 \
-  ((__m128h)__builtin_ia32_vcvtpd2ph512_mask((__v8df)(A), (__v8hf)(W),         \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_ph(U, A, R)                                   \
-  ((__m128h)__builtin_ia32_vcvtpd2ph512_mask(                                  \
-      (__v8df)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512 _mm512_cvtpd_ph(__m512d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
-      (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m512d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
-      (__v8df)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_ph(__mmask8 __U, __m512d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
-      (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundph_pd(A, R)                                            \
-  ((__m512d)__builtin_ia32_vcvtph2pd512_mask(                                  \
-      (__v8hf)(A), (__v8df)_mm512_undefined_pd(), (__mmask8)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundph_pd(W, U, A, R)                                 \
-  ((__m512d)__builtin_ia32_vcvtph2pd512_mask((__v8hf)(A), (__v8df)(W),         \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_pd(U, A, R)                                   \
-  ((__m512d)__builtin_ia32_vcvtph2pd512_mask(                                  \
-      (__v8hf)(A), (__v8df)_mm512_setzero_pd(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_cvtph_pd(__m128h __A) {
-  return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
-      (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_pd(__m512d __W, __mmask8 __U, __m128h __A) {
-  return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
-      (__v8hf)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
-  return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
-      (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
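-// Editor's note (illustrative): the ph<->pd conversions move eight elements,
-// so the fp16 side is an __m128h against a full __m512d. Widening ph->pd is
-// exact, so its R operand matters only for suppressing exceptions
-// (_MM_FROUND_NO_EXC); the narrowing pd->ph direction rounds and accepts the
-// full set of _MM_FROUND_* rounding modes.
-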
-#define _mm_cvt_roundsh_ss(A, B, R)                                            \
-  ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B),       \
-                                               (__v4sf)_mm_undefined_ps(),     \
-                                               (__mmask8)(-1), (int)(R)))
-
-#define _mm_mask_cvt_roundsh_ss(W, U, A, B, R)                                 \
-  ((__m128)__builtin_ia32_vcvtsh2ss_round_mask(                                \
-      (__v4sf)(A), (__v8hf)(B), (__v4sf)(W), (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundsh_ss(U, A, B, R)                                   \
-  ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B),       \
-                                               (__v4sf)_mm_setzero_ps(),       \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtsh_ss(__m128 __A,
-                                                            __m128h __B) {
-  return (__m128)__builtin_ia32_vcvtsh2ss_round_mask(
-      (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_undefined_ps(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_ss(__m128 __W,
-                                                                 __mmask8 __U,
-                                                                 __m128 __A,
-                                                                 __m128h __B) {
-  return (__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)__A, (__v8hf)__B,
-                                                     (__v4sf)__W, (__mmask8)__U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsh_ss(__mmask8 __U,
-                                                                  __m128 __A,
-                                                                  __m128h __B) {
-  return (__m128)__builtin_ia32_vcvtsh2ss_round_mask(
-      (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvt_roundss_sh(A, B, R)                                            \
-  ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B),      \
-                                                (__v8hf)_mm_undefined_ph(),    \
-                                                (__mmask8)(-1), (int)(R)))
-
-#define _mm_mask_cvt_roundss_sh(W, U, A, B, R)                                 \
-  ((__m128h)__builtin_ia32_vcvtss2sh_round_mask(                               \
-      (__v8hf)(A), (__v4sf)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundss_sh(U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B),      \
-                                                (__v8hf)_mm_setzero_ph(),      \
-                                                (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtss_sh(__m128h __A,
-                                                             __m128 __B) {
-  return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
-      (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtss_sh(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128 __B) {
-  return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
-      (__v8hf)__A, (__v4sf)__B, (__v8hf)__W, (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_cvtss_sh(__mmask8 __U,
-                                                                   __m128h __A,
-                                                                   __m128 __B) {
-  return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
-      (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvt_roundsd_sh(A, B, R)                                            \
-  ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B),      \
-                                                (__v8hf)_mm_undefined_ph(),    \
-                                                (__mmask8)(-1), (int)(R)))
-
-#define _mm_mask_cvt_roundsd_sh(W, U, A, B, R)                                 \
-  ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask(                               \
-      (__v8hf)(A), (__v2df)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundsd_sh(U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B),      \
-                                                (__v8hf)_mm_setzero_ph(),      \
-                                                (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtsd_sh(__m128h __A,
-                                                             __m128d __B) {
-  return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
-      (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtsd_sh(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128d __B) {
-  return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
-      (__v8hf)__A, (__v2df)__B, (__v8hf)__W, (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsd_sh(__mmask8 __U, __m128h __A, __m128d __B) {
-  return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
-      (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvt_roundsh_sd(A, B, R)                                            \
-  ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B),      \
-                                                (__v2df)_mm_undefined_pd(),    \
-                                                (__mmask8)(-1), (int)(R)))
-
-#define _mm_mask_cvt_roundsh_sd(W, U, A, B, R)                                 \
-  ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask(                               \
-      (__v2df)(A), (__v8hf)(B), (__v2df)(W), (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundsh_sd(U, A, B, R)                                   \
-  ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B),      \
-                                                (__v2df)_mm_setzero_pd(),      \
-                                                (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtsh_sd(__m128d __A,
-                                                             __m128h __B) {
-  return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
-      (__v2df)__A, (__v8hf)__B, (__v2df)_mm_undefined_pd(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_sd(__m128d __W,
-                                                                  __mmask8 __U,
-                                                                  __m128d __A,
-                                                                  __m128h __B) {
-  return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
-      (__v2df)__A, (__v8hf)__B, (__v2df)__W, (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsh_sd(__mmask8 __U, __m128d __A, __m128h __B) {
-  return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
-      (__v2df)__A, (__v8hf)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
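-// Editor's note (illustrative): the integer conversions below come in pairs.
-// _mm512_cvt* rounds according to the current (or explicitly supplied)
-// rounding mode, while _mm512_cvtt* ("convert with truncation") always
-// rounds toward zero, matching a C-style cast:
-//
-//   __m512i i  = _mm512_cvtph_epi16(x);   // rounds per MXCSR
-//   __m512i it = _mm512_cvttph_epi16(x);  // truncates, like (short)x[n]
-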
-#define _mm512_cvt_roundph_epi16(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A),                      \
-                                            (__v32hi)_mm512_undefined_epi32(), \
-                                            (__mmask32)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epi16(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), (__v32hi)(W),        \
-                                            (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epi16(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A),                      \
-                                            (__v32hi)_mm512_setzero_epi32(),   \
-                                            (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epi16(__m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2w512_mask(
-      (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epi16(__m512i __W, __mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2w512_mask(
-      (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epi16(__mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2w512_mask(
-      (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epi16(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2w512_mask(                                  \
-      (__v32hf)(A), (__v32hi)_mm512_undefined_epi32(), (__mmask32)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epi16(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A), (__v32hi)(W),       \
-                                             (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epi16(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A),                     \
-                                             (__v32hi)_mm512_setzero_epi32(),  \
-                                             (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epi16(__m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2w512_mask(
-      (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epi16(__m512i __W, __mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2w512_mask(
-      (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epi16(__mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2w512_mask(
-      (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepi16_ph(A, R)                                         \
-  ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A),                      \
-                                            (__v32hf)_mm512_undefined_ph(),    \
-                                            (__mmask32)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepi16_ph(W, U, A, R)                              \
-  ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A), (__v32hf)(W),        \
-                                            (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepi16_ph(U, A, R)                                \
-  ((__m512h)__builtin_ia32_vcvtw2ph512_mask(                                   \
-      (__v32hi)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_ph(__m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
-      (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_ph(__m512h __W, __mmask32 __U, __m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
-      (__v32hi)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_ph(__mmask32 __U, __m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
-      (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundph_epu16(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2uw512_mask(                                  \
-      (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epu16(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A), (__v32hu)(W),       \
-                                             (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epu16(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A),                     \
-                                             (__v32hu)_mm512_setzero_epi32(),  \
-                                             (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epu16(__m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
-      (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epu16(__m512i __W, __mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
-      (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epu16(__mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
-      (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epu16(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2uw512_mask(                                 \
-      (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epu16(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A), (__v32hu)(W),      \
-                                              (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epu16(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A),                    \
-                                              (__v32hu)_mm512_setzero_epi32(), \
-                                              (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epu16(__m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
-      (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epu16(__m512i __W, __mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
-      (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epu16(__mmask32 __U, __m512h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
-      (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepu16_ph(A, R)                                         \
-  ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A),                     \
-                                             (__v32hf)_mm512_undefined_ph(),   \
-                                             (__mmask32)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepu16_ph(W, U, A, R)                              \
-  ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A), (__v32hf)(W),       \
-                                             (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepu16_ph(U, A, R)                                \
-  ((__m512h)__builtin_ia32_vcvtuw2ph512_mask(                                  \
-      (__v32hu)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_ph(__m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
-      (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_ph(__m512h __W, __mmask32 __U, __m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
-      (__v32hu)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_ph(__mmask32 __U, __m512i __A) {
-  return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
-      (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
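-// Editor's note (illustrative): from here on the element count drops to 16
-// for 32-bit and 8 for 64-bit integers, so the fp16 operand narrows to match
-// (__m256h for the dword forms, __m128h for the qword forms) while the
-// integer side remains a full __m512i.
-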
-#define _mm512_cvt_roundph_epi32(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2dq512_mask(                                  \
-      (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epi32(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A), (__v16si)(W),       \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epi32(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A),                     \
-                                             (__v16si)_mm512_setzero_epi32(),  \
-                                             (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epi32(__m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
-      (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epi32(__m512i __W, __mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
-      (__v16hf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epi32(__mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
-      (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundph_epu32(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2udq512_mask(                                 \
-      (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epu32(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A), (__v16su)(W),      \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epu32(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A),                    \
-                                              (__v16su)_mm512_setzero_epi32(), \
-                                              (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epu32(__m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
-      (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epu32(__m512i __W, __mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
-      (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epu32(__mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
-      (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepi32_ph(A, R)                                         \
-  ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A),                     \
-                                             (__v16hf)_mm256_undefined_ph(),   \
-                                             (__mmask16)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepi32_ph(W, U, A, R)                              \
-  ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A), (__v16hf)(W),       \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepi32_ph(U, A, R)                                \
-  ((__m256h)__builtin_ia32_vcvtdq2ph512_mask(                                  \
-      (__v16si)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ph(__m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
-      (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ph(__m256h __W, __mmask16 __U, __m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
-      (__v16si)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ph(__mmask16 __U, __m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
-      (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepu32_ph(A, R)                                         \
-  ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A),                    \
-                                              (__v16hf)_mm256_undefined_ph(),  \
-                                              (__mmask16)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepu32_ph(W, U, A, R)                              \
-  ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A), (__v16hf)(W),      \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepu32_ph(U, A, R)                                \
-  ((__m256h)__builtin_ia32_vcvtudq2ph512_mask(                                 \
-      (__v16su)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ph(__m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
-      (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ph(__m256h __W, __mmask16 __U, __m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
-      (__v16su)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ph(__mmask16 __U, __m512i __A) {
-  return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
-      (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epi32(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2dq512_mask(                                 \
-      (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epi32(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A), (__v16si)(W),      \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epi32(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A),                    \
-                                              (__v16si)_mm512_setzero_epi32(), \
-                                              (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epi32(__m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
-      (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epi32(__m512i __W, __mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
-      (__v16hf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epi32(__mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
-      (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epu32(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2udq512_mask(                                \
-      (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1),        \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epu32(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2udq512_mask((__v16hf)(A), (__v16su)(W),     \
-                                               (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epu32(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2udq512_mask(                                \
-      (__v16hf)(A), (__v16su)_mm512_setzero_epi32(), (__mmask16)(U),           \
-      (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epu32(__m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
-      (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epu32(__m512i __W, __mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
-      (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epu32(__mmask16 __U, __m256h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
-      (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepi64_ph(A, R)                                         \
-  ((__m128h)__builtin_ia32_vcvtqq2ph512_mask(                                  \
-      (__v8di)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepi64_ph(W, U, A, R)                              \
-  ((__m128h)__builtin_ia32_vcvtqq2ph512_mask((__v8di)(A), (__v8hf)(W),         \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepi64_ph(U, A, R)                                \
-  ((__m128h)__builtin_ia32_vcvtqq2ph512_mask(                                  \
-      (__v8di)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_ph(__m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
-      (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
-      (__v8di)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_ph(__mmask8 __U, __m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
-      (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundph_epi64(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A),                      \
-                                             (__v8di)_mm512_undefined_epi32(), \
-                                             (__mmask8)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epi64(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A), (__v8di)(W),         \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epi64(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2qq512_mask(                                  \
-      (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epi64(__m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
-      (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epi64(__m512i __W, __mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
-      (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
-      (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepu64_ph(A, R)                                         \
-  ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask(                                 \
-      (__v8du)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
-
-#define _mm512_mask_cvt_roundepu64_ph(W, U, A, R)                              \
-  ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask((__v8du)(A), (__v8hf)(W),        \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepu64_ph(U, A, R)                                \
-  ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask(                                 \
-      (__v8du)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_cvtepu64_ph(__m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
-      (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
-      (__v8du)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu64_ph(__mmask8 __U, __m512i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
-      (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundph_epu64(A, R)                                         \
-  ((__m512i)__builtin_ia32_vcvtph2uqq512_mask(                                 \
-      (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1),           \
-      (int)(R)))
-
-#define _mm512_mask_cvt_roundph_epu64(W, U, A, R)                              \
-  ((__m512i)__builtin_ia32_vcvtph2uqq512_mask((__v8hf)(A), (__v8du)(W),        \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_epu64(U, A, R)                                \
-  ((__m512i)__builtin_ia32_vcvtph2uqq512_mask(                                 \
-      (__v8hf)(A), (__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtph_epu64(__m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
-      (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_epu64(__m512i __W, __mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
-      (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
-      (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epi64(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2qq512_mask(                                 \
-      (__v8hf)(A), (__v8di)_mm512_undefined_epi32(), (__mmask8)(-1),           \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epi64(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2qq512_mask((__v8hf)(A), (__v8di)(W),        \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epi64(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2qq512_mask(                                 \
-      (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epi64(__m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
-      (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epi64(__m512i __W, __mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
-      (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
-      (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundph_epu64(A, R)                                        \
-  ((__m512i)__builtin_ia32_vcvttph2uqq512_mask(                                \
-      (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1),           \
-      (int)(R)))
-
-#define _mm512_mask_cvtt_roundph_epu64(W, U, A, R)                             \
-  ((__m512i)__builtin_ia32_vcvttph2uqq512_mask((__v8hf)(A), (__v8du)(W),       \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundph_epu64(U, A, R)                               \
-  ((__m512i)__builtin_ia32_vcvttph2uqq512_mask(                                \
-      (__v8hf)(A), (__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttph_epu64(__m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
-      (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttph_epu64(__m512i __W, __mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
-      (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
-      (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
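-// Note on the conversion pattern above (illustrative summary): the plain
-// forms (e.g. _mm512_cvtph_epu64) convert using the current MXCSR rounding
-// mode, the *_round_* macros take an explicit rounding/exception control R
-// such as (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC), _mask_ variants
-// copy unselected lanes from the passthrough operand W, and _maskz_
-// variants zero them.
-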
-#define _mm_cvt_roundsh_i32(A, R)                                              \
-  ((int)__builtin_ia32_vcvtsh2si32((__v8hf)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvtsh_i32(__m128h __A) {
-  return (int)__builtin_ia32_vcvtsh2si32((__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvt_roundsh_u32(A, R)                                              \
-  ((unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)(A), (int)(R)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS128
-_mm_cvtsh_u32(__m128h __A) {
-  return (unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)__A,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsh_i64(A, R)                                              \
-  ((long long)__builtin_ia32_vcvtsh2si64((__v8hf)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvtsh_i64(__m128h __A) {
-  return (long long)__builtin_ia32_vcvtsh2si64((__v8hf)__A,
-                                               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvt_roundsh_u64(A, R)                                              \
-  ((unsigned long long)__builtin_ia32_vcvtsh2usi64((__v8hf)(A), (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtsh_u64(__m128h __A) {
-  return (unsigned long long)__builtin_ia32_vcvtsh2usi64(
-      (__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
-}
-#endif // __x86_64__
-
-#define _mm_cvt_roundu32_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_vcvtusi2sh((__v8hf)(A), (unsigned int)(B), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_cvtu32_sh(__m128h __A, unsigned int __B) {
-  __A[0] = __B;
-  return __A;
-}
-
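-// The non-rounding scalar converts above and below simply store the
-// converted integer into lane 0 of __A and return it; the compiler is
-// expected to select the corresponding VCVT[U]SI2SH instruction for this
-// pattern, with lanes 1-7 passing through from __A unchanged.
-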
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_vcvtusi642sh((__v8hf)(A), (unsigned long long)(B),  \
-                                        (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_cvtu64_sh(__m128h __A, unsigned long long __B) {
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvt_roundi32_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_vcvtsi2sh((__v8hf)(A), (int)(B), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti32_sh(__m128h __A,
-                                                              int __B) {
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundi64_sh(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_vcvtsi642sh((__v8hf)(A), (long long)(B), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti64_sh(__m128h __A,
-                                                              long long __B) {
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvtt_roundsh_i32(A, R)                                             \
-  ((int)__builtin_ia32_vcvttsh2si32((__v8hf)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvttsh_i32(__m128h __A) {
-  return (int)__builtin_ia32_vcvttsh2si32((__v8hf)__A,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsh_i64(A, R)                                             \
-  ((long long)__builtin_ia32_vcvttsh2si64((__v8hf)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvttsh_i64(__m128h __A) {
-  return (long long)__builtin_ia32_vcvttsh2si64((__v8hf)__A,
-                                                _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsh_u32(A, R)                                             \
-  ((unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)(A), (int)(R)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS128
-_mm_cvttsh_u32(__m128h __A) {
-  return (unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)__A,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsh_u64(A, R)                                             \
-  ((unsigned long long)__builtin_ia32_vcvttsh2usi64((__v8hf)(A), (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttsh_u64(__m128h __A) {
-  return (unsigned long long)__builtin_ia32_vcvttsh2usi64(
-      (__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
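-// The cvtt/cvtt_round forms above truncate (round toward zero) regardless
-// of the MXCSR rounding mode; in their *_round_* macros only the exception
-// behavior of R (e.g. _MM_FROUND_NO_EXC) is meaningful. The cvt forms honor
-// the current or explicitly supplied rounding mode.
-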
-#define _mm512_cvtx_roundph_ps(A, R)                                           \
-  ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A),                     \
-                                             (__v16sf)_mm512_undefined_ps(),   \
-                                             (__mmask16)(-1), (int)(R)))
-
-#define _mm512_mask_cvtx_roundph_ps(W, U, A, R)                                \
-  ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A), (__v16sf)(W),       \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtx_roundph_ps(U, A, R)                                  \
-  ((__m512)__builtin_ia32_vcvtph2psx512_mask(                                  \
-      (__v16hf)(A), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtxph_ps(__m256h __A) {
-  return (__m512)__builtin_ia32_vcvtph2psx512_mask(
-      (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtxph_ps(__m512 __W, __mmask16 __U, __m256h __A) {
-  return (__m512)__builtin_ia32_vcvtph2psx512_mask(
-      (__v16hf)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtxph_ps(__mmask16 __U, __m256h __A) {
-  return (__m512)__builtin_ia32_vcvtph2psx512_mask(
-      (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtx_roundps_ph(A, R)                                           \
-  ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A),                    \
-                                              (__v16hf)_mm256_undefined_ph(),  \
-                                              (__mmask16)(-1), (int)(R)))
-
-#define _mm512_mask_cvtx_roundps_ph(W, U, A, R)                                \
-  ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A), (__v16hf)(W),      \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtx_roundps_ph(U, A, R)                                  \
-  ((__m256h)__builtin_ia32_vcvtps2phx512_mask(                                 \
-      (__v16sf)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512 _mm512_cvtxps_ph(__m512 __A) {
-  return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
-      (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtxps_ph(__m256h __W, __mmask16 __U, __m512 __A) {
-  return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
-      (__v16sf)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtxps_ph(__mmask16 __U, __m512 __A) {
-  return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
-      (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_ph(A, B, C, R)                                      \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_fmadd_round_ph(A, U, B, C, R)                              \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_mask3_fmadd_round_ph(A, B, C, U, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask3(                                  \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_fmadd_round_ph(U, A, B, C, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddph512_maskz(                                  \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_fmsub_round_ph(A, B, C, R)                                      \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_fmsub_round_ph(A, U, B, C, R)                              \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_fmsub_round_ph(U, A, B, C, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddph512_maskz(                                  \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_fnmadd_round_ph(A, B, C, R)                                     \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),    \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask3_fnmadd_round_ph(A, B, C, U, R)                            \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask3(                                  \
-      -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_fnmadd_round_ph(U, A, B, C, R)                            \
-  ((__m512h)__builtin_ia32_vfmaddph512_maskz(                                  \
-      -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_fnmsub_round_ph(A, B, C, R)                                     \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),   \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_maskz_fnmsub_round_ph(U, A, B, C, R)                            \
-  ((__m512h)__builtin_ia32_vfmaddph512_maskz(                                  \
-      -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),   \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_ph(__m512h __A,
-                                                                __m512h __B,
-                                                                __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
-                                                  (__v32hf)__C, (__mmask32)-1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
-                                                  (__v32hf)__C, (__mmask32)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask3((__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_maskz((__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmsub_ph(__m512h __A,
-                                                                __m512h __B,
-                                                                __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
-                                                  -(__v32hf)__C, (__mmask32)-1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
-                                                  -(__v32hf)__C, (__mmask32)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_maskz(
-      (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmadd_ph(__m512h __A,
-                                                                 __m512h __B,
-                                                                 __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
-                                                  (__v32hf)__C, (__mmask32)-1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask3(-(__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_maskz(-(__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmsub_ph(__m512h __A,
-                                                                 __m512h __B,
-                                                                 __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
-                                                  -(__v32hf)__C, (__mmask32)-1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_maskz(
-      -(__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
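-// Mask-semantics sketch for the FMA family above (a, b, c, m are
-// placeholder variables, assuming an AVX512FP16-enabled build):
-//   __m512h r1 = _mm512_mask_fmadd_ph(a, m, b, c);  // inactive lanes <- a
-//   __m512h r2 = _mm512_mask3_fmadd_ph(a, b, c, m); // inactive lanes <- c
-//   __m512h r3 = _mm512_maskz_fmadd_ph(m, a, b, c); // inactive lanes <- 0
-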
-#define _mm512_fmaddsub_round_ph(A, B, C, R)                                   \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_mask(                                \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_fmaddsub_round_ph(A, U, B, C, R)                           \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_mask(                                \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_mask3_fmaddsub_round_ph(A, B, C, U, R)                          \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_mask3(                               \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_fmaddsub_round_ph(U, A, B, C, R)                          \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_maskz(                               \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_fmsubadd_round_ph(A, B, C, R)                                   \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_mask(                                \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)-1, (int)(R)))
-
-#define _mm512_mask_fmsubadd_round_ph(A, U, B, C, R)                           \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_mask(                                \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_maskz_fmsubadd_round_ph(U, A, B, C, R)                          \
-  ((__m512h)__builtin_ia32_vfmaddsubph512_maskz(                               \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_mask3(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_maskz(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
-      (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
-      (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddsubph512_maskz(
-      (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
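-// In the fmaddsub forms above, even-indexed lanes compute a*b - c and
-// odd-indexed lanes compute a*b + c; fmsubadd swaps the two, adding on
-// even lanes and subtracting on odd lanes.
-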
-#define _mm512_mask3_fmsub_round_ph(A, B, C, U, R)                             \
-  ((__m512h)__builtin_ia32_vfmsubph512_mask3(                                  \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmsubph512_mask3((__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_ph(A, B, C, U, R)                          \
-  ((__m512h)__builtin_ia32_vfmsubaddph512_mask3(                               \
-      (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),     \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmsubaddph512_mask3(
-      (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_ph(A, U, B, C, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
-                                                  (__v32hf)__C, (__mmask32)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_ph(A, U, B, C, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddph512_mask(                                   \
-      (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C),   \
-      (__mmask32)(U), (int)(R)))
-
-#define _mm512_mask3_fnmsub_round_ph(A, B, C, U, R)                            \
-  ((__m512h)__builtin_ia32_vfmsubph512_mask3(                                  \
-      -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C),    \
-      (__mmask32)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
-                                                  -(__v32hf)__C, (__mmask32)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
-  return (__m512h)__builtin_ia32_vfmsubph512_mask3(-(__v32hf)__A, (__v32hf)__B,
-                                                   (__v32hf)__C, (__mmask32)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sh(__m128h __W,
-                                                             __m128h __A,
-                                                             __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B,
-                                       (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
-}
-
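-// For the scalar _sh forms, only lane 0 participates: _mm_fmadd_sh(w, a, b)
-// computes w[0]*a[0] + b[0] in lane 0 and copies lanes 1-7 from w
-// (w, a, b are placeholder variables).
-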
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_sh(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B,
-                                       (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sh(A, B, C, R)                                         \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C),        \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fmadd_round_sh(W, U, A, B, R)                                 \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, (__v8hf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_sh(U, A, B, C, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddsh3_maskz(                                    \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
-  return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_sh(W, X, Y, U, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask3(                                    \
-      (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_sh(__m128h __W,
-                                                             __m128h __A,
-                                                             __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A,
-                                                -(__v8hf)__B, (__mmask8)-1,
-                                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_sh(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A,
-                                                -(__v8hf)__B, (__mmask8)__U,
-                                                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_sh(A, B, C, R)                                         \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C),       \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fmsub_round_sh(W, U, A, B, R)                                 \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B,
-                                                 -(__v8hf)__C, (__mmask8)__U,
-                                                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_sh(U, A, B, C, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddsh3_maskz(                                    \
-      (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
-  return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_sh(W, X, Y, U, R)                                \
-  ((__m128h)__builtin_ia32_vfmsubsh3_mask3(                                    \
-      (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_sh(__m128h __W,
-                                                              __m128h __A,
-                                                              __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B,
-                                       (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B,
-                                       (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_sh(A, B, C, R)                                        \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C),       \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fnmadd_round_sh(W, U, A, B, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R)                               \
-  ((__m128h)__builtin_ia32_vfmaddsh3_maskz(                                    \
-      (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
-  return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_sh(W, X, Y, U, R)                               \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask3(                                    \
-      (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_sh(__m128h __W,
-                                                              __m128h __A,
-                                                              __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B,
-                                       (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B,
-                                       (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_sh(A, B, C, R)                                        \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C),      \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fnmsub_round_sh(W, U, A, B, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddsh3_mask(                                     \
-      (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B),      \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R)                               \
-  ((__m128h)__builtin_ia32_vfmaddsh3_maskz(                                    \
-      (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C),      \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
-  return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_sh(W, X, Y, U, R)                               \
-  ((__m128h)__builtin_ia32_vfmsubsh3_mask3(                                    \
-      (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y),       \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_sch(__m128h __A,
-                                                               __m128h __B,
-                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
-                                                 (__v4sf)__C, (__mmask8)-1,
-                                                 _MM_FROUND_CUR_DIRECTION);
-}
-
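-// The complex-half intrinsics (_sch/_pch) treat each adjacent pair of fp16
-// lanes as one complex value, which is why operands are cast to float
-// vectors such as __v4sf: each 32-bit lane carries one real/imaginary pair.
-// The fc* variants additionally use the complex conjugate of one operand.
-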
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fcmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
-                                                  (__v4sf)__C, (__mmask8)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask3(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fcmadd_round_sch(A, B, C, R)                                       \
-  ((__m128h)__builtin_ia32_vfcmaddcsh_mask(                                    \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fcmadd_round_sch(A, U, B, C, R)                               \
-  ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask(                              \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fcmadd_round_sch(U, A, B, C, R)                              \
-  ((__m128h)__builtin_ia32_vfcmaddcsh_maskz(                                   \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_mask3_fcmadd_round_sch(A, B, C, U, R)                              \
-  ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask3(                             \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sch(__m128h __A,
-                                                              __m128h __B,
-                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
-                                                (__v4sf)__C, (__mmask8)-1,
-                                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_round_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
-                                                 (__v4sf)__C, (__mmask8)__U,
-                                                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_round_mask3(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sch(A, B, C, R)                                        \
-  ((__m128h)__builtin_ia32_vfmaddcsh_mask(                                     \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fmadd_round_sch(A, U, B, C, R)                                \
-  ((__m128h)__builtin_ia32_vfmaddcsh_round_mask(                               \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fmadd_round_sch(U, A, B, C, R)                               \
-  ((__m128h)__builtin_ia32_vfmaddcsh_maskz(                                    \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_mask3_fmadd_round_sch(A, B, C, U, R)                               \
-  ((__m128h)__builtin_ia32_vfmaddcsh_round_mask3(                              \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
-      (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_sch(__m128h __A,
-                                                              __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcsh_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fcmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcsh_mask((__v4sf)__A, (__v4sf)__B,
-                                                (__v4sf)__W, (__mmask8)__U,
-                                                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fcmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcsh_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fcmul_round_sch(A, B, R)                                           \
-  ((__m128h)__builtin_ia32_vfcmulcsh_mask(                                     \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),                              \
-      (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fcmul_round_sch(W, U, A, B, R)                                \
-  ((__m128h)__builtin_ia32_vfcmulcsh_mask(                                     \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fcmul_round_sch(U, A, B, R)                                  \
-  ((__m128h)__builtin_ia32_vfcmulcsh_mask(                                     \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),                              \
-      (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_sch(__m128h __A,
-                                                             __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcsh_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_sch(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcsh_mask((__v4sf)__A, (__v4sf)__B,
-                                               (__v4sf)__W, (__mmask8)__U,
-                                               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcsh_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmul_round_sch(A, B, R)                                            \
-  ((__m128h)__builtin_ia32_vfmulcsh_mask(                                      \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),                              \
-      (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fmul_round_sch(W, U, A, B, R)                                 \
-  ((__m128h)__builtin_ia32_vfmulcsh_mask(                                      \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W),        \
-      (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fmul_round_sch(U, A, B, R)                                   \
-  ((__m128h)__builtin_ia32_vfmulcsh_mask(                                      \
-      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),                              \
-      (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmul_pch(__m512h __A,
-                                                                 __m512h __B) {
-  return (__m512h)__builtin_ia32_vfcmulcph512_mask(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fcmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_vfcmulcph512_mask((__v16sf)__A, (__v16sf)__B,
-                                                   (__v16sf)__W, (__mmask16)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fcmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_vfcmulcph512_mask(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fcmul_round_pch(A, B, R)                                        \
-  ((__m512h)__builtin_ia32_vfcmulcph512_mask(                                  \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),                            \
-      (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fcmul_round_pch(W, U, A, B, R)                             \
-  ((__m512h)__builtin_ia32_vfcmulcph512_mask(                                  \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_fcmul_round_pch(U, A, B, R)                               \
-  ((__m512h)__builtin_ia32_vfcmulcph512_mask(                                  \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),                            \
-      (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmul_pch(__m512h __A,
-                                                                __m512h __B) {
-  return (__m512h)__builtin_ia32_vfmulcph512_mask(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_vfmulcph512_mask((__v16sf)__A, (__v16sf)__B,
-                                                  (__v16sf)__W, (__mmask16)__U,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_vfmulcph512_mask(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmul_round_pch(A, B, R)                                         \
-  ((__m512h)__builtin_ia32_vfmulcph512_mask(                                   \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),                            \
-      (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fmul_round_pch(W, U, A, B, R)                              \
-  ((__m512h)__builtin_ia32_vfmulcph512_mask(                                   \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_fmul_round_pch(U, A, B, R)                                \
-  ((__m512h)__builtin_ia32_vfmulcph512_mask(                                   \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),                            \
-      (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmadd_pch(__m512h __A,
-                                                                  __m512h __B,
-                                                                  __m512h __C) {
-  return (__m512h)__builtin_ia32_vfcmaddcph512_mask3(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfcmaddcph512_mask(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
-  return (__m512h)__builtin_ia32_vfcmaddcph512_mask3(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fcmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfcmaddcph512_maskz(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fcmadd_round_pch(A, B, C, R)                                    \
-  ((__m512h)__builtin_ia32_vfcmaddcph512_mask3(                                \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fcmadd_round_pch(A, U, B, C, R)                            \
-  ((__m512h)__builtin_ia32_vfcmaddcph512_mask(                                 \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_mask3_fcmadd_round_pch(A, B, C, U, R)                           \
-  ((__m512h)__builtin_ia32_vfcmaddcph512_mask3(                                \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_fcmadd_round_pch(U, A, B, C, R)                           \
-  ((__m512h)__builtin_ia32_vfcmaddcph512_maskz(                                \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_pch(__m512h __A,
-                                                                 __m512h __B,
-                                                                 __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_mask3((__v16sf)__A, (__v16sf)__B,
-                                                    (__v16sf)__C, (__mmask16)-1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
-                                                   (__v16sf)__C, (__mmask16)__U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_mask3(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_maskz(
-      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
-      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_pch(A, B, C, R)                                     \
-  ((__m512h)__builtin_ia32_vfmaddcph512_mask3(                                 \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fmadd_round_pch(A, U, B, C, R)                             \
-  ((__m512h)__builtin_ia32_vfmaddcph512_mask(                                  \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_mask3_fmadd_round_pch(A, B, C, U, R)                            \
-  ((__m512h)__builtin_ia32_vfmaddcph512_mask3(                                 \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_fmadd_round_pch(U, A, B, C, R)                            \
-  ((__m512h)__builtin_ia32_vfmaddcph512_maskz(                                 \
-      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
-      (__mmask16)(U), (int)(R)))
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_ph(__m512h __W) {
-  return __builtin_ia32_reduce_fadd_ph512(-0.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_ph(__m512h __W) {
-  return __builtin_ia32_reduce_fmul_ph512(1.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_ph(__m512h __V) {
-  return __builtin_ia32_reduce_fmax_ph512(__V);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_ph(__m512h __V) {
-  return __builtin_ia32_reduce_fmin_ph512(__V);
-}
-
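-// The reductions above seed the combining operation with its identity
-// value: -0.0 for addition (IEEE: -0.0 + x == x for every x) and 1.0 for
-// multiplication.
-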
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_ph(__mmask32 __U, __m512h __A, __m512h __W) {
-  return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, (__v32hf)__W,
-                                              (__v32hf)__A);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_ph(__m512h __A, __m512i __I, __m512h __B) {
-  return (__m512h)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
-                                                 (__v32hi)__B);
-}
-
-static __inline__ __m512h __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_ph(__m512i __A, __m512h __B) {
-  return (__m512h)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
-}
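-
-// Editor's note (illustrative): the two permutes above reuse the 16-bit
-// integer permute builtins. A permute only moves whole lanes, so routing the
-// FP16 payloads through the word-sized integer form is value-preserving.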
-
-// The intrinsics below are aliases for the f*mul_*ch intrinsics.
-#define _mm512_mul_pch(A, B) _mm512_fmul_pch(A, B)
-#define _mm512_mask_mul_pch(W, U, A, B) _mm512_mask_fmul_pch(W, U, A, B)
-#define _mm512_maskz_mul_pch(U, A, B) _mm512_maskz_fmul_pch(U, A, B)
-#define _mm512_mul_round_pch(A, B, R) _mm512_fmul_round_pch(A, B, R)
-#define _mm512_mask_mul_round_pch(W, U, A, B, R)                               \
-  _mm512_mask_fmul_round_pch(W, U, A, B, R)
-#define _mm512_maskz_mul_round_pch(U, A, B, R)                                 \
-  _mm512_maskz_fmul_round_pch(U, A, B, R)
-
-#define _mm512_cmul_pch(A, B) _mm512_fcmul_pch(A, B)
-#define _mm512_mask_cmul_pch(W, U, A, B) _mm512_mask_fcmul_pch(W, U, A, B)
-#define _mm512_maskz_cmul_pch(U, A, B) _mm512_maskz_fcmul_pch(U, A, B)
-#define _mm512_cmul_round_pch(A, B, R) _mm512_fcmul_round_pch(A, B, R)
-#define _mm512_mask_cmul_round_pch(W, U, A, B, R)                              \
-  _mm512_mask_fcmul_round_pch(W, U, A, B, R)
-#define _mm512_maskz_cmul_round_pch(U, A, B, R)                                \
-  _mm512_maskz_fcmul_round_pch(U, A, B, R)
-
-#define _mm_mul_sch(A, B) _mm_fmul_sch(A, B)
-#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch(W, U, A, B)
-#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch(U, A, B)
-#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch(A, B, R)
-#define _mm_mask_mul_round_sch(W, U, A, B, R)                                  \
-  _mm_mask_fmul_round_sch(W, U, A, B, R)
-#define _mm_maskz_mul_round_sch(U, A, B, R) _mm_maskz_fmul_round_sch(U, A, B, R)
-
-#define _mm_cmul_sch(A, B) _mm_fcmul_sch(A, B)
-#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch(W, U, A, B)
-#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch(U, A, B)
-#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch(A, B, R)
-#define _mm_mask_cmul_round_sch(W, U, A, B, R)                                 \
-  _mm_mask_fcmul_round_sch(W, U, A, B, R)
-#define _mm_maskz_cmul_round_sch(U, A, B, R)                                   \
-  _mm_maskz_fcmul_round_sch(U, A, B, R)
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-#undef __DEFAULT_FN_ATTRS512
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
deleted file mode 100644
index 6a5a860..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
+++ /dev/null
@@ -1,522 +0,0 @@
-/*===--------- avx512vlbf16intrin.h - AVX512_BF16 intrinsics ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512vlbf16intrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLBF16INTRIN_H
-#define __AVX512VLBF16INTRIN_H
-
-typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
-
-#define __DEFAULT_FN_ATTRS128 \
-  __attribute__((__always_inline__, __nodebug__, \
-                 __target__("avx512vl,avx512bf16"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 \
-  __attribute__((__always_inline__, __nodebug__, \
-                 __target__("avx512vl,avx512bf16"), __min_vector_width__(256)))
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \param __B
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __B, and higher 64 bits come from conversion of __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_cvtne2ps_pbh(__m128 __A, __m128 __B) {
-  return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A,
-                                                    (__v4sf) __B);
-}
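-
-// Editor's illustration (not part of the upstream header): a hypothetical
-// wrapper making the documented lane layout explicit -- element i of the
-// result is bf16(__lo[i]) for i < 4 and bf16(__hi[i - 4]) for i >= 4:
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-__example_pack_two_ps_to_pbh(__m128 __hi, __m128 __lo) {
-  return _mm_cvtne2ps_pbh(__hi, __lo); // first argument supplies the upper half
-}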
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \param __B
-///    A 128-bit vector of [4 x float].
-/// \param __W
-///    A 128-bit vector of [8 x bfloat].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A or __B. A 0 means element from __W.
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __B, and higher 64 bits come from conversion of __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
-                                             (__v8hi)__W);
-}
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \param __B
-///    A 128-bit vector of [4 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A or __B. A 0 means element is zero.
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __B, and higher 64 bits come from conversion of __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \param __B
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
-///    conversion of __B, and higher 128 bits come from conversion of __A.
-static __inline__ __m256bh __DEFAULT_FN_ATTRS256
-_mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) {
-  return (__m256bh)__builtin_ia32_cvtne2ps2bf16_256((__v8sf) __A,
-                                                    (__v8sf) __B);
-}
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \param __B
-///    A 256-bit vector of [8 x float].
-/// \param __W
-///    A 256-bit vector of [16 x bfloat].
-/// \param __U
-///    A 16-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A or __B. A 0 means element from __W.
-/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
-///    conversion of __B, and higher 128 bits come from conversion of __A.
-static __inline__ __m256bh __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
-  return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
-                                         (__v16hi)__W);
-}
-
-/// Convert Two Packed Single Data to One Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \param __B
-///    A 256-bit vector of [8 x float].
-/// \param __U
-///    A 16-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A or __B. A 0 means element is zero.
-/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
-///    conversion of __B, and higher 128 bits come from conversion of __A.
-static __inline__ __m256bh __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
-  return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __A, and higher 64 bits are 0.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_cvtneps_pbh(__m128 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
-                                                  (__v8hi)_mm_undefined_si128(),
-                                                  (__mmask8)-1);
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \param __W
-///    A 128-bit vector of [8 x bfloat].
-/// \param __U
-///    A 4-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A. A 0 means element from __W.
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __A, and higher 64 bits are 0.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
-                                                        (__v8hi)__W,
-                                                        (__mmask8)__U);
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [4 x float].
-/// \param __U
-///    A 4-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A. A 0 means element is zero.
-/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
-///    conversion of __A, and higher 64 bits are 0.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
-                                                    (__v8hi)_mm_setzero_si128(),
-                                                    (__mmask8)__U);
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
-///    __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS256
-_mm256_cvtneps_pbh(__m256 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
-                                                  (__v8hi)_mm_undefined_si128(),
-                                                  (__mmask8)-1);
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \param __W
-///    A 128-bit vector of [8 x bfloat].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A. A 0 means element from __W.
-/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
-///    __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
-                                                        (__v8hi)__W,
-                                                        (__mmask8)__U);
-}
-
-/// Convert Packed Single Data to Packed BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [8 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means conversion of __A. A 0 means element is zero.
-/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
-///    __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
-  return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
-                                                    (__v8hi)_mm_setzero_si128(),
-                                                    (__mmask8)__U);
-}
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \param __B
-///    A 128-bit vector of [8 x bfloat].
-/// \param __D
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) {
-  return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D,
-                                             (__v4si)__A,
-                                             (__v4si)__B);
-}
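-
-// Editor's note (illustrative, not part of the upstream header): per fp32
-// lane i, VDPBF16PS computes, in scalar terms,
-//   __D[i] += f32(__A[2*i]) * f32(__B[2*i]) + f32(__A[2*i+1]) * f32(__B[2*i+1])
-// i.e. each pair of adjacent bf16 lanes is widened to fp32, multiplied
-// element-wise, and the pair sum is accumulated into the fp32 lane.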
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \param __B
-///    A 128-bit vector of [8 x bfloat].
-/// \param __D
-///    A 128-bit vector of [4 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
-/// \returns A 128-bit vector of [4 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_dpbf16_ps(__m128 __D, __mmask8 __U, __m128bh __A, __m128bh __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                           (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
-                                           (__v4sf)__D);
-}
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \param __B
-///    A 128-bit vector of [8 x bfloat].
-/// \param __D
-///    A 128-bit vector of [4 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
-/// \returns A 128-bit vector of [4 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                           (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
-                                           (__v4sf)_mm_setzero_si128());
-}
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [16 x bfloat].
-/// \param __B
-///    A 256-bit vector of [16 x bfloat].
-/// \param __D
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
-  return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
-                                             (__v8si)__A,
-                                             (__v8si)__B);
-}
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [16 x bfloat].
-/// \param __B
-///    A 256-bit vector of [16 x bfloat].
-/// \param __D
-///    A 256-bit vector of [8 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
-/// \returns A 256-bit vector of [8 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_dpbf16_ps(__m256 __D, __mmask8 __U, __m256bh __A, __m256bh __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
-                                        (__v8sf)__D);
-}
-
-/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
-///
-/// \param __A
-///    A 256-bit vector of [16 x bfloat].
-/// \param __B
-///    A 256-bit vector of [16 x bfloat].
-/// \param __D
-///    A 256-bit vector of [8 x float].
-/// \param __U
-///    An 8-bit mask value specifying what is chosen for each element.
-///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
-/// \returns A 256-bit vector of [8 x float] that comes from the dot product
-///    of __A, __B and __D.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
-                                        (__v8sf)_mm256_setzero_si256());
-}
-
-/// Convert One Single float Data to One BF16 Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
-///
-/// \param __A
-///    A float value.
-/// \returns A bf16 value whose sign and exponent fields are unchanged and
-///    whose fraction field is narrowed to 7 bits (rounded to nearest even).
-static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
-  __v4sf __V = {__A, 0, 0, 0};
-  __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
-      (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
-  return __R[0];
-}
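-
-// Editor's illustration (not part of the upstream header): a bf16 value is
-// the upper 16 bits of the matching fp32 bit pattern. A portable sketch of
-// the round-to-nearest-even narrowing performed above (NaN inputs would
-// need an extra check in real code):
-static __inline__ unsigned short __example_f32_to_bf16(float __x) {
-  union { float __f; unsigned int __u; } __v = {__x};
-  unsigned int __bias = 0x7fffU + ((__v.__u >> 16) & 1U); // round to even
-  return (unsigned short)((__v.__u + __bias) >> 16);
-}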
-
-/// Convert Packed BF16 Data to Packed float Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __A
-///    A 128-bit vector of [4 x bfloat].
-/// \returns A 128-bit vector of [4 x float] that comes from conversion of
-///    __A.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) {
-  return _mm_castsi128_ps(
-      (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16));
-}
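-
-// Editor's note (illustrative): bf16 -> fp32 widening is exact. The helper
-// above widens each 16-bit lane to 32 bits and shifts it left by 16, which
-// leaves the bf16 bit pattern in the upper half and zeros below -- exactly
-// the fp32 encoding of the same value, so no rounding is involved.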
-
-/// Convert Packed BF16 Data to Packed float Data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] that comes from conversion of
-///    __A.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
-  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
-      (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
-}
-
-/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __U
-///    A 4-bit mask. Elements are zeroed out when the corresponding mask
-///    bit is not set.
-/// \param __A
-///    A 128-bit vector of [4 x bfloat].
-/// \returns A 128-bit vector of [4 x float] that comes from conversion of
-///    __A.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
-  return _mm_castsi128_ps((__m128i)_mm_slli_epi32(
-      (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
-}
-
-/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __U
-///    An 8-bit mask. Elements are zeroed out when the corresponding mask
-///    bit is not set.
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] that comes from conversion of
-///    __A.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
-  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
-      (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
-}
-
-/// Convert Packed BF16 Data to Packed float Data using merging mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __S
-///    A 128-bit vector of [4 x float]. Elements are copied from __S when
-///     the corresponding mask bit is not set.
-/// \param __U
-///    A 4-bit mask. Result elements are taken from __S when the
-///    corresponding mask bit is not set.
-/// \param __A
-///    A 128-bit vector of [4 x bfloat].
-/// \returns A 128-bit vector of [4 x float] that comes from conversion of
-///    __A.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) {
-  return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32(
-      (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A),
-      16));
-}
-
-/// Convert Packed BF16 Data to Packed float Data using merging mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \param __S
-///    A 256-bit vector of [8 x float]. Elements are copied from __S when
-///     the corresponding mask bit is not set.
-/// \param __U
-///    An 8-bit mask. Result elements are taken from __S when the
-///    corresponding mask bit is not set.
-/// \param __A
-///    A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] that comes from conversion of
-///    __A.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
-  return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
-      (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A),
-      16));
-}
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
deleted file mode 100644
index 7873516..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
+++ /dev/null
@@ -1,2809 +0,0 @@
-/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLBWINTRIN_H
-#define __AVX512VLBWINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(256)))
-
-/* Integer compare */
-
-#define _mm_cmp_epi8_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), (int)(p), \
-                                          (__mmask16)-1))
-
-#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), (int)(p), \
-                                          (__mmask16)(m)))
-
-#define _mm_cmp_epu8_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                           (__v16qi)(__m128i)(b), (int)(p), \
-                                           (__mmask16)-1))
-
-#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                           (__v16qi)(__m128i)(b), (int)(p), \
-                                           (__mmask16)(m)))
-
-#define _mm256_cmp_epi8_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), (int)(p), \
-                                          (__mmask32)-1))
-
-#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), (int)(p), \
-                                          (__mmask32)(m)))
-
-#define _mm256_cmp_epu8_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                           (__v32qi)(__m256i)(b), (int)(p), \
-                                           (__mmask32)-1))
-
-#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                           (__v32qi)(__m256i)(b), (int)(p), \
-                                           (__mmask32)(m)))
-
-#define _mm_cmp_epi16_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm_cmp_epu16_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                          (__v8hi)(__m128i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                          (__v8hi)(__m128i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_epi16_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), (int)(p), \
-                                          (__mmask16)-1))
-
-#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), (int)(p), \
-                                          (__mmask16)(m)))
-
-#define _mm256_cmp_epu16_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                           (__v16hi)(__m256i)(b), (int)(p), \
-                                           (__mmask16)-1))
-
-#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                           (__v16hi)(__m256i)(b), (int)(p), \
-                                           (__mmask16)(m)))
-
-#define _mm_cmpeq_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi8_mask(A, B) \
-    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi8_mask(k, A, B) \
-    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi8_mask(A, B) \
-    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \
-    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu8_mask(A, B) \
-    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu8_mask(k, A, B) \
-    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu8_mask(A, B) \
-    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \
-    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi16_mask(A, B) \
-    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi16_mask(k, A, B) \
-    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi16_mask(A, B) \
-    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi16_mask(k, A, B) \
-    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu16_mask(A, B) \
-    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu16_mask(k, A, B) \
-    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu16_mask(A, B) \
-    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \
-    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_add_epi8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_add_epi8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
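-
-// Editor's note (illustrative, not part of the upstream header): every
-// masked integer op in this file follows the same shape -- compute the
-// full-width result, then use a lane-select builtin that keeps result lanes
-// where the mask bit is 1 and substitutes __W (mask_) or zero (maskz_)
-// where it is 0.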
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_add_epi16(__A, __B),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_add_epi16(__A, __B),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_sub_epi8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_sub_epi8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_sub_epi16(__A, __B),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_sub_epi16(__A, __B),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_add_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_add_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_add_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_add_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_sub_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_sub_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sub_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sub_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_mullo_epi16(__A, __B),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_mullo_epi16(__A, __B),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mullo_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mullo_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
-{
-  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
-              (__v16qi) __W,
-              (__v16qi) __A);
-}
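-
-// Editor's illustration (not part of the upstream header): a hypothetical
-// combination of the compare macros and the blend above -- a per-byte
-// signed minimum built from a less-than mask:
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-__example_min_epi8(__m128i __a, __m128i __b) {
-  __mmask16 __lt = _mm_cmplt_epi8_mask(__a, __b); // bit i set where a < b
-  return _mm_mask_blend_epi8(__lt, __b, __a);     // take __a where a < b
-}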
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
-{
-  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
-               (__v32qi) __W,
-               (__v32qi) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
-{
-  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
-               (__v8hi) __W,
-               (__v8hi) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
-{
-  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
-               (__v16hi) __W,
-               (__v16hi) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_abs_epi8(__A),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_abs_epi8(__A),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_abs_epi8(__A),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_abs_epi8(__A),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_abs_epi16(__A),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_abs_epi16(__A),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_abs_epi16(__A),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_abs_epi16(__A),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_packs_epi32(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_packs_epi32(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                          (__v16hi)_mm256_packs_epi32(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                          (__v16hi)_mm256_packs_epi32(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_packs_epi16(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_packs_epi16(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                          (__v32qi)_mm256_packs_epi16(__A, __B),
-                                          (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                          (__v32qi)_mm256_packs_epi16(__A, __B),
-                                          (__v32qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_packus_epi32(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_packus_epi32(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                         (__v16hi)_mm256_packus_epi32(__A, __B),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                         (__v16hi)_mm256_packus_epi32(__A, __B),
-                                         (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                            (__v16qi)_mm_packus_epi16(__A, __B),
-                                            (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                            (__v16qi)_mm_packus_epi16(__A, __B),
-                                            (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                         (__v32qi)_mm256_packus_epi16(__A, __B),
-                                         (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                         (__v32qi)_mm256_packus_epi16(__A, __B),
-                                         (__v32qi)__W);
-}
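-
-// Editor's note (illustrative): packs_* saturate each narrowed element to
-// the signed range of the destination type, packus_* to the unsigned range;
-// the masked variants then apply the usual select-on-mask pattern.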
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_adds_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_adds_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_adds_epi8(__A, __B),
-                                            (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_adds_epi8(__A, __B),
-                                            (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_adds_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_adds_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_adds_epi16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_adds_epi16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_adds_epu8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_adds_epu8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_adds_epu8(__A, __B),
-                                            (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_adds_epu8(__A, __B),
-                                            (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_adds_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_adds_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_adds_epu16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_adds_epu16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_avg_epu8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_avg_epu8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_avg_epu8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                             (__v32qi)_mm256_avg_epu8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_avg_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_avg_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                            (__v16hi)_mm256_avg_epu16(__A, __B),
-                                            (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                            (__v16hi)_mm256_avg_epu16(__A, __B),
-                                            (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_max_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_max_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_max_epi8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_max_epi8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_max_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_max_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_max_epi16(__A, __B),
-                                            (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_max_epi16(__A, __B),
-                                            (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_max_epu8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_max_epu8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_max_epu8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_max_epu8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_max_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_max_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_max_epu16(__A, __B),
-                                            (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_max_epu16(__A, __B),
-                                            (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_min_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_min_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_min_epi8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_min_epi8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_min_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_min_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_min_epi16(__A, __B),
-                                            (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_min_epi16(__A, __B),
-                                            (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_min_epu8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm_min_epu8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_min_epu8(__A, __B),
-                                             (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
-                                             (__v32qi)_mm256_min_epu8(__A, __B),
-                                             (__v32qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_min_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                             (__v8hi)_mm_min_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_min_epu16(__A, __B),
-                                            (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                            (__v16hi)_mm256_min_epu16(__A, __B),
-                                            (__v16hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                            (__v16qi)_mm_shuffle_epi8(__A, __B),
-                                            (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                            (__v16qi)_mm_shuffle_epi8(__A, __B),
-                                            (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                         (__v32qi)_mm256_shuffle_epi8(__A, __B),
-                                         (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                         (__v32qi)_mm256_shuffle_epi8(__A, __B),
-                                         (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_subs_epi8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_subs_epi8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_subs_epi8(__A, __B),
-                                            (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_subs_epi8(__A, __B),
-                                            (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_subs_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_subs_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_subs_epi16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_subs_epi16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_subs_epu8(__A, __B),
-                                             (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                             (__v16qi)_mm_subs_epu8(__A, __B),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_subs_epu8(__A, __B),
-                                            (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                            (__v32qi)_mm256_subs_epu8(__A, __B),
-                                            (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_subs_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_subs_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_subs_epu16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_subs_epu16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
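-// Two-source permutes: each 16-bit index in __I selects an element from the
-// concatenation of __A and __B. The mask_ form keeps __A in masked-off lanes
-// (vpermt2w behavior), while the mask2_ form keeps the index register __I
-// (vpermi2w behavior).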
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
-                                                 (__v8hi) __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I,
-                            __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128(__U,
-                                  (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
-                                  (__v8hi)__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U,
-                             __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128(__U,
-                                  (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
-                                  (__v8hi)__I);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_epi16(__mmask8 __U, __m128i __A, __m128i __I,
-                             __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128(__U,
-                                  (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
-                                  (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
-                                                 (__v16hi)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I,
-                               __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256(__U,
-                              (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
-                              (__v16hi)__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U,
-                                __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256(__U,
-                              (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
-                              (__v16hi)__I);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_epi16(__mmask16 __U, __m256i __A, __m256i __I,
-                                __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256(__U,
-                              (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
-                              (__v16hi)_mm256_setzero_si256());
-}
-
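-// Multiply-add wrappers: maddubs (pmaddubsw) multiplies unsigned bytes of
-// __X by signed bytes of __Y and adds adjacent pairs with signed saturation
-// into 16-bit lanes; madd (pmaddwd) multiplies signed 16-bit lanes and adds
-// adjacent pairs into 32-bit lanes, which is why the madd forms blend with
-// the 32-bit __builtin_ia32_selectd_* builtins.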
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                            (__v8hi)_mm_maddubs_epi16(__X, __Y),
-                                            (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                            (__v8hi)_mm_maddubs_epi16(__X, __Y),
-                                            (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X,
-                          __m256i __Y) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                        (__v16hi)_mm256_maddubs_epi16(__X, __Y),
-                                        (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                        (__v16hi)_mm256_maddubs_epi16(__X, __Y),
-                                        (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_madd_epi16(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_madd_epi16(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_madd_epi16(__A, __B),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_madd_epi16(__A, __B),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
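-// 16-to-8-bit down-conversions: cvtsepi16_epi8 narrows with signed
-// saturation, cvtusepi16_epi8 with unsigned saturation, and cvtepi16_epi8 by
-// plain truncation. For 128-bit sources only eight result bytes are
-// produced, so the upper half of the __m128i result is zeroed.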
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi16_epi8 (__m128i __A) {
-  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
-               (__v16qi) _mm_setzero_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
-               (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
-               (__v16qi) _mm_setzero_si128(),
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi16_epi8 (__m256i __A) {
-  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
-               (__v16qi) _mm_setzero_si128(),
-               (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
-  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
-               (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
-  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
-               (__v16qi) _mm_setzero_si128(),
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi16_epi8 (__m128i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
-                (__v16qi) _mm_setzero_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
-                (__v16qi) _mm_setzero_si128(),
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi16_epi8 (__m256i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
-                (__v16qi) _mm_setzero_si128(),
-                (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
-  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
-                (__v16qi) _mm_setzero_si128(),
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi16_epi8 (__m128i __A) {
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v8hi)__A, __v8qi),
-      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-      12, 13, 14, 15);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
-               (__v16qi) __O,
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
-  return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
-               (__v16qi) _mm_setzero_si128(),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
-}
-
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi8 (__m256i __A) {
-  return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm256_cvtepi16_epi8(__A),
-                                             (__v16qi)__O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
-                                             (__v16qi)_mm256_cvtepi16_epi8(__A),
-                                             (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
-  __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
-  __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
-  __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
-}
-
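-// High-half multiplies: mulhrs (pmulhrsw) computes (__X * __Y + 0x4000) >> 15
-// per 16-bit lane, while mulhi_epu16/mulhi_epi16 return the upper 16 bits of
-// the full 32-bit unsigned/signed product.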
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhrs_epi16(__X, __Y),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhrs_epi16(__X, __Y),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
-                                         (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhi_epu16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhi_epu16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_mulhi_epu16(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_mulhi_epu16(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhi_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_mulhi_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_mulhi_epi16(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_mulhi_epi16(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
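-// The unpack wrappers interleave elements from the high (unpackhi) or low
-// (unpacklo) half of each 128-bit lane of __A and __B; as in AVX2, the
-// 256-bit forms operate on their two 128-bit lanes independently.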
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                           (__v16qi)_mm_unpackhi_epi8(__A, __B),
-                                           (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                           (__v16qi)_mm_unpackhi_epi8(__A, __B),
-                                           (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                        (__v32qi)_mm256_unpackhi_epi8(__A, __B),
-                                        (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                        (__v32qi)_mm256_unpackhi_epi8(__A, __B),
-                                        (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                           (__v8hi)_mm_unpackhi_epi16(__A, __B),
-                                           (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                           (__v8hi)_mm_unpackhi_epi16(__A, __B),
-                                           (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                       (__v16hi)_mm256_unpackhi_epi16(__A, __B),
-                                       (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                       (__v16hi)_mm256_unpackhi_epi16(__A, __B),
-                                       (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                           (__v16qi)_mm_unpacklo_epi8(__A, __B),
-                                           (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
-                                           (__v16qi)_mm_unpacklo_epi8(__A, __B),
-                                           (__v16qi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                        (__v32qi)_mm256_unpacklo_epi8(__A, __B),
-                                        (__v32qi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
-                                        (__v32qi)_mm256_unpacklo_epi8(__A, __B),
-                                        (__v32qi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                           (__v8hi)_mm_unpacklo_epi16(__A, __B),
-                                           (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                           (__v8hi)_mm_unpacklo_epi16(__A, __B),
-                                           (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                       (__v16hi)_mm256_unpacklo_epi16(__A, __B),
-                                       (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                       (__v16hi)_mm256_unpacklo_epi16(__A, __B),
-                                       (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtepi8_epi16(__A),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtepi8_epi16(__A),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_cvtepi8_epi16(__A),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_cvtepi8_epi16(__A),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtepu8_epi16(__A),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_cvtepu8_epi16(__A),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_cvtepu8_epi16(__A),
-                                             (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                             (__v16hi)_mm256_cvtepu8_epi16(__A),
-                                             (__v16hi)_mm256_setzero_si256());
-}
-
-
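-// shufflehi/shufflelo take an 8-bit immediate that must be a compile-time
-// constant, so their masked forms are macros that wrap the unmasked
-// intrinsic in the same select idiom rather than inline functions.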
-#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
-                                       (__v8hi)(__m128i)(W)))
-
-#define _mm_maskz_shufflehi_epi16(U, A, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
-                                       (__v8hi)_mm_setzero_si128()))
-
-#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
-                                       (__v16hi)(__m256i)(W)))
-
-#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
-                                       (__v16hi)_mm256_setzero_si256()))
-
-#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
-                                       (__v8hi)(__m128i)(W)))
-
-#define _mm_maskz_shufflelo_epi16(U, A, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
-                                       (__v8hi)_mm_setzero_si128()))
-
-#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                       (__v16hi)_mm256_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v16hi)(__m256i)(W)))
-
-#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                       (__v16hi)_mm256_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v16hi)_mm256_setzero_si256()))
-
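-// 16-bit shifts: sllv/srlv/srav shift each lane by the count in the
-// corresponding lane of the second vector operand, sll/srl/sra use a single
-// count taken from the low 64 bits of an __m128i, and slli/srli/srai take a
-// scalar count. Counts of 16 or greater produce zero (sign fill for the
-// arithmetic right shifts).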
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi16(__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_sllv_epi16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_sllv_epi16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi16(__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sllv_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sllv_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sll_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sll_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_sll_epi16(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_sll_epi16(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
-                       unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
-                                         (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi16(__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_srlv_epi16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_srlv_epi16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi16(__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srlv_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srlv_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi16(__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_srav_epi16(__A, __B),
-                                           (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                           (__v16hi)_mm256_srav_epi16(__A, __B),
-                                           (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi16(__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srav_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srav_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sra_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_sra_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_sra_epi16(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_sra_epi16(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
-                       unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
-                                         (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srl_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srl_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_srl_epi16(__A, __B),
-                                          (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                          (__v16hi)_mm256_srl_epi16(__A, __B),
-                                          (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srli_epi16(__A, __B),
-                                             (__v8hi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srli_epi16(__A, __B),
-                                             (__v8hi)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srli_epi16(__A, __B),
-                                         (__v16hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srli_epi16(__A, __B),
-                                         (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
-                (__v8hi) __A,
-                (__v8hi) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
-                (__v8hi) __A,
-                (__v8hi) _mm_setzero_si128 ());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
-                (__v16hi) __A,
-                (__v16hi) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
-                (__v16hi) __A,
-                (__v16hi) _mm256_setzero_si256 ());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
-                (__v16qi) __A,
-                (__v16qi) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
-                (__v16qi) __A,
-                (__v16qi) _mm_setzero_si128 ());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
-                (__v32qi) __A,
-                (__v32qi) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
-                (__v32qi) __A,
-                (__v32qi) _mm256_setzero_si256 ());
-}
-
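[Reviewer note] _mm_mask_mov_epi16/_epi8 are per-lane blends: lane i comes
from __A when mask bit i is set, otherwise from __W (or zero in the maskz
form). A sketch under the same include/flag assumptions as above:

/* result byte i = (take_from_a bit i) ? a byte i : b byte i */
static inline __m128i blend_bytes(__m128i a, __m128i b, __mmask16 take_from_a)
{
  return _mm_mask_mov_epi8(b, take_from_a, a);
}
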
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
-{
-  return (__m128i) __builtin_ia32_selectb_128(__M,
-                                              (__v16qi) _mm_set1_epi8(__A),
-                                              (__v16qi) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
-{
- return (__m128i) __builtin_ia32_selectb_128(__M,
-                                             (__v16qi) _mm_set1_epi8(__A),
-                                             (__v16qi) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
-{
-  return (__m256i) __builtin_ia32_selectb_256(__M,
-                                              (__v32qi) _mm256_set1_epi8(__A),
-                                              (__v32qi) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
-{
-  return (__m256i) __builtin_ia32_selectb_256(__M,
-                                              (__v32qi) _mm256_set1_epi8(__A),
-                                              (__v32qi) _mm256_setzero_si256());
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi16 (void const *__P)
-{
-  struct __loadu_epi16 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi16*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P,
-                 (__v8hi) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P,
-                 (__v8hi)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi16 (void const *__P)
-{
-  struct __loadu_epi16 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi16*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P,
-                 (__v16hi) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P,
-                 (__v16hi)
-                 _mm256_setzero_si256 (),
-                 (__mmask16) __U);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi8 (void const *__P)
-{
-  struct __loadu_epi8 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi8*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P,
-                 (__v16qi) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P,
-                 (__v16qi)
-                 _mm_setzero_si128 (),
-                 (__mmask16) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi8 (void const *__P)
-{
-  struct __loadu_epi8 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi8*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P,
-                 (__v32qi) __W,
-                 (__mmask32) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P,
-                 (__v32qi)
-                 _mm256_setzero_si256 (),
-                 (__mmask32) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi16 (void *__P, __m128i __A)
-{
-  struct __storeu_epi16 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi16*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
-             (__v8hi) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi16 (void *__P, __m256i __A)
-{
-  struct __storeu_epi16 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi16*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
-{
-  __builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
-             (__v16hi) __A,
-             (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi8 (void *__P, __m128i __A)
-{
-  struct __storeu_epi8 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi8*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
-{
-  __builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
-             (__v16qi) __A,
-             (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi8 (void *__P, __m256i __A)
-{
-  struct __storeu_epi8 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi8*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
-{
-  __builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
-             (__v32qi) __A,
-             (__mmask32) __U);
-}
-
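[Reviewer note] The masked unaligned loads/stores above are the standard tool
for buffer tails: masked-off lanes neither fault nor write. An illustrative
sketch (assumes n <= 16; helper name is not from the header):

/* Copy the last n bytes of a buffer without a scalar cleanup loop. */
static inline void copy_tail(unsigned char *dst, const unsigned char *src,
                             unsigned n)
{
  __mmask16 tail = (__mmask16)((1u << n) - 1u); /* low n bits enabled */
  __m128i v = _mm_maskz_loadu_epi8(tail, src);  /* masked-off lanes read 0 */
  _mm_mask_storeu_epi8(dst, tail, v);           /* masked-off lanes untouched */
}
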
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_test_epi8_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_test_epi8_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi8_mask (__U, _mm256_and_si256(__A, __B),
-                                       _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi16_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi16_mask (__U, _mm_and_si128 (__A, __B),
-                                     _mm_setzero_si128());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_test_epi16_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi16_mask (_mm256_and_si256 (__A, __B),
-                                   _mm256_setzero_si256 ());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi16_mask (__U, _mm256_and_si256(__A, __B),
-                                        _mm256_setzero_si256());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_testn_epi8_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi8_mask (__U, _mm_and_si128 (__A, __B),
-                                  _mm_setzero_si128());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi8_mask (_mm256_and_si256 (__A, __B),
-                                 _mm256_setzero_si256());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi8_mask (__U, _mm256_and_si256 (__A, __B),
-                                      _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi16_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi16_mask (_mm256_and_si256(__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi16_mask (__U, _mm256_and_si256 (__A, __B),
-                                       _mm256_setzero_si256());
-}
-
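[Reviewer note] As the definitions above spell out, test sets mask bit i when
(a & b) is nonzero in lane i, and testn when it is zero. For example:

/* Nonzero iff some byte position has overlapping set bits in a and b. */
static inline int any_byte_overlap(__m128i a, __m128i b)
{
  return _mm_test_epi8_mask(a, b) != 0;
}
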
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_movepi8_mask (__m128i __A)
-{
-  return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_movepi8_mask (__m256i __A)
-{
-  return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_movepi16_mask (__m128i __A)
-{
-  return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_movepi16_mask (__m256i __A)
-{
-  return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_movm_epi8 (__mmask16 __A)
-{
-  return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_movm_epi8 (__mmask32 __A)
-{
-  return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_movm_epi16 (__mmask8 __A)
-{
-  return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_movm_epi16 (__mmask16 __A)
-{
-  return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
-}
-
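[Reviewer note] movepi*_mask packs each lane's sign bit into a k-mask, and
movm_epi* expands a k-mask back into all-ones/all-zeros lanes, so sign tests
can feed the masked operations directly. Sketch:

/* Zero every 16-bit lane whose sign bit is clear, i.e. keep the negatives. */
static inline __m128i keep_negative_words(__m128i v)
{
  __mmask8 neg = _mm_movepi16_mask(v);
  return _mm_maskz_mov_epi16(neg, v);
}
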
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectb_128(__M,
-                                             (__v16qi) _mm_broadcastb_epi8(__A),
-                                             (__v16qi) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectb_128(__M,
-                                             (__v16qi) _mm_broadcastb_epi8(__A),
-                                             (__v16qi) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectb_256(__M,
-                                             (__v32qi) _mm256_broadcastb_epi8(__A),
-                                             (__v32qi) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectb_256(__M,
-                                             (__v32qi) _mm256_broadcastb_epi8(__A),
-                                             (__v32qi) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128(__M,
-                                             (__v8hi) _mm_broadcastw_epi16(__A),
-                                             (__v8hi) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectw_128(__M,
-                                             (__v8hi) _mm_broadcastw_epi16(__A),
-                                             (__v8hi) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256(__M,
-                                             (__v16hi) _mm256_broadcastw_epi16(__A),
-                                             (__v16hi) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectw_256(__M,
-                                             (__v16hi) _mm256_broadcastw_epi16(__A),
-                                             (__v16hi) _mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
-{
-  return (__m256i) __builtin_ia32_selectw_256 (__M,
-                                               (__v16hi) _mm256_set1_epi16(__A),
-                                               (__v16hi) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
-{
-  return (__m256i) __builtin_ia32_selectw_256(__M,
-                                              (__v16hi)_mm256_set1_epi16(__A),
-                                              (__v16hi) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
-{
-  return (__m128i) __builtin_ia32_selectw_128(__M,
-                                              (__v8hi) _mm_set1_epi16(__A),
-                                              (__v8hi) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
-{
-  return (__m128i) __builtin_ia32_selectw_128(__M,
-                                              (__v8hi) _mm_set1_epi16(__A),
-                                              (__v8hi) _mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                        (__v8hi)_mm_permutexvar_epi16(__A, __B),
-                                        (__v8hi) _mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
-          __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
-                                        (__v8hi)_mm_permutexvar_epi16(__A, __B),
-                                        (__v8hi)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
-        __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                    (__v16hi)_mm256_permutexvar_epi16(__A, __B),
-                                    (__v16hi)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
-             __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
-                                    (__v16hi)_mm256_permutexvar_epi16(__A, __B),
-                                    (__v16hi)__W);
-}
-
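[Reviewer note] _mm_permutexvar_epi16 takes the index vector as its first
argument and the data as its second (note the swapped operands in the builtin
call above); with eight words, only the low 3 bits of each index are used.

/* Reverse the eight 16-bit lanes of v. */
static inline __m128i reverse_words(__m128i v)
{
  const __m128i idx = _mm_setr_epi16(7, 6, 5, 4, 3, 2, 1, 0);
  return _mm_permutexvar_epi16(idx, v);
}
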
-#define _mm_mask_alignr_epi8(W, U, A, B, N) \
-  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
-                                 (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
-                                 (__v16qi)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi8(U, A, B, N) \
-  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
-                                 (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
-                                 (__v16qi)_mm_setzero_si128()))
-
-#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
-  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
-                              (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
-                              (__v32qi)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi8(U, A, B, N) \
-  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
-                              (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
-                              (__v32qi)_mm256_setzero_si256()))
-
-#define _mm_dbsad_epu8(A, B, imm) \
-  ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
-                                       (__v16qi)(__m128i)(B), (int)(imm)))
-
-#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
-                                      (__v8hi)(__m128i)(W)))
-
-#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
-                                      (__v8hi)_mm_setzero_si128()))
-
-#define _mm256_dbsad_epu8(A, B, imm) \
-  ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
-                                       (__v32qi)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                  (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
-                                  (__v16hi)(__m256i)(W)))
-
-#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                  (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
-                                  (__v16hi)_mm256_setzero_si256()))
-
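[Reviewer note] alignr and dbsad are macros rather than functions because N
and imm must be integer constant expressions for the underlying instructions.
A masked-alignr sketch (helper name illustrative):

/* Selected lanes get bytes 4..19 of the hi:lo concatenation; the rest keep
 * `fallback`. */
static inline __m128i shift_or_keep(__m128i hi, __m128i lo, __m128i fallback,
                                    __mmask16 m)
{
  return _mm_mask_alignr_epi8(fallback, m, hi, lo, 4);
}
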
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif /* __AVX512VLBWINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
deleted file mode 100644
index 3d27853..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
+++ /dev/null
@@ -1,2068 +0,0 @@
-/*===---------- avx512vlfp16intrin.h - AVX512-FP16 intrinsics --------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error                                                                         \
-    "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLFP16INTRIN_H
-#define __AVX512VLFP16INTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS256                                                  \
-  __attribute__((__always_inline__, __nodebug__,                               \
-                 __target__("avx512fp16, avx512vl"),                           \
-                 __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128                                                  \
-  __attribute__((__always_inline__, __nodebug__,                               \
-                 __target__("avx512fp16, avx512vl"),                           \
-                 __min_vector_width__(128)))
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS128 _mm_cvtsh_h(__m128h __a) {
-  return __a[0];
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS256 _mm256_cvtsh_h(__m256h __a) {
-  return __a[0];
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_set_sh(_Float16 __h) {
-  return __extension__(__m128h){__h, 0, 0, 0, 0, 0, 0, 0};
-}
-
-static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_set1_ph(_Float16 __h) {
-  return (__m128h)(__v8hf){__h, __h, __h, __h, __h, __h, __h, __h};
-}
-
-static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_set1_ph(_Float16 __h) {
-  return (__m256h)(__v16hf){__h, __h, __h, __h, __h, __h, __h, __h,
-                            __h, __h, __h, __h, __h, __h, __h, __h};
-}
-
-static __inline __m128h __DEFAULT_FN_ATTRS128
-_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
-           _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) {
-  return (__m128h)(__v8hf){__h8, __h7, __h6, __h5, __h4, __h3, __h2, __h1};
-}
-
-static __inline __m256h __DEFAULT_FN_ATTRS256
-_mm256_set1_pch(_Float16 _Complex h) {
-  return (__m256h)_mm256_set1_ps(__builtin_bit_cast(float, h));
-}
-
-static __inline __m128h __DEFAULT_FN_ATTRS128
-_mm_set1_pch(_Float16 _Complex h) {
-  return (__m128h)_mm_set1_ps(__builtin_bit_cast(float, h));
-}
-
-static __inline __m256h __DEFAULT_FN_ATTRS256
-_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
-              _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
-              _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
-              _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) {
-  return (__m256h)(__v16hf){__h16, __h15, __h14, __h13, __h12, __h11,
-                            __h10, __h9,  __h8,  __h7,  __h6,  __h5,
-                            __h4,  __h3,  __h2,  __h1};
-}
-
-#define _mm_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8)                            \
-  _mm_set_ph((h8), (h7), (h6), (h5), (h4), (h3), (h2), (h1))
-
-#define _mm256_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \
-                       h14, h15, h16)                                          \
-  _mm256_set_ph((h16), (h15), (h14), (h13), (h12), (h11), (h10), (h9), (h8),   \
-                (h7), (h6), (h5), (h4), (h3), (h2), (h1))
-
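[Reviewer note] As with the float and double setters, _mm_set_ph lists
elements from the highest lane down, while the _mm_setr_ph macros list them
from lane 0 up. A sketch (assumes <immintrin.h>, -mavx512fp16 -mavx512vl):

/* Lanes 0..7 hold the values 0,1,...,7. */
static inline __m128h ramp8(void)
{
  return _mm_setr_ph((_Float16)0, (_Float16)1, (_Float16)2, (_Float16)3,
                     (_Float16)4, (_Float16)5, (_Float16)6, (_Float16)7);
}
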
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_add_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)((__v16hf)__A + (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_add_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)((__v8hf)__A + (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
-                                              (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
-                                              (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_sub_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)((__v16hf)__A - (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)((__v8hf)__A - (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
-                                              (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
-                                              (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_mul_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)((__v16hf)__A * (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)((__v8hf)__A * (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
-                                              (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
-                                              (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_div_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)((__v16hf)__A / (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_div_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_div_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)((__v8hf)__A / (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
-                                              (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
-                                              (__v8hf)_mm_setzero_ph());
-}
-
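[Reviewer note] Each arithmetic operation above comes in a merging form
(mask, keeps __W) and a zeroing form (maskz); combined with a compare mask
this gives branch-free guards. Sketch using _mm_cmp_ph_mask, which this
header defines further down:

/* Per-lane num/den, with lanes where den == 0 forced to 0 instead of inf. */
static inline __m128h safe_div(__m128h num, __m128h den)
{
  __mmask8 ok = _mm_cmp_ph_mask(den, _mm_setzero_ph(), _CMP_NEQ_UQ);
  return _mm_maskz_div_ph(ok, num, den);
}
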
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_min_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_min_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
-      (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
-      (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_max_ph(__m256h __A,
-                                                              __m256h __B) {
-  return (__m256h)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_max_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
-      (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_ph(__m128h __A,
-                                                           __m128h __B) {
-  return (__m128h)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A,
-                                                                __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
-      (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_ph(__mmask8 __U,
-                                                                 __m128h __A,
-                                                                 __m128h __B) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_abs_ph(__m256h __A) {
-  return (__m256h)_mm256_and_epi32(_mm256_set1_epi32(0x7FFF7FFF), (__m256i)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_abs_ph(__m128h __A) {
-  return (__m128h)_mm_and_epi32(_mm_set1_epi32(0x7FFF7FFF), (__m128i)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_conj_pch(__m256h __A) {
-  return (__m256h)_mm256_xor_ps((__m256)__A, _mm256_set1_ps(-0.0f));
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_conj_pch(__m256h __W, __mmask8 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_selectps_256(
-      (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_conj_pch(__mmask8 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_selectps_256(
-      (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_conj_pch(__m128h __A) {
-  return (__m128h)_mm_xor_ps((__m128)__A, _mm_set1_ps(-0.0f));
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_conj_pch(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A) {
-  return (__m128h)__builtin_ia32_selectps_128(
-      (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_conj_pch(__mmask8 __U, __m128h __A) {
-  return (__m128h)__builtin_ia32_selectps_128(
-      (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)_mm_setzero_ps());
-}
-
-#define _mm256_cmp_ph_mask(a, b, p)                                            \
-  ((__mmask16)__builtin_ia32_cmpph256_mask(                                    \
-      (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)-1))
-
-#define _mm256_mask_cmp_ph_mask(m, a, b, p)                                    \
-  ((__mmask16)__builtin_ia32_cmpph256_mask(                                    \
-      (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)(m)))
-
-#define _mm_cmp_ph_mask(a, b, p)                                               \
-  ((__mmask8)__builtin_ia32_cmpph128_mask(                                     \
-      (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)-1))
-
-#define _mm_mask_cmp_ph_mask(m, a, b, p)                                       \
-  ((__mmask8)__builtin_ia32_cmpph128_mask(                                     \
-      (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)(m)))
-
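[Reviewer note] The cmp macros produce a k-mask from one of the _CMP_*
predicates (defined in <immintrin.h>), which the masked operations then
consume. Sketch:

/* Keep strictly positive lanes; negatives, zeros and NaNs become 0. */
static inline __m128h relu_ph(__m128h v)
{
  __mmask8 pos = _mm_cmp_ph_mask(v, _mm_setzero_ph(), _CMP_GT_OQ);
  return _mm_maskz_add_ph(pos, v, _mm_setzero_ph());
}
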
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rcp_ph(__m256h __A) {
-  return (__m256h)__builtin_ia32_rcpph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_rcp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_rcpph256_mask((__v16hf)__A, (__v16hf)__W,
-                                               (__mmask16)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_rcp_ph(__mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_rcpph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_ph(__m128h __A) {
-  return (__m128h)__builtin_ia32_rcpph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_ph(__m128h __W,
-                                                                __mmask8 __U,
-                                                                __m128h __A) {
-  return (__m128h)__builtin_ia32_rcpph128_mask((__v8hf)__A, (__v8hf)__W,
-                                               (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_ph(__mmask8 __U,
-                                                                 __m128h __A) {
-  return (__m128h)__builtin_ia32_rcpph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rsqrt_ph(__m256h __A) {
-  return (__m256h)__builtin_ia32_rsqrtph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_rsqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_rsqrtph256_mask((__v16hf)__A, (__v16hf)__W,
-                                                 (__mmask16)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_rsqrt_ph(__mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_rsqrtph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_ph(__m128h __A) {
-  return (__m128h)__builtin_ia32_rsqrtph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_ph(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A) {
-  return (__m128h)__builtin_ia32_rsqrtph128_mask((__v8hf)__A, (__v8hf)__W,
-                                                 (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt_ph(__mmask8 __U, __m128h __A) {
-  return (__m128h)__builtin_ia32_rsqrtph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_ph(__m128h __A) {
-  return (__m128h)__builtin_ia32_getexpph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ph(__m128h __W, __mmask8 __U, __m128h __A) {
-  return (__m128h)__builtin_ia32_getexpph128_mask((__v8hf)__A, (__v8hf)__W,
-                                                  (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ph(__mmask8 __U, __m128h __A) {
-  return (__m128h)__builtin_ia32_getexpph128_mask(
-      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_getexp_ph(__m256h __A) {
-  return (__m256h)__builtin_ia32_getexpph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_getexp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_getexpph256_mask((__v16hf)__A, (__v16hf)__W,
-                                                  (__mmask16)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_getexp_ph(__mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_getexpph256_mask(
-      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
-}
-
-#define _mm_getmant_ph(A, B, C)                                                \
-  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
-      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
-      (__mmask8)-1))
-
-#define _mm_mask_getmant_ph(W, U, A, B, C)                                     \
-  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
-      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)(__m128h)(W),     \
-      (__mmask8)(U)))
-
-#define _mm_maskz_getmant_ph(U, A, B, C)                                       \
-  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
-      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
-      (__mmask8)(U)))
-
-#define _mm256_getmant_ph(A, B, C)                                             \
-  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
-      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v16hf)_mm256_setzero_ph(), (__mmask16)-1))
-
-#define _mm256_mask_getmant_ph(W, U, A, B, C)                                  \
-  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
-      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), (__v16hf)(__m256h)(W),   \
-      (__mmask16)(U)))
-
-#define _mm256_maskz_getmant_ph(U, A, B, C)                                    \
-  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
-      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)),                          \
-      (__v16hf)_mm256_setzero_ph(), (__mmask16)(U)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_ph(__m128h __A,
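[Reviewer note] In the getmant macros, B is the normalization-interval enum
and C the sign control, packed into the immediate as (C << 2) | B; the
_MM_MANT_* enums from the AVX-512 headers match this layout. Sketch:

/* Each lane's mantissa, normalized to [1, 2), keeping the source sign. */
static inline __m128h mantissa_1_2(__m128h v)
{
  return _mm_getmant_ph(v, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
}
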
-                                                              __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefph128_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefph128_mask((__v8hf)__A, (__v8hf)__B,
-                                                  (__v8hf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ph(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_scalefph128_mask(
-      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_scalef_ph(__m256h __A,
-                                                                 __m256h __B) {
-  return (__m256h)__builtin_ia32_scalefph256_mask(
-      (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_scalef_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_scalefph256_mask((__v16hf)__A, (__v16hf)__B,
-                                                  (__v16hf)__W, (__mmask16)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_scalefph256_mask(
-      (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
-}
-
-#define _mm_roundscale_ph(A, imm)                                              \
-  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
-      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(),              \
-      (__mmask8)-1))
-
-#define _mm_mask_roundscale_ph(W, U, A, imm)                                   \
-  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
-      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))
-
-#define _mm_maskz_roundscale_ph(U, A, imm)                                     \
-  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
-      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(),              \
-      (__mmask8)(U)))
-
-#define _mm256_roundscale_ph(A, imm)                                           \
-  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
-      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(),         \
-      (__mmask16)-1))
-
-#define _mm256_mask_roundscale_ph(W, U, A, imm)                                \
-  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
-      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)(__m256h)(W),                \
-      (__mmask16)(U)))
-
-#define _mm256_maskz_roundscale_ph(U, A, imm)                                  \
-  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
-      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(),         \
-      (__mmask16)(U)))
-
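[Reviewer note] roundscale rounds each lane to 2^-M precision: immediate bits
[7:4] hold M and the low bits select the rounding behavior, so M = 0 with the
usual _MM_FROUND_* flags rounds to whole numbers. Sketch:

/* Round every half-precision lane to the nearest integral value. */
static inline __m128h round_halves(__m128h v)
{
  return _mm_roundscale_ph(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
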
-#define _mm_reduce_ph(A, imm)                                                  \
-  ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm),  \
-                                            (__v8hf)_mm_setzero_ph(),          \
-                                            (__mmask8)-1))
-
-#define _mm_mask_reduce_ph(W, U, A, imm)                                       \
-  ((__m128h)__builtin_ia32_reduceph128_mask(                                   \
-      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))
-
-#define _mm_maskz_reduce_ph(U, A, imm)                                         \
-  ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm),  \
-                                            (__v8hf)_mm_setzero_ph(),          \
-                                            (__mmask8)(U)))
-
-#define _mm256_reduce_ph(A, imm)                                               \
-  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
-                                            (__v16hf)_mm256_setzero_ph(),      \
-                                            (__mmask16)-1))
-
-#define _mm256_mask_reduce_ph(W, U, A, imm)                                    \
-  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
-                                            (__v16hf)(__m256h)(W),             \
-                                            (__mmask16)(U)))
-
-#define _mm256_maskz_reduce_ph(U, A, imm)                                      \
-  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
-                                            (__v16hf)_mm256_setzero_ph(),      \
-                                            (__mmask16)(U)))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_ph(__m128h __a) {
-  return __builtin_ia32_sqrtph((__v8hf)__a);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ph(__m128h __W,
-                                                                 __mmask8 __U,
-                                                                 __m128h __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ph(__mmask8 __U,
-                                                                  __m128h __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)_mm_setzero_ph());
-}
-
-static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_sqrt_ph(__m256h __a) {
-  return (__m256h)__builtin_ia32_sqrtph256((__v16hf)__a);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_sqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U, (__v16hf)_mm256_sqrt_ph(__A), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_sqrt_ph(__mmask16 __U, __m256h __A) {
-  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
-                                              (__v16hf)_mm256_sqrt_ph(__A),
-                                              (__v16hf)_mm256_setzero_ph());
-}
-
-#define _mm_mask_fpclass_ph_mask(U, A, imm)                                    \
-  ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A),            \
-                                              (int)(imm), (__mmask8)(U)))
-
-#define _mm_fpclass_ph_mask(A, imm)                                            \
-  ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A),            \
-                                              (int)(imm), (__mmask8)-1))
-
-#define _mm256_mask_fpclass_ph_mask(U, A, imm)                                 \
-  ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A),          \
-                                               (int)(imm), (__mmask16)(U)))
-
-#define _mm256_fpclass_ph_mask(A, imm)                                         \
-  ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A),          \
-                                               (int)(imm), (__mmask16)-1))
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpd_ph(__m128d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
-      (__v2df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ph(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask((__v2df)__A, (__v8hf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_ph(__mmask8 __U, __m128d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
-      (__v2df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtpd_ph(__m256d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
-      (__v4df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m256d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask((__v4df)__A, (__v8hf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_ph(__mmask8 __U, __m256d __A) {
-  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
-      (__v4df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtph_pd(__m128h __A) {
-  return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
-      (__v8hf)__A, (__v2df)_mm_undefined_pd(), (__mmask8)-1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_pd(__m128d __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A) {
-  return (__m128d)__builtin_ia32_vcvtph2pd128_mask((__v8hf)__A, (__v2df)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
-  return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
-      (__v8hf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtph_pd(__m128h __A) {
-  return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
-      (__v8hf)__A, (__v4df)_mm256_undefined_pd(), (__mmask8)-1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_pd(__m256d __W, __mmask8 __U, __m128h __A) {
-  return (__m256d)__builtin_ia32_vcvtph2pd256_mask((__v8hf)__A, (__v4df)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
-  return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
-      (__v8hf)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi16(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2w128_mask(
-      (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2w128_mask((__v8hf)__A, (__v8hi)__W,
-                                                  (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epi16(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2w128_mask(
-      (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epi16(__m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2w256_mask(
-      (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2w256_mask((__v16hf)__A, (__v16hi)__W,
-                                                  (__mmask16)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epi16(__mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2w256_mask(
-      (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi16(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2w128_mask(
-      (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2w128_mask((__v8hf)__A, (__v8hi)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epi16(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2w128_mask(
-      (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epi16(__m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2w256_mask(
-      (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2w256_mask((__v16hf)__A, (__v16hi)__W,
-                                                   (__mmask16)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epi16(__mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2w256_mask(
-      (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
-}
-
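-// The cvtt* forms ("convert with truncation") always round toward zero, while
-// the plain cvt* forms use the current MXCSR rounding mode; the same pairing
-// repeats for each integer width and signedness below.
-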
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi16_ph(__m128i __A) {
-  return (__m128h) __builtin_convertvector((__v8hi)__A, __v8hf);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi16_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_ph(__m256i __A) {
-  return (__m256h) __builtin_convertvector((__v16hi)__A, __v16hf);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U, (__v16hf)_mm256_cvtepi16_ph(__A), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi16_ph(__mmask16 __U, __m256i __A) {
-  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
-                                              (__v16hf)_mm256_cvtepi16_ph(__A),
-                                              (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu16(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
-      (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uw128_mask((__v8hf)__A, (__v8hu)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epu16(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
-      (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epu16(__m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
-      (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uw256_mask((__v16hf)__A, (__v16hu)__W,
-                                                   (__mmask16)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epu16(__mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
-      (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu16(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
-      (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uw128_mask((__v8hf)__A, (__v8hu)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epu16(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
-      (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epu16(__m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
-      (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uw256_mask((__v16hf)__A, (__v16hu)__W,
-                                                    (__mmask16)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epu16(__mmask16 __U, __m256h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
-      (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu16_ph(__m128i __A) {
-  return (__m128h) __builtin_convertvector((__v8hu)__A, __v8hf);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu16_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_ph(__m256i __A) {
-  return (__m256h) __builtin_convertvector((__v16hu)__A, __v16hf);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U, (__v16hf)_mm256_cvtepu16_ph(__A), (__v16hf)__W);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu16_ph(__mmask16 __U, __m256i __A) {
-  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
-                                              (__v16hf)_mm256_cvtepu16_ph(__A),
-                                              (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi32(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
-      (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2dq128_mask((__v8hf)__A, (__v4si)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
-      (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epi32(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
-      (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2dq256_mask((__v8hf)__A, (__v8si)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
-      (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu32(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
-      (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2udq128_mask((__v8hf)__A, (__v4su)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
-      (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epu32(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
-      (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2udq256_mask((__v8hf)__A, (__v8su)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
-      (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi32_ph(__m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
-      (__v4si)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask((__v4si)__A, (__v8hf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
-      (__v4si)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_ph(__m256i __A) {
-  return (__m128h) __builtin_convertvector((__v8si)__A, __v8hf);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_ph(__mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ph(__m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
-      (__v4su)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask((__v4su)__A, (__v8hf)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu32_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
-      (__v4su)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_ph(__m256i __A) {
-  return (__m128h) __builtin_convertvector((__v8su)__A, __v8hf);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)__W);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu32_ph(__mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi32(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
-      (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2dq128_mask((__v8hf)__A, (__v4si)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
-      (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epi32(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
-      (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2dq256_mask((__v8hf)__A, (__v8si)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
-      (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu32(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
-      (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2udq128_mask((__v8hf)__A, (__v4su)__W,
-                                                     (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
-      (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epu32(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
-      (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2udq256_mask((__v8hf)__A, (__v8su)__W,
-                                                     (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
-      (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi64_ph(__m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
-      (__v2di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask((__v2di)__A, (__v8hf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
-      (__v2di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_ph(__m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
-      (__v4di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask((__v4di)__A, (__v8hf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_ph(__mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
-      (__v4di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi64(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
-      (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2qq128_mask((__v8hf)__A, (__v2di)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
-      (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epi64(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
-      (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2qq256_mask((__v8hf)__A, (__v4di)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
-      (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu64_ph(__m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
-      (__v2du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask((__v2du)__A, (__v8hf)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu64_ph(__mmask8 __U, __m128i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
-      (__v2du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_cvtepu64_ph(__m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
-      (__v4du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask((__v4du)__A, (__v8hf)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu64_ph(__mmask8 __U, __m256i __A) {
-  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
-      (__v4du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu64(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
-      (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask((__v8hf)__A, (__v2du)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
-      (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtph_epu64(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
-      (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask((__v8hf)__A, (__v4du)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
-      (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi64(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
-      (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2qq128_mask((__v8hf)__A, (__v2di)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
-      (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epi64(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
-      (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2qq256_mask((__v8hf)__A, (__v4di)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
-      (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu64(__m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
-      (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask((__v8hf)__A, (__v2du)__W,
-                                                     (__mmask8)__U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
-      (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttph_epu64(__m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
-      (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask((__v8hf)__A, (__v4du)__W,
-                                                     (__mmask8)__U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
-  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
-      (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtxph_ps(__m128h __A) {
-  return (__m128)__builtin_ia32_vcvtph2psx128_mask(
-      (__v8hf)__A, (__v4sf)_mm_undefined_ps(), (__mmask8)-1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtxph_ps(__m128 __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A) {
-  return (__m128)__builtin_ia32_vcvtph2psx128_mask((__v8hf)__A, (__v4sf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
-  return (__m128)__builtin_ia32_vcvtph2psx128_mask(
-      (__v8hf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtxph_ps(__m128h __A) {
-  return (__m256)__builtin_ia32_vcvtph2psx256_mask(
-      (__v8hf)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtxph_ps(__m256 __W, __mmask8 __U, __m128h __A) {
-  return (__m256)__builtin_ia32_vcvtph2psx256_mask((__v8hf)__A, (__v8sf)__W,
-                                                   (__mmask8)__U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
-  return (__m256)__builtin_ia32_vcvtph2psx256_mask(
-      (__v8hf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtxps_ph(__m128 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
-      (__v4sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtxps_ph(__m128h __W,
-                                                                   __mmask8 __U,
-                                                                   __m128 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx128_mask((__v4sf)__A, (__v8hf)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtxps_ph(__mmask8 __U, __m128 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
-      (__v4sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtxps_ph(__m256 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
-      (__v8sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtxps_ph(__m128h __W, __mmask8 __U, __m256 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx256_mask((__v8sf)__A, (__v8hf)__W,
-                                                    (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
-  return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
-      (__v8sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
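-// The "cvtx" spelling distinguishes these AVX512-FP16 conversions (assumed to
-// lower to VCVTPH2PSX/VCVTPS2PHX) from the older F16C-based _mm_cvtph_ps and
-// _mm_cvtps_ph, which predate this extension; both families convert between
-// fp16 and fp32 lanes.
-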
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
-                                                             __m128h __B,
-                                                             __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
-                                          (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
-                                                                  __mmask8 __U,
-                                                                  __m128h __B,
-                                                                  __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
-                                                             __m128h __B,
-                                                             __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
-                                          -(__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A,
-                                                                  __mmask8 __U,
-                                                                  __m128h __B,
-                                                                  __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
-                                                                __m256h __B,
-                                                                __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
-                                             (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
-                                                                __m256h __B,
-                                                                __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
-                                             -(__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
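-// As with the 128-bit forms above, fmsub/fnmadd/fnmsub are all expressed
-// through the single vfmaddph256 builtin by negating the appropriate operands;
-// the backend can fold those negations into the matching FMA instruction.
-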
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmaddsub_ph(__m128h __A,
-                                                                __m128h __B,
-                                                                __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
-                                             (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsubadd_ph(__m128h __A,
-                                                                __m128h __B,
-                                                                __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
-                                             -(__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)_mm_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
-                                                (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
-                                                -(__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)_mm256_setzero_ph());
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
-                                                              __m128h __B,
-                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
-                                          (__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
-                                                                 __m256h __B,
-                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
-                                             (__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
-                                                              __m128h __B,
-                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
-                                          -(__v8hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_selectph_128(
-      (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
-      (__v8hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
-                                                                 __m256h __B,
-                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
-                                             -(__v16hf)__C);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
-  return (__m256h)__builtin_ia32_selectph_256(
-      (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
-      (__v16hf)__C);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_pch(__m128h __A,
-                                                              __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcph128_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fcmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                   (__v4sf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fcmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfcmulcph128_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fcmul_pch(__m256h __A,
-                                                                 __m256h __B) {
-  return (__m256h)__builtin_ia32_vfcmulcph256_mask(
-      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fcmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_vfcmulcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                   (__v8sf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_vfcmulcph256_mask(
-      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
-}
-
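-// The *_pch intrinsics operate on packed complex numbers whose real and
-// imaginary parts are _Float16 values, so each 32-bit float lane holds one
-// complex element; that is why the half vectors are cast through
-// __v4sf/__v8sf here. The "c" (fcmul/fcmadd) variants multiply by the complex
-// conjugate of the second operand.
-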
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_pch(__m128h __A,
-                                                               __m128h __B,
-                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                    (__v4sf)__C, (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectps_128(
-      __U,
-      __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B,
-                                        (__v4sf)__C, (__mmask8)__U),
-      (__v4sf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                    (__v4sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcph128_maskz(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fcmadd_pch(__m256h __A,
-                                                                  __m256h __B,
-                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                    (__v8sf)__C, (__mmask8)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectps_256(
-      __U,
-      __builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
-                                        (__mmask8)__U),
-      (__v8sf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
-  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                    (__v8sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_vfcmaddcph256_maskz(
-      (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_pch(__m128h __A,
-                                                             __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcph128_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_pch(__m128h __W,
-                                                                  __mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                  (__v4sf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_vfmulcph128_mask(
-      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmul_pch(__m256h __A,
-                                                                __m256h __B) {
-  return (__m256h)__builtin_ia32_vfmulcph256_mask(
-      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_vfmulcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                  (__v8sf)__W, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_vfmulcph256_mask(
-      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_pch(__m128h __A,
-                                                              __m128h __B,
-                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                   (__v4sf)__C, (__mmask8)-1);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_selectps_128(
-      __U,
-      __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
-                                       (__mmask8)__U),
-      (__v4sf)__A);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
-                                                   (__v4sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__A, (__v4sf)__B,
-                                                    (__v4sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_pch(__m256h __A,
-                                                                 __m256h __B,
-                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                   (__v8sf)__C, (__mmask8)-1);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_selectps_256(
-      __U,
-      __builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
-                                       (__mmask8)__U),
-      (__v8sf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
-                                                   (__v8sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__A, (__v8sf)__B,
-                                                    (__v8sf)__C, (__mmask8)__U);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_blend_ph(__mmask8 __U,
-                                                                  __m128h __A,
-                                                                  __m128h __W) {
-  return (__m128h)__builtin_ia32_selectph_128((__mmask8)__U, (__v8hf)__W,
-                                              (__v8hf)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_ph(__mmask16 __U, __m256h __A, __m256h __W) {
-  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, (__v16hf)__W,
-                                              (__v16hf)__A);
-}
-
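-// Note the operand order: blend selects from __W where the mask bit is 1 and
-// from __A where it is 0.
-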
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) {
-  return (__m128h)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
-                                                 (__v8hi)__B);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) {
-  return (__m256h)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
-                                                 (__v16hi)__B);
-}
-
-static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_permutexvar_ph(__m128i __A, __m128h __B) {
-  return (__m128h)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A);
-}
-
-static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_ph(__m256i __A, __m256h __B) {
-  return (__m256h)__builtin_ia32_permvarhi256((__v16hi)__B, (__v16hi)__A);
-}
-
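-// Lane shuffling is type-agnostic, so the _ph permutes simply reuse the
-// existing 16-bit integer permute builtins (vpermi2varhi/permvarhi) on
-// bitcast vectors rather than introducing new fp16 builtins.
-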
-static __inline__ _Float16 __DEFAULT_FN_ATTRS256
-_mm256_reduce_add_ph(__m256h __W) {
-  return __builtin_ia32_reduce_fadd_ph256(-0.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS256
-_mm256_reduce_mul_ph(__m256h __W) {
-  return __builtin_ia32_reduce_fmul_ph256(1.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS256
-_mm256_reduce_max_ph(__m256h __V) {
-  return __builtin_ia32_reduce_fmax_ph256(__V);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS256
-_mm256_reduce_min_ph(__m256h __V) {
-  return __builtin_ia32_reduce_fmin_ph256(__V);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS128
-_mm_reduce_add_ph(__m128h __W) {
-  return __builtin_ia32_reduce_fadd_ph128(-0.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS128
-_mm_reduce_mul_ph(__m128h __W) {
-  return __builtin_ia32_reduce_fmul_ph128(1.0f16, __W);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS128
-_mm_reduce_max_ph(__m128h __V) {
-  return __builtin_ia32_reduce_fmax_ph128(__V);
-}
-
-static __inline__ _Float16 __DEFAULT_FN_ATTRS128
-_mm_reduce_min_ph(__m128h __V) {
-  return __builtin_ia32_reduce_fmin_ph128(__V);
-}
-
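// A small sketch (not from the original header) of the horizontal
// reductions above: the add/mul forms fold all lanes into one _Float16,
// seeded with the identity shown (-0.0 for add, 1.0 for mul). Assumes
// -mavx512fp16 -mavx512vl; sum8_ph is an illustrative name.
#include <immintrin.h>

static _Float16 sum8_ph(__m128h v) {
  return _mm_reduce_add_ph(v);   // horizontal sum of all eight lanes
}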
-// The intrinsics below are aliases for the corresponding f*mul_*ch intrinsics.
-#define _mm_mul_pch(A, B) _mm_fmul_pch(A, B)
-#define _mm_mask_mul_pch(W, U, A, B) _mm_mask_fmul_pch(W, U, A, B)
-#define _mm_maskz_mul_pch(U, A, B) _mm_maskz_fmul_pch(U, A, B)
-#define _mm256_mul_pch(A, B) _mm256_fmul_pch(A, B)
-#define _mm256_mask_mul_pch(W, U, A, B) _mm256_mask_fmul_pch(W, U, A, B)
-#define _mm256_maskz_mul_pch(U, A, B) _mm256_maskz_fmul_pch(U, A, B)
-
-#define _mm_cmul_pch(A, B) _mm_fcmul_pch(A, B)
-#define _mm_mask_cmul_pch(W, U, A, B) _mm_mask_fcmul_pch(W, U, A, B)
-#define _mm_maskz_cmul_pch(U, A, B) _mm_maskz_fcmul_pch(U, A, B)
-#define _mm256_cmul_pch(A, B) _mm256_fcmul_pch(A, B)
-#define _mm256_mask_cmul_pch(W, U, A, B) _mm256_mask_fcmul_pch(W, U, A, B)
-#define _mm256_maskz_cmul_pch(U, A, B) _mm256_maskz_fcmul_pch(U, A, B)
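// Usage sketch (not from the original header): the aliases above let the
// complex multiplies be spelled without the leading "f". Assumes
// AVX512FP16 + AVX512VL; names are illustrative, and the conjugated
// operand follows Intel's fcmul definition (conjugate of the second).
#include <immintrin.h>

static __m128h cmul(__m128h a, __m128h b) {
  return _mm_mul_pch(a, b);      // expands to _mm_fmul_pch(a, b)
}

static __m128h cmul_conj(__m128h a, __m128h b) {
  return _mm_cmul_pch(a, b);     // a times conj(b), via _mm_fcmul_pch
}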
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
deleted file mode 100644
index 0519dba..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ /dev/null
@@ -1,8445 +0,0 @@
-/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLINTRIN_H
-#define __AVX512VLINTRIN_H
-
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))
-
-typedef short __v2hi __attribute__((__vector_size__(4)));
-typedef char __v4qi __attribute__((__vector_size__(4)));
-typedef char __v2qi __attribute__((__vector_size__(2)));
-
-/* Integer compare */
-
-#define _mm_cmpeq_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
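// A minimal usage sketch (not from the original header) showing how these
// convenience wrappers expand to the _cmp_*_mask macros with a
// _MM_CMPINT_* predicate, and how the masked forms AND the incoming mask.
// Assumes -mavx512vl; lanes_in_range is an illustrative name.
#include <immintrin.h>

static __mmask8 lanes_in_range(__m256i v, __m256i lo, __m256i hi) {
  __mmask8 ge = _mm256_cmpge_epi32_mask(v, lo);    // bit i: v[i] >= lo[i]
  return _mm256_mask_cmple_epi32_mask(ge, v, hi);  // ... && v[i] <= hi[i]
}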
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_add_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_add_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_add_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_add_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sub_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sub_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sub_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sub_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_add_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_add_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_add_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_add_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sub_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sub_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sub_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sub_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
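// A minimal sketch (not from the original header) of merge- vs.
// zero-masking in the add/sub intrinsics above. Assumes -mavx512vl;
// names are illustrative.
#include <immintrin.h>

static __m256i add_where(__m256i acc, __mmask8 m, __m256i x, __m256i y) {
  return _mm256_mask_add_epi32(acc, m, x, y);  // lane: m ? x+y : acc
}

static __m256i add_or_zero(__mmask8 m, __m256i x, __m256i y) {
  return _mm256_maskz_add_epi32(m, x, y);      // lane: m ? x+y : 0
}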
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epi32(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epi32(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epi32(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epi32(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epu32(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epu32(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epu32(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epu32(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_mullo_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_mullo_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_mullo_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_mullo_epi32(__A, __B),
-                                             (__v4si)__W);
-}
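// A sketch (not from the original header) contrasting the two multiply
// families above: mul_epi32/mul_epu32 are widening multiplies of the even
// 32-bit lanes (four 64-bit products per __m256i, masked per 64-bit lane),
// while mullo_epi32 keeps the low 32 bits of every product (masked per
// 32-bit lane). Assumes -mavx512vl; names are illustrative.
#include <immintrin.h>

static __m256i even_products64(__mmask8 m, __m256i x, __m256i y) {
  return _mm256_maskz_mul_epi32(m, x, y);    // signed 32x32 -> 64-bit
}

static __m256i low_products32(__mmask8 m, __m256i x, __m256i y) {
  return _mm256_maskz_mullo_epi32(m, x, y);  // low 32 bits of each product
}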
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a & (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_and_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_and_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a & (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_and_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_epi32(__m256i __A, __m256i __B)
-{
-  return (__m256i)(~(__v8su)__A & (__v8su)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                          (__v8si)_mm256_andnot_epi32(__A, __B),
-                                          (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_andnot_epi32(__m128i __A, __m128i __B)
-{
-  return (__m128i)(~(__v4su)__A & (__v4su)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_andnot_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a | (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_or_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_or_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a | (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_or_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a ^ (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_xor_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_xor_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a ^ (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_xor_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_and_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_and_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_and_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_epi64(__m256i __A, __m256i __B)
-{
-  return (__m256i)(~(__v4du)__A & (__v4du)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                          (__v4di)_mm256_andnot_epi64(__A, __B),
-                                          (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_andnot_epi64(__m128i __A, __m128i __B)
-{
-  return (__m128i)(~(__v2du)__A & (__v2du)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_andnot_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a | (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_or_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_or_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_or_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a ^ (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_xor_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_xor_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_xor_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
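// A short sketch (not from the original header): the _epi32 and _epi64
// bitwise forms compute the same bits and differ only in the lane width
// the mask applies to, and andnot complements its *first* operand.
// Assumes -mavx512vl; clear_bits_where is an illustrative name.
#include <immintrin.h>

static __m256i clear_bits_where(__mmask8 m, __m256i v, __m256i bits) {
  // lane: m ? (~bits & v) : v  -- clears the selected bits in masked lanes
  return _mm256_mask_andnot_epi32(v, m, bits, v);
}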
-
-#define _mm_cmp_epi32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm_cmp_epu32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                          (__v4si)(__m128i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                          (__v4si)(__m128i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_epi32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm256_cmp_epu32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                          (__v8si)(__m256i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                          (__v8si)(__m256i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                          (__v2di)(__m128i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                          (__v2di)(__m128i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm256_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                          (__v4di)(__m256i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                          (__v4di)(__m256i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_ps_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                          (__v8sf)(__m256)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_ps_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                          (__v8sf)(__m256)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_pd_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                          (__v4df)(__m256d)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_pd_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                          (__v4df)(__m256d)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_ps_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                          (__v4sf)(__m128)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_ps_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                          (__v4sf)(__m128)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_pd_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                          (__v2df)(__m128d)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_pd_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                          (__v2df)(__m128d)(b), (int)(p), \
-                                          (__mmask8)(m)))
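// Usage sketch (not from the original header) for the floating-point
// compare macros above; the predicate comes from the _CMP_* constants
// available via <immintrin.h>. Assumes -mavx512vl; lanes_less is an
// illustrative name.
#include <immintrin.h>

static __mmask8 lanes_less(__m256 a, __m256 b) {
  // Ordered, quiet less-than: lanes containing NaN compare false.
  return _mm256_cmp_ps_mask(a, b, _CMP_LT_OQ);
}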
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
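// A minimal sketch (not from the original header) of the masked FMA
// family above. The prefix picks what survives in unselected lanes:
// mask_ keeps the first multiplicand, mask3_ keeps the addend, maskz_
// zeroes. Assumes -mavx512vl; fma_where is an illustrative name.
#include <immintrin.h>

static __m128d fma_where(__m128d a, __mmask8 m, __m128d b, __m128d c) {
  return _mm_mask_fmadd_pd(a, m, b, c);   // lane: m ? a*b + c : a
}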
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
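-// Note: vfmaddsub computes a*b - c in the even-indexed lanes and a*b + c
-// in the odd-indexed lanes, so the fmsubadd variants above are obtained
-// simply by negating __C before the call. The _mask_ forms keep the
-// element of __A wherever the corresponding bit of __U is clear; the
-// _maskz_ forms zero it instead.
-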
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
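-// The _mask3_ forms differ from _mask_ only in where inactive lanes come
-// from: they pass through the addend __C (and take the mask as the last
-// parameter), mirroring the vfmadd231-style instruction forms whose
-// destination is the addend register.
-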
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
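-// Every FMA variant above funnels into the single unmasked vfmadd builtin:
-// fmsub negates __C, fnmadd negates __B, fnmsub negates both, and lane
-// selection is layered on afterwards with the generic select builtins
-// rather than dedicated masked-FMA builtins.
-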
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_add_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_add_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_add_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_add_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_add_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_add_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_add_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_add_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
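-// The masked arithmetic wrappers (add here; div, max, min further down)
-// all follow the same pattern: compute the full unmasked result, then
-// route it through __builtin_ia32_select*_* so inactive lanes take __W
-// (_mask_) or zero (_maskz_). Illustrative sketch only:
-//   __m128d a = _mm_set_pd(2.0, 1.0);          // lanes {1.0, 2.0}
-//   __m128d b = _mm_set_pd(20.0, 10.0);        // lanes {10.0, 20.0}
-//   __m128d r = _mm_mask_add_pd(a, 0x1, a, b); // lane 0 = 11.0 (added),
-//                                              // lane 1 = 2.0 (kept)
-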
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                (__v4si) __W,
-                (__v4si) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                (__v8si) __W,
-                (__v8si) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-                 (__v2df) __W,
-                 (__v2df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-                 (__v4df) __W,
-                 (__v4df) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-                (__v4sf) __W,
-                (__v4sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-                (__v8sf) __W,
-                (__v8sf) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                (__v2di) __W,
-                (__v2di) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                (__v4di) __W,
-                (__v4di) __A);
-}
-
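-// mask_blend is the masking primitive in its purest form: no arithmetic,
-// just a per-lane select returning __W where the bit of __U is set and
-// __A where it is clear.
-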
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
-                  (__v2df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
-                  (__v2df)
-                  _mm_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
-                  (__v4df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
-                  (__v4df)
-                  _mm256_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
-                  (__v2di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
-                  (__v2di)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
-                  (__v4di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
-                  (__v4di)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
-                 (__v4sf) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
-                 (__v8sf) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
-                 (__v8sf)
-                 _mm256_setzero_ps (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
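-// compress packs the active lanes of __A contiguously toward element 0;
-// the remaining tail is filled from __W (_mask_) or zeroed (_maskz_).
-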
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
-  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
-            (__v2df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
-  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
-            (__v4df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
-  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
-            (__v2di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
-  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
-            (__v4di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
-  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
-            (__v4sf) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
-  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
-            (__v8sf) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
-  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
-            (__v4si) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
-  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
-            (__v8si) __A,
-            (__mmask8) __U);
-}
-
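-// compressstoreu is the memory form of compress: only the active lanes of
-// __A are written, packed contiguously starting at *__P, with no alignment
-// requirement and no stores issued for inactive lanes.
-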
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepi32_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepi32_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepi32_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepi32_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepi32_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepi32_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepi32_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepi32_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
-                (__v4si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
-                (__v4si)
-                _mm_setzero_si128 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvtpd_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvtpd_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
-  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
-            (__v4sf) __W,
-            (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
-  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
-            (__v4sf)
-            _mm_setzero_ps (),
-            (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm256_cvtpd_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm256_cvtpd_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtpd_epu32 (__m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtpd_epu32 (__m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtps_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtps_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtps_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtps_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_cvtps_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_cvtps_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_cvtps_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_cvtps_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtps_epu32 (__m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtps_epu32 (__m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvttpd_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvttpd_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvttpd_epu32 (__m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvttpd_epu32 (__m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvttps_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvttps_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvttps_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvttps_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvttps_epu32 (__m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttps_epu32 (__m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
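-// Conversion naming above: cvt* rounds according to the current MXCSR
-// rounding mode, while cvtt* ("truncate") always rounds toward zero. The
-// unsigned destinations (epu32) have no pre-AVX-512 equivalent, so they go
-// through dedicated mask builtins instead of the generic select wrapper
-// used for the signed conversions.
-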
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtepu32_pd (__m128i __A) {
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepu32_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepu32_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_pd (__m128i __A) {
-  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepu32_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepu32_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtepu32_ps (__m128i __A) {
-  return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepu32_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepu32_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_ps (__m256i __A) {
-  return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepu32_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepu32_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
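-// Unsigned-source conversions are expressed with the target-independent
-// __builtin_convertvector (plus __builtin_shufflevector to extract the low
-// half where the element count narrows), which the backend is expected to
-// match to the vcvtudq2pd/vcvtudq2ps instructions.
-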
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_div_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_div_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_div_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_div_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_div_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_div_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_div_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_div_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
-                (__v2di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
-                 (__v2di)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
-                (__v4di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
-                 (__v4di)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
-  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
-              (__v2df) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
-  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
-  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
-              (__v4df) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
-  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
-              (__v2di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
-               (__v2di)
-               _mm_setzero_si128 (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
-              (__v4di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
-               (__v4di)
-               _mm256_setzero_si256 (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
-  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
-                   (__v4sf) __W,
-                   (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
-  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
-  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
-                   (__v8sf) __W,
-                   (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
-  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
-              (__v4si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
-               (__v4si)
-               _mm_setzero_si128 (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
-              (__v8si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
-               (__v8si)
-               _mm256_setzero_si256 (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
-                (__v4si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
-                (__v8si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
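-// expand is the inverse of compress: the low elements of __A (or, for
-// expandloadu, contiguous elements read from unaligned memory at __P) are
-// scattered into the lanes whose bit in __U is set, with the other lanes
-// taken from __W or zeroed. A compressstoreu followed by an expandloadu
-// with the same mask therefore round-trips the active elements.
-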
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_pd (__m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_getexp_pd (__m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ps (__m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_getexp_ps (__m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
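-/* Usage sketch (illustrative, hypothetical demo_* helper): getexp extracts
-   the unbiased binary exponent of each element as a floating-point value,
-   i.e. floor(log2(|x|)). */
-#include <immintrin.h>
-
-static inline __m128d demo_getexp_pd(void) {
-  __m128d x = _mm_setr_pd(8.0, 0.5);
-  return _mm_getexp_pd(x); /* {3.0, -1.0}: 8 = 2^3, 0.5 = 2^-1 */
-}
-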
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_max_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_max_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_max_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_max_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_max_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_max_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_max_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_max_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_min_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_min_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_min_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_min_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_min_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_min_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_min_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_min_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_mul_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_mul_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_mul_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_mul_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_mul_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_mul_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_mul_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_mul_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
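-/* Usage sketch (illustrative): the mask_/maskz_ arithmetic wrappers above all
-   follow one pattern -- compute the full-width result, then merge per lane
-   with a select builtin. mask_ keeps __W in unselected lanes, maskz_ zeroes
-   them. The demo_* helper is hypothetical. */
-#include <immintrin.h>
-
-static inline void demo_masked_max_pd(__m128d *merged, __m128d *zeroed) {
-  __m128d a = _mm_setr_pd(1.0, 5.0);
-  __m128d b = _mm_setr_pd(4.0, 2.0);
-  __m128d w = _mm_setr_pd(-1.0, -1.0);
-  *merged = _mm_mask_max_pd(w, 0x1, a, b); /* {4.0, -1.0}: lane 1 keeps w */
-  *zeroed = _mm_maskz_max_pd(0x1, a, b);   /* {4.0,  0.0}: lane 1 zeroed */
-}
-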
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_abs_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_abs_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_abs_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_abs_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_abs_epi64(__A),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_abs_epi64(__A),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_abs_epi64(__A),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_abs_epi64(__A),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
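-/* Usage sketch (illustrative, hypothetical demo_* helper): unlike abs_epi32,
-   the 64-bit element form has no SSE/AVX2 predecessor; AVX512VL introduces
-   _mm_abs_epi64 and _mm256_abs_epi64 directly. */
-#include <immintrin.h>
-
-static inline __m128i demo_abs_epi64(void) {
-  __m128i v = _mm_set_epi64x(-7, 42); /* lanes (hi, lo) = (-7, 42) */
-  return _mm_abs_epi64(v);            /* lanes (hi, lo) = ( 7, 42) */
-}
-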
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epu32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epu32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epu32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epu32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epu64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epu64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epu64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epu64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epu32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epu32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epu32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epu32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epu64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epu64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epu64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epu64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
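-/* Usage sketch (illustrative): the epi64/epu64 min/max forms above are also
-   new with AVX512VL. Signedness matters -- the same bit pattern compares
-   differently. The demo_* helper is hypothetical. */
-#include <immintrin.h>
-
-static inline void demo_max_epi64(__m128i *smax, __m128i *umax) {
-  __m128i a = _mm_set_epi64x(-1, 10);
-  __m128i b = _mm_set_epi64x(5, 20);
-  *smax = _mm_max_epi64(a, b); /* lanes (hi, lo) = ( 5, 20): -1 < 5 signed */
-  *umax = _mm_max_epu64(a, b); /* lanes (hi, lo) = (-1, 20): 0xFF..F > 5 */
-}
-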
-#define _mm_roundscale_pd(A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1))
-
-#define _mm_mask_roundscale_pd(W, U, A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U)))
-
-#define _mm_maskz_roundscale_pd(U, A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U)))
-
-#define _mm256_roundscale_pd(A, imm) \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)_mm256_setzero_pd(), \
-                                               (__mmask8)-1))
-
-#define _mm256_mask_roundscale_pd(W, U, A, imm) \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)(__m256d)(W), \
-                                               (__mmask8)(U)))
-
-#define _mm256_maskz_roundscale_pd(U, A, imm)  \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)_mm256_setzero_pd(), \
-                                               (__mmask8)(U)))
-
-#define _mm_roundscale_ps(A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1))
-
-#define _mm_mask_roundscale_ps(W, U, A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U)))
-
-#define _mm_maskz_roundscale_ps(U, A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U)))
-
-#define _mm256_roundscale_ps(A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)_mm256_setzero_ps(), \
-                                              (__mmask8)-1))
-
-#define _mm256_mask_roundscale_ps(W, U, A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)(__m256)(W), \
-                                              (__mmask8)(U)))
-
-#define _mm256_maskz_roundscale_ps(U, A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)_mm256_setzero_ps(), \
-                                              (__mmask8)(U)))
-
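-/* Usage sketch (illustrative, hypothetical demo_* helper): for roundscale,
-   imm[1:0] picks the rounding mode (0 nearest-even, 1 down, 2 up, 3 toward
-   zero) and imm[7:4] = M keeps M fraction bits: result = round(x * 2^M)/2^M. */
-#include <immintrin.h>
-
-static inline void demo_roundscale_pd(__m128d *floors, __m128d *halves) {
-  __m128d x = _mm_setr_pd(2.7, -1.3);
-  *floors = _mm_roundscale_pd(x, 0x01); /* M=0, round down: {2.0, -2.0} */
-  *halves = _mm_roundscale_pd(x, 0x11); /* M=1, round down: {2.5, -1.5} */
-}
-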
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_pd (__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
-        __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_scalef_pd (__m256d __A, __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
-           __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ps (__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_scalef_ps (__m256 __A, __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
-           __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
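-/* Usage sketch (illustrative, hypothetical demo_* helper): scalef computes
-   a * 2^floor(b) per lane -- the vector ldexp building block, the inverse
-   direction of getexp above. */
-#include <immintrin.h>
-
-static inline __m128d demo_scalef_pd(void) {
-  __m128d a = _mm_setr_pd(3.0, 1.5);
-  __m128d b = _mm_setr_pd(2.0, -1.0);
-  return _mm_scalef_pd(a, b); /* {3*2^2, 1.5*2^-1} = {12.0, 0.75} */
-}
-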
-#define _mm_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
-                               (__v4si)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_i32scatter_epi64(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i32scatter_pd(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm_i32scatter_ps(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                                 (int)(scale))
-
-#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                                 (int)(scale))
-
-#define _mm_i32scatter_epi32(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i32scatter_ps(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
-                                 (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
-                                 (int)(scale))
-
-#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
-                                 (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
-                                 (int)(scale))
-
-#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
-                                 (__v8si)(__m256i)(index), \
-                                 (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
-                                 (__v8si)(__m256i)(index), \
-                                 (__v8si)(__m256i)(v1), (int)(scale))
-
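-/* Usage sketch (illustrative, hypothetical demo_* helper): each scatter macro
-   stores v1's lanes to addr + index[i]*scale, where scale is a compile-time
-   byte multiplier (1, 2, 4 or 8); the mask_ forms store only the lanes whose
-   mask bit is set. */
-#include <immintrin.h>
-
-static inline void demo_i32scatter_pd(double buf[8]) {
-  __m128i idx = _mm_setr_epi32(1, 6, 0, 0); /* only the low 2 indices used */
-  __m128d v = _mm_setr_pd(3.5, 7.25);
-  _mm_i32scatter_pd(buf, idx, v, 8);        /* buf[1] = 3.5, buf[6] = 7.25 */
-}
-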
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_sqrt_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_sqrt_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_sqrt_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_sqrt_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_sqrt_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_sqrt_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_sqrt_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_sqrt_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_sub_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_sub_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_sub_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_sub_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_sub_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_sub_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_sub_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_sub_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) {
-  return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
-                                                (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I,
-                            __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                  (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                  (__v4si)__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U,
-                             __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                  (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                  (__v4si)__I);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I,
-                             __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                  (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                  (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) {
-  return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
-                                                (__v8si) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I,
-                               __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                               (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                               (__v8si)__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U,
-                                __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                               (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                               (__v8si)__I);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I,
-                                __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                               (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                               (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) {
-  return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
-                                                 (__v2df)__B);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                     (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                     (__v2df)__A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                     (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                     (__v2df)(__m128d)__I);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                     (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                     (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) {
-  return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
-                                                 (__v4df)__B);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I,
-                            __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                  (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                  (__v4df)__A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U,
-                             __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                  (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                  (__v4df)(__m256d)__I);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I,
-                             __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                  (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                  (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) {
-  return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
-                                                (__v4sf)__B);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128(__U,
-                                     (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                     (__v4sf)__A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128(__U,
-                                     (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                     (__v4sf)(__m128)__I);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128(__U,
-                                     (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                     (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) {
-  return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
-                                                (__v8sf) __B);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256(__U,
-                                  (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                  (__v8sf)__A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U,
-                             __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256(__U,
-                                  (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                  (__v8sf)(__m256)__I);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I,
-                             __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256(__U,
-                                  (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                  (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) {
-  return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
-                                                (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I,
-                            __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                  (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                  (__v2di)__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U,
-                             __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                  (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                  (__v2di)__I);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I,
-                             __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                  (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                  (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) {
-  return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
-                                                (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I,
-                               __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                               (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                               (__v4di)__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U,
-                                __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                               (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                               (__v4di)__I);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I,
-                                __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                               (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                               (__v4di)_mm256_setzero_si256());
-}
-
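-/* Usage sketch (illustrative, hypothetical demo_* helper): permutex2var picks
-   each result lane from the table formed by concatenating __A and __B; for
-   4-lane vectors, index bit 2 selects the source (0 = __A, 1 = __B) and bits
-   1:0 the lane. The mask_, mask2_ and maskz_ forms differ only in what the
-   unselected lanes keep (__A, the index vector __I, or zero). */
-#include <immintrin.h>
-
-static inline __m128i demo_permutex2var_epi32(void) {
-  __m128i a = _mm_setr_epi32(10, 11, 12, 13);
-  __m128i b = _mm_setr_epi32(20, 21, 22, 23);
-  __m128i idx = _mm_setr_epi32(0, 4, 3, 7);
-  return _mm_permutex2var_epi32(a, idx, b); /* {10, 20, 13, 23} */
-}
-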
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtepi8_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtepi8_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtepi8_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtepi8_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_cvtepi8_epi64(__A),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_cvtepi8_epi64(__A),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_cvtepi8_epi64(__A),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_cvtepi8_epi64(__A),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi32_epi64(__X),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi32_epi64(__X),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi32_epi64(__X),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi32_epi64(__X),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi16_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi16_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi16_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi16_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi16_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi16_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi16_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi16_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu8_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu8_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu8_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu8_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu8_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu8_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu8_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu8_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu32_epi64(__X),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu32_epi64(__X),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu32_epi64(__X),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu32_epi64(__X),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu16_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu16_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu16_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu16_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu16_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu16_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu16_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu16_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-
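-// Rotate-by-immediate. These are macros rather than inline functions because
-// the rotate count becomes an immediate operand of VPROLD/VPROLQ and must be
-// a compile-time constant. The _mask/_maskz forms reuse the per-lane select
-// builtin: lanes whose mask bit is 1 take the rotated value, the others keep
-// the passthrough operand (mask) or become zero (maskz). Illustrative use,
-// not part of the original header: _mm_maskz_rol_epi32(0x5, a, 3) rotates
-// lanes 0 and 2 of a left by 3 bits and zeroes lanes 1 and 3.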
-#define _mm_rol_epi32(a, b) \
-  ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_rol_epi32(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_rol_epi32((a), (b)), \
-                                       (__v4si)(__m128i)(w)))
-
-#define _mm_maskz_rol_epi32(u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_rol_epi32((a), (b)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-#define _mm256_rol_epi32(a, b) \
-  ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_rol_epi32(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_rol_epi32((a), (b)), \
-                                       (__v8si)(__m256i)(w)))
-
-#define _mm256_maskz_rol_epi32(u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_rol_epi32((a), (b)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_rol_epi64(a, b) \
-  ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_rol_epi64(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_rol_epi64((a), (b)), \
-                                       (__v2di)(__m128i)(w)))
-
-#define _mm_maskz_rol_epi64(u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_rol_epi64((a), (b)), \
-                                       (__v2di)_mm_setzero_si128()))
-
-#define _mm256_rol_epi64(a, b) \
-  ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_rol_epi64(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_rol_epi64((a), (b)), \
-                                       (__v4di)(__m256i)(w)))
-
-#define _mm256_maskz_rol_epi64(u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_rol_epi64((a), (b)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
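-// Variable rotates (VPROLVD/VPROLVQ) read a separate count for each lane
-// from the second vector operand, taken modulo the lane width, so they can
-// be ordinary inline functions; the right-rotate counterparts (rorv) follow
-// the same pattern further down.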
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rolv_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rolv_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rolv_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rolv_epi32 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rolv_epi32(__A, __B),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rolv_epi32(__A, __B),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rolv_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rolv_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rolv_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rolv_epi64 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rolv_epi64(__A, __B),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rolv_epi64(__A, __B),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
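-// Rotate-by-immediate to the right (VPRORD/VPRORQ); same macro and
-// mask/maskz structure as the rol family above.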
-#define _mm_ror_epi32(a, b) \
-  ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_ror_epi32(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_ror_epi32((a), (b)), \
-                                       (__v4si)(__m128i)(w)))
-
-#define _mm_maskz_ror_epi32(u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_ror_epi32((a), (b)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-#define _mm256_ror_epi32(a, b) \
-  ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_ror_epi32(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_ror_epi32((a), (b)), \
-                                       (__v8si)(__m256i)(w)))
-
-#define _mm256_maskz_ror_epi32(u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_ror_epi32((a), (b)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_ror_epi64(a, b) \
-  ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_ror_epi64(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_ror_epi64((a), (b)), \
-                                       (__v2di)(__m128i)(w)))
-
-#define _mm_maskz_ror_epi64(u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_ror_epi64((a), (b)), \
-                                       (__v2di)_mm_setzero_si128()))
-
-#define _mm256_ror_epi64(a, b) \
-  ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_ror_epi64(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_ror_epi64((a), (b)), \
-                                       (__v4di)(__m256i)(w)))
-
-#define _mm256_maskz_ror_epi64(u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_ror_epi64((a), (b)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
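-// Masked shifts. The underlying operations are mostly the pre-AVX-512 ones:
-// sll/srl take a single count from the low 64 bits of a vector, slli/srli
-// take a scalar count, and sllv/srlv/srav shift each lane by its own count
-// (_mm_srav_epi64 itself is new to AVX-512VL and is defined below). The
-// wrappers here only add the per-lane merge (mask) or zeroing (maskz) select
-// on top.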
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sll_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sll_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sll_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sll_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sll_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sll_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sll_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sll_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rorv_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rorv_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rorv_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rorv_epi32 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rorv_epi32(__A, __B),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rorv_epi32(__A, __B),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rorv_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rorv_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rorv_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rorv_epi64 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rorv_epi64(__A, __B),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rorv_epi64(__A, __B),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sllv_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sllv_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_sllv_epi64(__X, __Y),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_sllv_epi64(__X, __Y),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sllv_epi32(__X, __Y),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sllv_epi32(__X, __Y),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_sllv_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_sllv_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srlv_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srlv_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_srlv_epi64(__X, __Y),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_srlv_epi64(__X, __Y),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srlv_epi32(__X, __Y),
-                                            (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srlv_epi32(__X, __Y),
-                                            (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srlv_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srlv_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srl_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srl_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srl_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srl_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srl_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srl_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srl_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srl_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srav_epi32(__X, __Y),
-                                            (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srav_epi32(__X, __Y),
-                                            (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srav_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srav_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srav_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srav_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srav_epi64(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srav_epi64(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
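-// mask_mov/maskz_mov are the plain per-lane blends: the select builtin is
-// applied directly to the source vector, merging it into __W or zeroing the
-// unselected lanes. Most masked intrinsics in this header are built from the
-// same selectd/selectq builtins.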
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                 (__v4si) __A,
-                 (__v4si) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                 (__v4si) __A,
-                 (__v4si) _mm_setzero_si128 ());
-}
-
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                 (__v8si) __A,
-                 (__v8si) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                 (__v8si) __A,
-                 (__v8si) _mm256_setzero_si256 ());
-}
-
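-// Aligned loads and stores. The unmasked forms simply dereference the vector
-// pointer, so __P must be 16-byte (128-bit) or 32-byte (256-bit) aligned;
-// the masked forms go through the VMOVDQA32/VMOVDQA64 builtins, which load
-// or store only the lanes whose mask bit is set.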
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_load_epi32 (void const *__P)
-{
-  return *(const __m128i *) __P;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
-              (__v4si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
-              (__v4si)
-              _mm_setzero_si128 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_load_epi32 (void const *__P)
-{
-  return *(const __m256i *) __P;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
-              (__v8si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
-              (__v8si)
-              _mm256_setzero_si256 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_store_epi32 (void *__P, __m128i __A)
-{
-  *(__m128i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
-          (__v4si) __A,
-          (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_store_epi32 (void *__P, __m256i __A)
-{
-  *(__m256i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
-          (__v8si) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                 (__v2di) __A,
-                 (__v2di) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                 (__v2di) __A,
-                 (__v2di) _mm_setzero_si128 ());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                 (__v4di) __A,
-                 (__v4di) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                 (__v4di) __A,
-                 (__v4di) _mm256_setzero_si256 ());
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_load_epi64 (void const *__P)
-{
-  return *(const __m128i *) __P;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
-              (__v2di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
-              (__v2di)
-              _mm_setzero_si128 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_load_epi64 (void const *__P)
-{
-  return *(const __m256i *) __P;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
-              (__v4di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
-              (__v4di)
-              _mm256_setzero_si256 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_store_epi64 (void *__P, __m128i __A)
-{
-  *(__m128i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
-          (__v2di) __A,
-          (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_store_epi64 (void *__P, __m256i __A)
-{
-  *(__m256i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
-          (__v4di) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_movedup_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_movedup_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_movedup_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_movedup_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
-{
-   return (__m128i)__builtin_ia32_selectd_128(__M,
-                                              (__v4si) _mm_set1_epi32(__A),
-                                              (__v4si)__O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi32( __mmask8 __M, int __A)
-{
-   return (__m128i)__builtin_ia32_selectd_128(__M,
-                                              (__v4si) _mm_set1_epi32(__A),
-                                              (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
-{
-   return (__m256i)__builtin_ia32_selectd_256(__M,
-                                              (__v8si) _mm256_set1_epi32(__A),
-                                              (__v8si)__O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi32( __mmask8 __M, int __A)
-{
-   return (__m256i)__builtin_ia32_selectd_256(__M,
-                                              (__v8si) _mm256_set1_epi32(__A),
-                                              (__v8si)_mm256_setzero_si256());
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128(__M,
-                                              (__v2di) _mm_set1_epi64x(__A),
-                                              (__v2di) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128(__M,
-                                              (__v2di) _mm_set1_epi64x(__A),
-                                              (__v2di) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256(__M,
-                                              (__v4di) _mm256_set1_epi64x(__A),
-                                              (__v4di) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
-{
-   return (__m256i) __builtin_ia32_selectq_256(__M,
-                                               (__v4di) _mm256_set1_epi64x(__A),
-                                               (__v4di) _mm256_setzero_si256());
-}
-
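-// fixupimm classifies each lane of B (NaN, zero, one, +/-infinity, negative,
-// positive) and uses that class to index a per-lane response table packed
-// into the integer operand C; imm controls which floating-point exceptions
-// may be raised. Macros again, since imm must be a constant.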
-#define _mm_fixupimm_pd(A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)-1))
-
-#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2di)(__m128i)(C), \
-                                               (int)(imm), (__mmask8)(U)))
-
-#define _mm256_fixupimm_pd(A, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                              (__v4df)(__m256d)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)-1))
-
-#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                              (__v4df)(__m256d)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
-                                               (__v4df)(__m256d)(B), \
-                                               (__v4di)(__m256i)(C), \
-                                               (int)(imm), (__mmask8)(U)))
-
-#define _mm_fixupimm_ps(A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_fixupimm_ps(A, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                             (__v8sf)(__m256)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                             (__v8sf)(__m256)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
-                                              (__v8sf)(__m256)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
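-// Masked variants of the aligned floating-point loads (_mm_load_pd,
-// _mm_load_ps and the 256-bit forms); as with the integer loads above, only
-// the lanes selected by __U are read into the result, the rest coming from
-// __W or zero.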
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
-               (__v2df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_pd (__mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
-               (__v4df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
-              (__v4sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ps (__mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
-              (__v8sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8) __U);
-}
-
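/*
 * Illustrative sketch, not part of the upstream header: a merge-masked
 * aligned load using _mm_mask_load_pd as defined above. Assumes
 * <immintrin.h> is included and an AVX512F+AVX512VL target (-mavx512vl);
 * the demo_* name is hypothetical. The pointer must be 16-byte aligned
 * for the aligned-load forms.
 */
static inline __m128d demo_mask_load_pd(const double *aligned16_p)
{
  __m128d pass = _mm_set1_pd(-1.0);
  /* Mask 0x1: element 0 comes from memory, element 1 keeps `pass`. */
  return _mm_mask_load_pd(pass, 0x1, aligned16_p);
}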
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
-                 (__v2di) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
-                 (__v2di)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
-                 (__v4di) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
-                 (__v4di)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
-                 (__v8si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
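/*
 * Illustrative sketch (hypothetical demo_* helper, same assumptions as
 * above): the maskz_ forms zero the lanes whose mask bit is clear instead
 * of merging, and the loadu variants place no alignment requirement on
 * the pointer.
 */
static inline __m128i demo_maskz_loadu_epi32(const void *p)
{
  /* Mask 0x5: lanes 0 and 2 are loaded, lanes 1 and 3 become zero. */
  return _mm_maskz_loadu_epi32(0x5, p);
}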
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
-               (__v2df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
-               (__v4df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
-              (__v4sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
-              (__v8sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
-           (__v2df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
-{
-  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
-           (__v4df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
-           (__v4sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
-{
-  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
-           (__v8sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi64 (void *__P, __m128i __A)
-{
-  struct __storeu_epi64 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
-             (__v2di) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi64 (void *__P, __m256i __A)
-{
-  struct __storeu_epi64 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
-             (__v4di) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi32 (void *__P, __m128i __A)
-{
-  struct __storeu_epi32 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
-             (__v4si) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi32 (void *__P, __m256i __A)
-{
-  struct __storeu_epi32 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
-             (__v8si) __A,
-             (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
-           (__v2df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
-{
-  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
-           (__v4df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
-           (__v4sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
-{
-  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
-           (__v8sf) __A,
-           (__mmask8) __U);
-}
-
-
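/*
 * Illustrative sketch (hypothetical demo_* helper): a masked store writes
 * only the lanes whose mask bit is set; the remaining memory is neither
 * read nor written, and masked-off elements do not fault.
 */
static inline void demo_mask_storeu_ps(float *dst, __m256 v)
{
  /* Mask 0x0F: dst[0..3] receive v's low lanes, dst[4..7] are untouched. */
  _mm256_mask_storeu_ps(dst, 0x0F, v);
}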
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpackhi_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpackhi_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
-                                           (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
-                                           (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
-                                           (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
-                                           (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpacklo_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpacklo_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
-                                           (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
-                                           (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
-                                           (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
-                                           (__v8sf)_mm256_setzero_ps());
-}
-
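/*
 * Illustrative sketch (hypothetical demo_* helper): the masked unpack
 * forms above compute the ordinary unpack first and then select per lane.
 * _mm_unpacklo_pd(a, b) is { a[0], b[0] }.
 */
static inline __m128d demo_mask_unpacklo_pd(__m128d w, __m128d a, __m128d b)
{
  /* Mask 0x1: lane 0 takes a[0] from the unpack, lane 1 keeps w[1]. */
  return _mm_mask_unpacklo_pd(w, 0x1, a, b);
}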
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_pd (__m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_rcp14_pd (__m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ps (__m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_rcp14_ps (__m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
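/*
 * Illustrative sketch (hypothetical demo_* helper): rcp14 returns a
 * reciprocal approximation with relative error at most 2^-14. One
 * Newton-Raphson step, r' = r * (2 - a*r), roughly squares that error.
 * Assumes FMA is also available (-mfma), which holds on AVX-512 hardware.
 */
static inline __m128 demo_rcp14_refined(__m128 a)
{
  __m128 r = _mm_rcp14_ps(a);                   /* r ~= 1/a */
  return _mm_mul_ps(r, _mm_fnmadd_ps(a, r,      /* 2 - a*r  */
                                     _mm_set1_ps(2.0f)));
}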
-#define _mm_mask_permute_pd(W, U, X, C) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_permute_pd((X), (C)), \
-                                        (__v2df)(__m128d)(W)))
-
-#define _mm_maskz_permute_pd(U, X, C) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_permute_pd((X), (C)), \
-                                        (__v2df)_mm_setzero_pd()))
-
-#define _mm256_mask_permute_pd(W, U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permute_pd((X), (C)), \
-                                        (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_permute_pd(U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permute_pd((X), (C)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm_mask_permute_ps(W, U, X, C) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_permute_ps((X), (C)), \
-                                       (__v4sf)(__m128)(W)))
-
-#define _mm_maskz_permute_ps(U, X, C) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_permute_ps((X), (C)), \
-                                       (__v4sf)_mm_setzero_ps()))
-
-#define _mm256_mask_permute_ps(W, U, X, C) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_permute_ps((X), (C)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_permute_ps(U, X, C) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_permute_ps((X), (C)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
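/*
 * Illustrative sketch (hypothetical demo_* helper): in _mm_permute_pd,
 * immediate bit i selects the source lane for result lane i, so imm 0x1
 * produces { x[1], x[0] }. The mask then merges against W per lane.
 */
static inline __m128d demo_mask_permute_pd(__m128d w, __m128d x)
{
  /* Mask 0x2: lane 1 takes the swapped value x[0], lane 0 keeps w[0]. */
  return _mm_mask_permute_pd(w, 0x2, x, 0x1);
}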
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                            (__v2df)_mm_permutevar_pd(__A, __C),
-                                            (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                            (__v2df)_mm_permutevar_pd(__A, __C),
-                                            (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                         (__v4df)_mm256_permutevar_pd(__A, __C),
-                                         (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                         (__v4df)_mm256_permutevar_pd(__A, __C),
-                                         (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                            (__v4sf)_mm_permutevar_ps(__A, __C),
-                                            (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                            (__v4sf)_mm_permutevar_ps(__A, __C),
-                                            (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                          (__v8sf)_mm256_permutevar_ps(__A, __C),
-                                          (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                          (__v8sf)_mm256_permutevar_ps(__A, __C),
-                                          (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi32_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi32_mask (__U, _mm_and_si128 (__A, __B),
-                                     _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_test_epi32_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi32_mask (_mm256_and_si256 (__A, __B),
-                                   _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
-                                        _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi64_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi64_mask (__U, _mm_and_si128 (__A, __B),
-                                     _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_test_epi64_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi64_mask (_mm256_and_si256 (__A, __B),
-                                   _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
-                                        _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi32_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi32_mask (__U, _mm_and_si128 (__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi32_mask (_mm256_and_si256 (__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
-                                       _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi64_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi64_mask (__U, _mm_and_si128 (__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi64_mask (_mm256_and_si256 (__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
-                                       _mm256_setzero_si256());
-}
-
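/*
 * Illustrative sketch (hypothetical demo_* helper): test sets mask bit i
 * when (__A & __B) is nonzero in lane i; testn sets it when the AND is
 * zero, exactly as the cmpneq/cmpeq definitions above spell out.
 */
static inline __mmask8 demo_test_sign_bits(__m128i v)
{
  /* Bit i of the result reports whether lane i of v is negative. */
  return _mm_test_epi32_mask(v, _mm_set1_epi32((int)0x80000000u));
}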
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
-                                           (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
-                                           (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
-                                        (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
-                                        (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
-                                        (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
-                                           (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
-                                           (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
-                                        (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
-                                        (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
-                                        (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sra_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sra_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sra_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sra_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sra_epi64(__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                             (__v2di)_mm_sra_epi64(__A, __B), \
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                             (__v2di)_mm_sra_epi64(__A, __B), \
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi64(__m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                           (__v4di)_mm256_sra_epi64(__A, __B), \
-                                           (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                           (__v4di)_mm256_sra_epi64(__A, __B), \
-                                           (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                           (__v2di)_mm_srai_epi64(__A, __imm), \
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                           (__v2di)_mm_srai_epi64(__A, __imm), \
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
-                       unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                        (__v4di)_mm256_srai_epi64(__A, __imm), \
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                        (__v4di)_mm256_srai_epi64(__A, __imm), \
-                                        (__v4di)_mm256_setzero_si256());
-}
-
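/*
 * Illustrative sketch (hypothetical demo_* helper): AVX-512 adds a true
 * 64-bit arithmetic right shift; SSE2/AVX2 only provide it for 16/32-bit
 * elements. The sign bit is replicated into the vacated positions.
 */
static inline __m128i demo_srai_epi64(void)
{
  return _mm_srai_epi64(_mm_set1_epi64x(-8), 1);  /* { -4, -4 } */
}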
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
-                                              (__v4si)(__m128i)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
-                                              (__v8si)(__m256i)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
-                                              (__v2di)(__m128i)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
-                                              (__v4di)(__m256i)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-
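/*
 * Illustrative sketch (hypothetical demo_* helper): the ternarylogic imm8
 * is a 3-input truth table indexed by the bit triple (a, b, c), applied
 * bitwise. 0x96 encodes a ^ b ^ c; 0xE8 encodes the majority function.
 */
static inline __m128i demo_ternlog_xor3(__m128i a, __m128i b, __m128i c)
{
  return _mm_ternarylogic_epi32(a, b, c, 0x96);   /* bitwise a ^ b ^ c */
}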
-
-#define _mm256_shuffle_f32x4(A, B, imm) \
-  ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
-                                         (__v8sf)(__m256)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
-#define _mm256_shuffle_f64x2(A, B, imm) \
-  ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
-                                          (__v4df)(__m256d)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v4df)_mm256_setzero_pd()))
-
-#define _mm256_shuffle_i32x4(A, B, imm) \
-  ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
-                                          (__v8si)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm256_shuffle_i64x2(A, B, imm) \
-  ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
-                                          (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v4di)(__m256i)(W)))
-
-
-#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
-#define _mm_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                        (__v2df)(__m128d)(W)))
-
-#define _mm_maskz_shuffle_pd(U, A, B, M) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                        (__v2df)_mm_setzero_pd()))
-
-#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                        (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_shuffle_pd(U, A, B, M) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                       (__v4sf)(__m128)(W)))
-
-#define _mm_maskz_shuffle_ps(U, A, B, M) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                       (__v4sf)_mm_setzero_ps()))
-
-#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_shuffle_ps(U, A, B, M) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
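/*
 * Illustrative sketch (hypothetical demo_* helper): for the 256-bit lane
 * shuffles above, imm bit 0 picks which 128-bit half of A becomes the low
 * half of the result and imm bit 1 picks B's half for the high half.
 */
static inline __m256 demo_shuffle_f32x4(__m256 a, __m256 b)
{
  return _mm256_shuffle_f32x4(a, b, 0x3);   /* { a.high128, b.high128 } */
}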
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_pd (__m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_rsqrt14_pd (__m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ps (__m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_rsqrt14_ps (__m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
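/*
 * Illustrative sketch (hypothetical demo_* helper): rsqrt14 approximates
 * 1/sqrt(a) to within 2^-14 relative error; one Newton-Raphson step,
 * r' = r * (1.5 - 0.5*a*r*r), sharpens it to near single precision.
 * Assumes FMA is available (-mfma).
 */
static inline __m128 demo_rsqrt14_refined(__m128 a)
{
  __m128 r  = _mm_rsqrt14_ps(a);
  __m128 ha = _mm_mul_ps(_mm_set1_ps(0.5f), a);
  return _mm_mul_ps(r, _mm_fnmadd_ps(ha, _mm_mul_ps(r, r),
                                     _mm_set1_ps(1.5f)));
}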
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcast_f32x4(__m128 __A)
-{
-  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
-                                            (__v8sf)_mm256_broadcast_f32x4(__A),
-                                            (__v8sf)__O);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
-                                            (__v8sf)_mm256_broadcast_f32x4(__A),
-                                            (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcast_i32x4(__m128i __A)
-{
-  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                            (__v8si)_mm256_broadcast_i32x4(__A),
-                                            (__v8si)__O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                            (__v8si)_mm256_broadcast_i32x4(__A),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256(__M,
-                                              (__v4df) _mm256_broadcastsd_pd(__A),
-                                              (__v4df) __O);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256(__M,
-                                              (__v4df) _mm256_broadcastsd_pd(__A),
-                                              (__v4df) _mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128(__M,
-                                             (__v4sf) _mm_broadcastss_ps(__A),
-                                             (__v4sf) __O);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128(__M,
-                                             (__v4sf) _mm_broadcastss_ps(__A),
-                                             (__v4sf) _mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256(__M,
-                                             (__v8sf) _mm256_broadcastss_ps(__A),
-                                             (__v8sf) __O);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256(__M,
-                                             (__v8sf) _mm256_broadcastss_ps(__A),
-                                             (__v8sf) _mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__M,
-                                             (__v4si) _mm_broadcastd_epi32(__A),
-                                             (__v4si) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__M,
-                                             (__v4si) _mm_broadcastd_epi32(__A),
-                                             (__v4si) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__M,
-                                             (__v8si) _mm256_broadcastd_epi32(__A),
-                                             (__v8si) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__M,
-                                             (__v8si) _mm256_broadcastd_epi32(__A),
-                                             (__v8si) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__M,
-                                             (__v2di) _mm_broadcastq_epi64(__A),
-                                             (__v2di) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__M,
-                                             (__v2di) _mm_broadcastq_epi64(__A),
-                                             (__v2di) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__M,
-                                             (__v4di) _mm256_broadcastq_epi64(__A),
-                                             (__v4di) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__M,
-                                             (__v4di) _mm256_broadcastq_epi64(__A),
-                                             (__v4di) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi32_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi32_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi32_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi)_mm_setzero_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi)__O,
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi32_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi32 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi32 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si)__O,
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi32_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi32_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi32_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi32_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) _mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi32 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi32 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi8 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi8 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v8si)__A, __v8qi),
-      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-      12, 13, 14, 15);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi16 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi16 (__m256i __A)
-{
-  return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi8 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi8 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi32 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
-              (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
-              (__v4si) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi32 (__m256i __A)
-{
-  return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm256_cvtepi64_epi32(__A),
-                                             (__v4si)__O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm256_cvtepi64_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi16 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
-      3, 3, 3, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
-              (__v8hi)__O,
-              __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi16 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-#define _mm256_extractf32x4_ps(A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)_mm_undefined_ps(), \
-                                                (__mmask8)-1))
-
-#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U)))
-
-#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U)))
-
-#define _mm256_extracti32x4_epi32(A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)_mm_undefined_si128(), \
-                                                 (__mmask8)-1))
-
-#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)(__m128i)(W), \
-                                                 (__mmask8)(U)))
-
-#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)_mm_setzero_si128(), \
-                                                 (__mmask8)(U)))
-
-#define _mm256_insertf32x4(A, B, imm) \
-  ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
-                                          (__v4sf)(__m128)(B), (int)(imm)))
-
-#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                  (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_insertf32x4(U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                  (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)_mm256_setzero_ps()))
-
-#define _mm256_inserti32x4(A, B, imm) \
-  ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
-                                           (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                  (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_inserti32x4(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                  (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)_mm256_setzero_si256()))
-
-#define _mm_getmant_pd(A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_getmant_pd(W, U, A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)(__m128d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_getmant_pd(U, A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm256_getmant_pd(A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_getmant_pd(W, U, A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)(__m256d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_getmant_pd(U, A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm_getmant_ps(A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm_mask_getmant_ps(W, U, A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)(__m128)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm_maskz_getmant_ps(U, A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U)))
-
-#define _mm256_getmant_ps(A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)_mm256_setzero_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm256_mask_getmant_ps(W, U, A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)(__m256)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm256_maskz_getmant_ps(U, A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)_mm256_setzero_ps(), \
-                                            (__mmask8)(U)))
-
-#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v2di)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4di)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v8si)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_permutex_pd(X, C) \
-  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))
-
-#define _mm256_mask_permutex_pd(W, U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_permutex_pd((X), (C)), \
-                                       (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_permutex_pd(U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permutex_pd((X), (C)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm256_permutex_epi64(X, C) \
-  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))
-
-#define _mm256_mask_permutex_epi64(W, U, X, C) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)(__m256i)(W)))
-
-#define _mm256_maskz_permutex_epi64(U, X, C) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)_mm256_setzero_si256()))
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
-          __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                        (__v4df)_mm256_permutexvar_pd(__X, __Y),
-                                        (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                        (__v4df)_mm256_permutexvar_pd(__X, __Y),
-                                        (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_permvardi256((__v4di) __Y, (__v4di) __X);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                     (__v4di)_mm256_permutexvar_epi64(__X, __Y),
-                                     (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
-             __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                     (__v4di)_mm256_permutexvar_epi64(__X, __Y),
-                                     (__v4di)__W);
-}
-
-#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_permutexvar_ps(__X, __Y),
-                                        (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_permutexvar_ps(__X, __Y),
-                                        (__v8sf)_mm256_setzero_ps());
-}
-
-#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X,
-                              __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                     (__v8si)_mm256_permutexvar_epi32(__X, __Y),
-                                     (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                     (__v8si)_mm256_permutexvar_epi32(__X, __Y),
-                                     (__v8si)_mm256_setzero_si256());
-}
-
-#define _mm_alignr_epi32(A, B, imm) \
-  ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
-                                     (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                    (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                    (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)_mm_setzero_si128()))
-
-#define _mm256_alignr_epi32(A, B, imm) \
-  ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
-                                     (__v8si)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                 (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                 (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)_mm256_setzero_si256()))
-
-#define _mm_alignr_epi64(A, B, imm) \
-  ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
-                                     (__v2di)(__m128i)(B), (int)(imm)))
-
-#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                    (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                    (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)_mm_setzero_si128()))
-
-#define _mm256_alignr_epi64(A, B, imm) \
-  ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
-                                     (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                 (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                 (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)_mm256_setzero_si256()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_movehdup_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_movehdup_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_movehdup_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_movehdup_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_moveldup_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_moveldup_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_moveldup_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_moveldup_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-#define _mm256_mask_shuffle_epi32(W, U, A, I) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                       (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_shuffle_epi32(U, A, I) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_mask_shuffle_epi32(W, U, A, I) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                       (__v4si)(__m128i)(W)))
-
-#define _mm_maskz_shuffle_epi32(U, A, I) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-              (__v2df) __A,
-              (__v2df) __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-              (__v2df) __A,
-              (__v2df) _mm_setzero_pd ());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-              (__v4df) __A,
-              (__v4df) __W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-              (__v4df) __A,
-              (__v4df) _mm256_setzero_pd ());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-             (__v4sf) __A,
-             (__v4sf) __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-             (__v4sf) __A,
-             (__v4sf) _mm_setzero_ps ());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-             (__v8sf) __A,
-             (__v8sf) __W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-             (__v8sf) __A,
-             (__v8sf) _mm256_setzero_ps ());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
-             (__v4sf) __W,
-             (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
-{
-  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
-             (__v4sf) _mm_setzero_ps (),
-             (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
-{
-  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
-                (__v8sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
-{
-  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
-#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                          (__v8hi)(__m128i)(W), \
-                                          (__mmask8)(U)))
-
-#define _mm_maskz_cvt_roundps_ph(U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                          (__v8hi)_mm_setzero_si128(), \
-                                          (__mmask8)(U)))
-
-#define _mm_mask_cvtps_ph  _mm_mask_cvt_roundps_ph
-#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph
-
-#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                             (__v8hi)(__m128i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                             (__v8hi)_mm_setzero_si128(), \
-                                             (__mmask8)(U)))
-
-#define _mm256_mask_cvtps_ph  _mm256_mask_cvt_roundps_ph
-#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
-
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif /* __AVX512VLINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
deleted file mode 100644
index 0fb29af..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*===------------- avx512vlvnniintrin.h - VNNI intrinsics ------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512vlvnniintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLVNNIINTRIN_H
-#define __AVX512VLVNNIINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(256)))
-
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
-/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
-/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in \a S, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 7
-///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
-///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
-///      tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
-///      tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
-///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
-///    ENDFOR
-///    DST[MAX:256] := 0
-/// \endoperation
-#define _mm256_dpbusd_epi32(S, A, B) \
-  ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
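Usage sketch (editorial, not part of the deleted header): how this intrinsic is typically used to accumulate a u8 x s8 dot product. The helper name is hypothetical; it assumes n is a multiple of 32 and compilation with -mavx512vl -mavx512vnni.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: dot product of n unsigned bytes with n signed bytes. */
static int32_t dot_u8s8(const uint8_t *a, const int8_t *b, size_t n) {
  __m256i acc = _mm256_setzero_si256();
  for (size_t i = 0; i < n; i += 32) {
    __m256i va = _mm256_loadu_si256((const __m256i *)(a + i));
    __m256i vb = _mm256_loadu_si256((const __m256i *)(b + i));
    acc = _mm256_dpbusd_epi32(acc, va, vb); /* 8 running dword sums */
  }
  /* Reduce the 8 dword accumulators to one scalar. */
  __m128i s = _mm_add_epi32(_mm256_castsi256_si128(acc),
                            _mm256_extracti128_si256(acc, 1));
  s = _mm_add_epi32(s, _mm_shuffle_epi32(s, _MM_SHUFFLE(1, 0, 3, 2)));
  s = _mm_add_epi32(s, _mm_shuffle_epi32(s, _MM_SHUFFLE(2, 3, 0, 1)));
  return _mm_cvtsi128_si32(s);
}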
-
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
-/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
-/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in \a S using signed saturation, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 7
-///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
-///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
-///      tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
-///      tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
-///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
-///    ENDFOR
-///    DST[MAX:256] := 0
-/// \endoperation
-#define _mm256_dpbusds_epi32(S, A, B) \
-  ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
-
-/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
-/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
-/// results. Sum these 2 results with the corresponding 32-bit integer in \a S,
-/// and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 7
-///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
-///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
-///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
-///    ENDFOR
-///    DST[MAX:256] := 0
-/// \endoperation
-#define _mm256_dpwssd_epi32(S, A, B) \
-  ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
-
-/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
-/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
-/// results. Sum these 2 results with the corresponding 32-bit integer in \a S
-/// using signed saturation, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 7
-///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
-///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
-///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
-///    ENDFOR
-///    DST[MAX:256] := 0
-/// \endoperation
-#define _mm256_dpwssds_epi32(S, A, B) \
-  ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
-
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
-/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
-/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in \a S, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 3
-///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
-///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
-///      tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
-///      tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
-///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
-///    ENDFOR
-///    DST[MAX:128] := 0
-/// \endoperation
-#define _mm_dpbusd_epi32(S, A, B) \
-  ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
-
-/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
-/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
-/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
-/// in \a S using signed saturation, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 3
-///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
-///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
-///      tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
-///      tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
-///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
-///    ENDFOR
-///    DST[MAX:128] := 0
-/// \endoperation
-#define _mm_dpbusds_epi32(S, A, B) \
-  ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
-
-/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
-/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
-/// results. Sum these 2 results with the corresponding 32-bit integer in \a S,
-/// and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 3
-///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
-///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
-///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
-///    ENDFOR
-///    DST[MAX:128] := 0
-/// \endoperation
-#define _mm_dpwssd_epi32(S, A, B) \
-  ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
-
-/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
-/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
-/// results. Sum these 2 results with the corresponding 32-bit integer in \a S
-/// using signed saturation, and store the packed 32-bit results in DST.
-///
-/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
-///
-/// \operation
-///    FOR j := 0 to 3
-///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
-///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
-///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
-///    ENDFOR
-///    DST[MAX:128] := 0
-/// \endoperation
-#define _mm_dpwssds_epi32(S, A, B) \
-  ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                     (__v8si)_mm256_dpbusd_epi32(__S, __A, __B),
-                                     (__v8si)__S);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                     (__v8si)_mm256_dpbusd_epi32(__S, __A, __B),
-                                     (__v8si)_mm256_setzero_si256());
-}
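A short contrast of the two masking forms above (editorial sketch; s, a, b are illustrative __m256i values): with mask 0x0F, lanes 0-3 receive the dot-product result, while lanes 4-7 keep __S in the merge form and become zero in the zeroing form.

__m256i merged = _mm256_mask_dpbusd_epi32(s, 0x0F, a, b);  /* lanes 4-7 = s */
__m256i zeroed = _mm256_maskz_dpbusd_epi32(0x0F, s, a, b); /* lanes 4-7 = 0 */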
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                    (__v8si)_mm256_dpbusds_epi32(__S, __A, __B),
-                                    (__v8si)__S);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                     (__v8si)_mm256_dpbusds_epi32(__S, __A, __B),
-                                     (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                     (__v8si)_mm256_dpwssd_epi32(__S, __A, __B),
-                                     (__v8si)__S);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                     (__v8si)_mm256_dpwssd_epi32(__S, __A, __B),
-                                     (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                    (__v8si)_mm256_dpwssds_epi32(__S, __A, __B),
-                                    (__v8si)__S);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                    (__v8si)_mm256_dpwssds_epi32(__S, __A, __B),
-                                    (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                        (__v4si)_mm_dpbusd_epi32(__S, __A, __B),
-                                        (__v4si)__S);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                        (__v4si)_mm_dpbusd_epi32(__S, __A, __B),
-                                        (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                       (__v4si)_mm_dpbusds_epi32(__S, __A, __B),
-                                       (__v4si)__S);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                       (__v4si)_mm_dpbusds_epi32(__S, __A, __B),
-                                       (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                        (__v4si)_mm_dpwssd_epi32(__S, __A, __B),
-                                        (__v4si)__S);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                        (__v4si)_mm_dpwssd_epi32(__S, __A, __B),
-                                        (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                       (__v4si)_mm_dpwssds_epi32(__S, __A, __B),
-                                       (__v4si)__S);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                       (__v4si)_mm_dpwssds_epi32(__S, __A, __B),
-                                       (__v4si)_mm_setzero_si128());
-}
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avxintrin.h b/linux-x86/lib64/clang/14.0.2/include/avxintrin.h
deleted file mode 100644
index 17fe636..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avxintrin.h
+++ /dev/null
@@ -1,5050 +0,0 @@
-/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVXINTRIN_H
-#define __AVXINTRIN_H
-
-typedef double __v4df __attribute__ ((__vector_size__ (32)));
-typedef float __v8sf __attribute__ ((__vector_size__ (32)));
-typedef long long __v4di __attribute__ ((__vector_size__ (32)));
-typedef int __v8si __attribute__ ((__vector_size__ (32)));
-typedef short __v16hi __attribute__ ((__vector_size__ (32)));
-typedef char __v32qi __attribute__ ((__vector_size__ (32)));
-
-/* Unsigned types */
-typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));
-typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
-typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
-typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
-
-/* We need an explicitly signed variant for char. Note that this shouldn't
- * appear in the interface though. */
-typedef signed char __v32qs __attribute__((__vector_size__(32)));
-
-typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32)));
-typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32)));
-typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32)));
-
-typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));
-typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));
-typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128)))
-
-/* Arithmetic */
-/// Adds two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \returns A 256-bit vector of [4 x double] containing the sums of both
-///    operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_add_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4df)__a+(__v4df)__b);
-}
-
-/// Adds two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \returns A 256-bit vector of [8 x float] containing the sums of both
-///    operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_add_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8sf)__a+(__v8sf)__b);
-}
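Minimal usage sketch (editorial; the helper name is hypothetical, compile with -mavx): element-wise addition of two unaligned float arrays, one 8-float block at a time.

#include <immintrin.h>

/* c[i] = a[i] + b[i] for one block of 8 floats. */
static void add8(const float *a, const float *b, float *c) {
  __m256 va = _mm256_loadu_ps(a);
  __m256 vb = _mm256_loadu_ps(b);
  _mm256_storeu_ps(c, _mm256_add_ps(va, vb));
}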
-
-/// Subtracts two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the minuend.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing the subtrahend.
-/// \returns A 256-bit vector of [4 x double] containing the differences between
-///    both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_sub_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4df)__a-(__v4df)__b);
-}
-
-/// Subtracts two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the minuend.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing the subtrahend.
-/// \returns A 256-bit vector of [8 x float] containing the differences between
-///    both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_sub_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8sf)__a-(__v8sf)__b);
-}
-
-/// Subtracts the even-indexed values and adds the odd-indexed values of
-///    two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSUBPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the left source operand.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing the right source operand.
-/// \returns A 256-bit vector of [4 x double] containing the alternating
-///    differences and sums between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_addsub_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Subtracts the even-indexed values and adds the odd-indexed values of
-///    two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSUBPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the left source operand.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing the right source operand.
-/// \returns A 256-bit vector of [8 x float] containing the alternating
-///    differences and sums between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_addsub_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/// Divides two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the dividend.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing the divisor.
-/// \returns A 256-bit vector of [4 x double] containing the quotients of both
-///    operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_div_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4df)__a/(__v4df)__b);
-}
-
-/// Divides two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the dividend.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing the divisor.
-/// \returns A 256-bit vector of [8 x float] containing the quotients of both
-///    operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_div_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8sf)__a/(__v8sf)__b);
-}
-
-/// Compares two 256-bit vectors of [4 x double] and returns the greater
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \returns A 256-bit vector of [4 x double] containing the maximum values
-///    between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_max_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Compares two 256-bit vectors of [8 x float] and returns the greater
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \returns A 256-bit vector of [8 x float] containing the maximum values
-///    between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_max_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/// Compares two 256-bit vectors of [4 x double] and returns the lesser
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \returns A 256-bit vector of [4 x double] containing the minimum values
-///    between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_min_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Compares two 256-bit vectors of [8 x float] and returns the lesser
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \returns A 256-bit vector of [8 x float] containing the minimum values
-///    between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_min_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
-}
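The min and max forms compose into the usual clamp idiom; a sketch (editorial, helper name illustrative):

/* Clamp each of 8 floats to [lo, hi]. */
static __m256 clamp_ps(__m256 x, __m256 lo, __m256 hi) {
  return _mm256_min_ps(_mm256_max_ps(x, lo), hi);
}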
-
-/// Multiplies two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the operands.
-/// \returns A 256-bit vector of [4 x double] containing the products of both
-///    operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_mul_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4df)__a * (__v4df)__b);
-}
-
-/// Multiplies two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the operands.
-/// \returns A 256-bit vector of [8 x float] containing the products of both
-///    operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_mul_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8sf)__a * (__v8sf)__b);
-}
-
-/// Calculates the square roots of the values in a 256-bit vector of
-///    [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 256-bit vector of [4 x double] containing the square roots of the
-///    values in the operand.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_sqrt_pd(__m256d __a)
-{
-  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
-}
-
-/// Calculates the square roots of the values in a 256-bit vector of
-///    [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the square roots of the
-///    values in the operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_sqrt_ps(__m256 __a)
-{
-  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
-}
-
-/// Calculates the reciprocal square roots of the values in a 256-bit
-///    vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRSQRTPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the reciprocal square
-///    roots of the values in the operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_rsqrt_ps(__m256 __a)
-{
-  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
-}
-
-/// Calculates the reciprocals of the values in a 256-bit vector of
-///    [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRCPPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the
-///    values in the operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_rcp_ps(__m256 __a)
-{
-  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
-}
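VRCPPS returns an approximation (relative error at most 1.5 * 2^-12), so callers that need more precision typically refine it. A sketch of one Newton-Raphson step (editorial, not part of the deleted header):

/* r' = r * (2 - a*r) roughly doubles the bits of precision. */
static __m256 rcp_refined(__m256 a) {
  __m256 r = _mm256_rcp_ps(a);
  return _mm256_mul_ps(r, _mm256_sub_ps(_mm256_set1_ps(2.0f),
                                        _mm256_mul_ps(a, r)));
}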
-
-/// Rounds the values in a 256-bit vector of [4 x double] as specified
-///    by the byte operand. The source values are rounded to integer values and
-///    returned as 64-bit double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_round_pd(__m256d V, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [4 x double].
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used. \n
-///      1: The PE field is not updated. \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M. \n
-///      1: Use the current MXCSR setting. \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest. \n
-///      01: Downward (toward negative infinity). \n
-///      10: Upward (toward positive infinity). \n
-///      11: Truncated.
-/// \returns A 256-bit vector of [4 x double] containing the rounded values.
-#define _mm256_round_pd(V, M) \
-  ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)))
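For example (editorial sketch, using the standard _MM_FROUND_* macros from <smmintrin.h>), rounding to nearest while suppressing precision exceptions sets bit [3] and leaves bits [2:0] clear:

__m256d r = _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);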
-
-/// Rounds the values stored in a 256-bit vector of [8 x float] as
-///    specified by the byte operand. The source values are rounded to integer
-///    values and returned as floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_round_ps(__m256 V, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [8 x float].
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used. \n
-///      1: The PE field is not updated. \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M. \n
-///      1: Use the current MXCSR setting. \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest. \n
-///      01: Downward (toward negative infinity). \n
-///      10: Upward (toward positive infinity). \n
-///      11: Truncated.
-/// \returns A 256-bit vector of [8 x float] containing the rounded values.
-#define _mm256_round_ps(V, M) \
-  ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)))
-
-/// Rounds up the values stored in a 256-bit vector of [4 x double]. The
-///    source values are rounded up to integer values and returned as 64-bit
-///    double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_ceil_pd(__m256d V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [4 x double].
-/// \returns A 256-bit vector of [4 x double] containing the rounded up values.
-#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
-
-/// Rounds down the values stored in a 256-bit vector of [4 x double].
-///    The source values are rounded down to integer values and returned as
-///    64-bit double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_floor_pd(__m256d V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [4 x double].
-/// \returns A 256-bit vector of [4 x double] containing the rounded down
-///    values.
-#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
-
-/// Rounds up the values stored in a 256-bit vector of [8 x float]. The
-///    source values are rounded up to integer values and returned as
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_ceil_ps(__m256 V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the rounded up values.
-#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
-
-/// Rounds down the values stored in a 256-bit vector of [8 x float]. The
-///    source values are rounded down to integer values and returned as
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_floor_ps(__m256 V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the rounded down values.
-#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
-
-/* Logical */
-/// Performs a bitwise AND of two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
-///    values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_and_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4du)__a & (__v4du)__b);
-}
-
-/// Performs a bitwise AND of two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
-///    values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_and_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8su)__a & (__v8su)__b);
-}
-
-/// Performs a bitwise AND of two 256-bit vectors of [4 x double], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDNPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing the right source operand.
-/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
-///    values of the second operand and the one's complement of the first
-///    operand.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_andnot_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)(~(__v4du)__a & (__v4du)__b);
-}
-
-/// Performs a bitwise AND of two 256-bit vectors of [8 x float], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDNPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing the right source operand.
-/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
-///    values of the second operand and the one's complement of the first
-///    operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_andnot_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)(~(__v8su)__a & (__v8su)__b);
-}
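A common use of the andnot form is clearing sign bits to compute absolute values; a sketch (editorial, x is an illustrative __m256):

/* |x| per lane: -0.0f has only the sign bit set, and ~mask & x clears it. */
__m256 absx = _mm256_andnot_ps(_mm256_set1_ps(-0.0f), x);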
-
-/// Performs a bitwise OR of two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VORPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the
-///    values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_or_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4du)__a | (__v4du)__b);
-}
-
-/// Performs a bitwise OR of two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VORPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the
-///    values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_or_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8su)__a | (__v8su)__b);
-}
-
-/// Performs a bitwise XOR of two 256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the
-///    values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_xor_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)((__v4du)__a ^ (__v4du)__b);
-}
-
-/// Performs a bitwise XOR of two 256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the
-///    values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_xor_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)((__v8su)__a ^ (__v8su)__b);
-}
-
-/* Horizontal arithmetic */
-/// Horizontally adds the adjacent pairs of values contained in two
-///    256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHADDPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-///    The horizontal sums of the values are returned in the even-indexed
-///    elements of a vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-///    The horizontal sums of the values are returned in the odd-indexed
-///    elements of a vector of [4 x double].
-/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of
-///    both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_hadd_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Horizontally adds the adjacent pairs of values contained in two
-///    256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHADDPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-///    The horizontal sums of the values are returned in the elements with
-///    index 0, 1, 4, 5 of a vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-///    The horizontal sums of the values are returned in the elements with
-///    index 2, 3, 6, 7 of a vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of
-///    both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_hadd_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
-}
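The index placement above (0, 1, 4, 5 versus 2, 3, 6, 7) reflects that VHADDPS pairs elements only within each 128-bit lane. A full horizontal sum therefore needs one cross-lane step first; a sketch (editorial, helper name hypothetical):

#include <immintrin.h>

/* Sum all 8 floats of a __m256. */
static float hsum256(__m256 v) {
  __m128 s = _mm_add_ps(_mm256_castps256_ps128(v),
                        _mm256_extractf128_ps(v, 1));
  s = _mm_hadd_ps(s, s);   /* 4 -> 2 partial sums */
  s = _mm_hadd_ps(s, s);   /* 2 -> 1 */
  return _mm_cvtss_f32(s);
}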
-
-/// Horizontally subtracts the adjacent pairs of values contained in two
-///    256-bit vectors of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHSUBPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-///    The horizontal differences between the values are returned in the
-///    even-indexed elements of a vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double] containing one of the source operands.
-///    The horizontal differences between the values are returned in the
-///    odd-indexed elements of a vector of [4 x double].
-/// \returns A 256-bit vector of [4 x double] containing the horizontal
-///    differences of both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_hsub_pd(__m256d __a, __m256d __b)
-{
-  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Horizontally subtracts the adjacent pairs of values contained in two
-///    256-bit vectors of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHSUBPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-///    The horizontal differences between the values are returned in the
-///    elements with index 0, 1, 4, 5 of a vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float] containing one of the source operands.
-///    The horizontal differences between the values are returned in the
-///    elements with index 2, 3, 6, 7 of a vector of [8 x float].
-/// \returns A 256-bit vector of [8 x float] containing the horizontal
-///    differences of both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_hsub_ps(__m256 __a, __m256 __b)
-{
-  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/* Vector permutations */
-/// Copies the values in a 128-bit vector of [2 x double] as specified
-///    by the 128-bit integer vector operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __c
-///    A 128-bit integer vector operand specifying how the values are to be
-///    copied. \n
-///    Bit [1]: \n
-///      0: Bits [63:0] of the source are copied to bits [63:0] of the returned
-///         vector. \n
-///      1: Bits [127:64] of the source are copied to bits [63:0] of the
-///         returned vector. \n
-///    Bit [65]: \n
-///      0: Bits [63:0] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///      1: Bits [127:64] of the source are copied to bits [127:64] of the
-///         returned vector.
-/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline __m128d __DEFAULT_FN_ATTRS128
-_mm_permutevar_pd(__m128d __a, __m128i __c)
-{
-  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
-}
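Concretely (editorial sketch, x an illustrative __m128d): swapping the two doubles sets selector bit [1] of the low qword and clears bit [65], i.e. bit [1] of the high qword.

__m128i sel = _mm_set_epi64x(0, 2);  /* high qword bit1 = 0, low qword bit1 = 1 */
__m128d swapped = _mm_permutevar_pd(x, sel);  /* = { x[1], x[0] } */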
-
-/// Copies the values in a 256-bit vector of [4 x double] as specified
-///    by the 256-bit integer vector operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \param __c
-///    A 256-bit integer vector operand specifying how the values are to be
-///    copied. \n
-///    Bit [1]: \n
-///      0: Bits [63:0] of the source are copied to bits [63:0] of the returned
-///         vector. \n
-///      1: Bits [127:64] of the source are copied to bits [63:0] of the
-///         returned vector. \n
-///    Bit [65]: \n
-///      0: Bits [63:0] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///      1: Bits [127:64] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///    Bit [129]: \n
-///      0: Bits [191:128] of the source are copied to bits [191:128] of the
-///         returned vector. \n
-///      1: Bits [255:192] of the source are copied to bits [191:128] of the
-///         returned vector. \n
-///    Bit [193]: \n
-///      0: Bits [191:128] of the source are copied to bits [255:192] of the
-///         returned vector. \n
-///      1: Bits [255:192] of the source are copied to bits [255:192] of the
-///         returned vector.
-/// \returns A 256-bit vector of [4 x double] containing the copied values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_permutevar_pd(__m256d __a, __m256i __c)
-{
-  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
-}
-
-/// Copies the values stored in a 128-bit vector of [4 x float] as
-///    specified by the 128-bit integer vector operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __c
-///    A 128-bit integer vector operand specifying how the values are to be
-///    copied. \n
-///    Bits [1:0]: \n
-///      00: Bits [31:0] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///    Bits [33:32]: \n
-///      00: Bits [31:0] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///    Bits [65:64]: \n
-///      00: Bits [31:0] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///    Bits [97:96]: \n
-///      00: Bits [31:0] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [127:96] of the
-///          returned vector.
-/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline __m128 __DEFAULT_FN_ATTRS128
-_mm_permutevar_ps(__m128 __a, __m128i __c)
-{
-  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
-}
-
-/// Copies the values stored in a 256-bit vector of [8 x float] as
-///    specified by the 256-bit integer vector operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \param __c
-///    A 256-bit integer vector operand specifying how the values are to be
-///    copied. \n
-///    Bits [1:0]: \n
-///      00: Bits [31:0] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///    Bits [33:32]: \n
-///      00: Bits [31:0] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///    Bits [65:64]: \n
-///      00: Bits [31:0] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///    Bits [97:96]: \n
-///      00: Bits [31:0] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///    Bits [129:128]: \n
-///      00: Bits [159:128] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///    Bits [161:160]: \n
-///      00: Bits [159:128] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///    Bits [193:192]: \n
-///      00: Bits [159:128] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///    Bits [225:224]: \n
-///      00: Bits [159:128] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [255:224] of the
-///          returned vector.
-/// \returns A 256-bit vector of [8 x float] containing the copied values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_permutevar_ps(__m256 __a, __m256i __c)
-{
-  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
-}
-
-/// Copies the values in a 128-bit vector of [2 x double] as specified
-///    by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_permute_pd(__m128d A, const int C);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
-///
-/// \param A
-///    A 128-bit vector of [2 x double].
-/// \param C
-///    An immediate integer operand specifying how the values are to be
-///    copied. \n
-///    Bit [0]: \n
-///      0: Bits [63:0] of the source are copied to bits [63:0] of the returned
-///         vector. \n
-///      1: Bits [127:64] of the source are copied to bits [63:0] of the
-///         returned vector. \n
-///    Bit [1]: \n
-///      0: Bits [63:0] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///      1: Bits [127:64] of the source are copied to bits [127:64] of the
-///         returned vector.
-/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_permute_pd(A, C) \
-  ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)))
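For instance (editorial sketch, x an illustrative __m128d), an immediate of 1 (bit [0] set, bit [1] clear) swaps the two elements:

__m128d swapped = _mm_permute_pd(x, 1);  /* = { x[1], x[0] } */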
-
-/// Copies the values in a 256-bit vector of [4 x double] as specified by
-///    the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_permute_pd(__m256d A, const int C);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
-///
-/// \param A
-///    A 256-bit vector of [4 x double].
-/// \param C
-///    An immediate integer operand specifying how the values are to be
-///    copied. \n
-///    Bit [0]: \n
-///      0: Bits [63:0] of the source are copied to bits [63:0] of the returned
-///         vector. \n
-///      1: Bits [127:64] of the source are copied to bits [63:0] of the
-///         returned vector. \n
-///    Bit [1]: \n
-///      0: Bits [63:0] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///      1: Bits [127:64] of the source are copied to bits [127:64] of the
-///         returned vector. \n
-///    Bit [2]: \n
-///      0: Bits [191:128] of the source are copied to bits [191:128] of the
-///         returned vector. \n
-///      1: Bits [255:192] of the source are copied to bits [191:128] of the
-///         returned vector. \n
-///    Bit [3]: \n
-///      0: Bits [191:128] of the source are copied to bits [255:192] of the
-///         returned vector. \n
-///      1: Bits [255:192] of the source are copied to bits [255:192] of the
-///         returned vector.
-/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_permute_pd(A, C) \
-  ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C)))
-
-/// Copies the values in a 128-bit vector of [4 x float] as specified by
-///    the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_permute_ps(__m128 A, const int C);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
-///
-/// \param A
-///    A 128-bit vector of [4 x float].
-/// \param C
-///    An immediate integer operand specifying how the values are to be
-///    copied. \n
-///    Bits [1:0]: \n
-///      00: Bits [31:0] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///    Bits [3:2]: \n
-///      00: Bits [31:0] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///    Bits [5:4]: \n
-///      00: Bits [31:0] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///    Bits [7:6]: \n
-///      00: Bits [31:0] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [127:96] of the
-///          returned vector.
-/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_permute_ps(A, C) \
-  ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C)))
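The _MM_SHUFFLE macro from <xmmintrin.h> builds the immediate from four 2-bit selectors; reversing the four floats as a sketch (editorial, x an illustrative __m128):

__m128 rev = _mm_permute_ps(x, _MM_SHUFFLE(0, 1, 2, 3));
/* = { x[3], x[2], x[1], x[0] } */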
-
-/// Copies the values in a 256-bit vector of [8 x float] as specified by
-///    the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_permute_ps(__m256 A, const int C);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
-///
-/// \param A
-///    A 256-bit vector of [8 x float].
-/// \param C
-///    An immediate integer operand specifying how the values are to be
-///    copied. \n
-///    Bits [1:0]: \n
-///      00: Bits [31:0] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [31:0] of the
-///          returned vector. \n
-///    Bits [3:2]: \n
-///      00: Bits [31:0] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [63:32] of the
-///          returned vector. \n
-///    Bits [5:4]: \n
-///      00: Bits [31:0] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [95:64] of the
-///          returned vector. \n
-///    Bits [7:6]: \n
-///      00: Bits [31:0] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      01: Bits [63:32] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      10: Bits [95:64] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///      11: Bits [127:96] of the source are copied to bits [127:96] of the
-///          returned vector. \n
-///    Bits [1:0]: \n
-///      00: Bits [159:128] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [159:128] of the
-///          returned vector. \n
-///    Bits [3:2]: \n
-///      00: Bits [159:128] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [191:160] of the
-///          returned vector. \n
-///    Bits [5:4]: \n
-///      00: Bits [159:128] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [223:192] of the
-///          returned vector. \n
-///    Bits [7:6]: \n
-///      00: Bits [159:128] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      01: Bits [191:160] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      10: Bits [223:192] of the source are copied to bits [255:224] of the
-///          returned vector. \n
-///      11: Bits [255:224] of the source are copied to bits [255:224] of the
-///          returned vector.
-/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_permute_ps(A, C) \
-  ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C)))
-
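-// Usage sketch (illustrative; not part of the original header). The same 8-bit
-// immediate is applied to each 128-bit half, so 0x00 broadcasts element 0
-// within each half:
-//   __m256 v = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
-//   __m256 r = _mm256_permute_ps(v, 0x00); // 1,1,1,1,5,5,5,5
-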
-/// Permutes 128-bit data values stored in two 256-bit vectors of
-///    [4 x double], as specified by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [4 x double].
-/// \param V2
-///    A 256-bit vector of [4 x double].
-/// \param M
-///    An immediate integer operand specifying how the values are to be
-///    permuted. \n
-///    Bits [1:0]: \n
-///      00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
-///          destination. \n
-///      01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
-///          destination. \n
-///      10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
-///          destination. \n
-///      11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
-///          destination. \n
-///    Bits [5:4]: \n
-///      00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
-///          destination. \n
-///      01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
-///          destination. \n
-///      10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
-///          destination. \n
-///      11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
-///          destination.
-/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_permute2f128_pd(V1, V2, M) \
-  ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
-                                            (__v4df)(__m256d)(V2), (int)(M)))
-
-/// Permutes 128-bit data values stored in two 256-bit vectors of
-///    [8 x float], as specified by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [8 x float].
-/// \param V2
-///    A 256-bit vector of [8 x float].
-/// \param M
-///    An immediate integer operand specifying how the values are to be
-///    permuted. \n
-///    Bits [1:0]: \n
-///    00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
-///    destination. \n
-///    01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
-///    destination. \n
-///    10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
-///    destination. \n
-///    11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
-///    destination. \n
-///    Bits [5:4]: \n
-///    00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
-///    destination. \n
-///    01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
-///    destination. \n
-///    10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
-///    destination. \n
-///    11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
-///    destination.
-/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_permute2f128_ps(V1, V2, M) \
-  ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
-                                           (__v8sf)(__m256)(V2), (int)(M)))
-
-/// Permutes 128-bit data values stored in two 256-bit integer vectors,
-///    as specified by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit integer vector.
-/// \param V2
-///    A 256-bit integer vector.
-/// \param M
-///    An immediate integer operand specifying how the values are to be
-///    copied. \n
-///    Bits [1:0]: \n
-///    00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
-///    destination. \n
-///    01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
-///    destination. \n
-///    10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
-///    destination. \n
-///    11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
-///    destination. \n
-///    Bits [5:4]: \n
-///    00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
-///    destination. \n
-///    01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
-///    destination. \n
-///    10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
-///    destination. \n
-///    11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
-///    destination.
-/// \returns A 256-bit integer vector containing the copied values.
-#define _mm256_permute2f128_si256(V1, V2, M) \
-  ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
-                                            (__v8si)(__m256i)(V2), (int)(M)))
-
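-// Usage sketch (illustrative; not part of the original header). Passing the
-// same vector for both operands with M = 0x01 swaps the two 128-bit halves, a
-// common idiom because the in-lane shuffles above cannot cross the 128-bit
-// boundary:
-//   __m256i swapped = _mm256_permute2f128_si256(v, v, 0x01);
-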
-/* Vector Blend */
-/// Merges 64-bit double-precision data values stored in either of the
-///    two 256-bit vectors of [4 x double], as specified by the immediate
-///    integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VBLENDPD </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [4 x double].
-/// \param V2
-///    A 256-bit vector of [4 x double].
-/// \param M
-///    An immediate integer operand, with mask bits [3:0] specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    index of a copied value. When a mask bit is 0, the corresponding 64-bit
-///    element in operand \a V1 is copied to the same position in the
-///    destination. When a mask bit is 1, the corresponding 64-bit element in
-///    operand \a V2 is copied to the same position in the destination.
-/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_blend_pd(V1, V2, M) \
-  ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
-                                      (__v4df)(__m256d)(V2), (int)(M)))
-
-/// Merges 32-bit single-precision data values stored in either of the
-///    two 256-bit vectors of [8 x float], as specified by the immediate
-///    integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VBLENDPS </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [8 x float].
-/// \param V2
-///    A 256-bit vector of [8 x float].
-/// \param M
-///    An immediate integer operand, with mask bits [7:0] specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    index of a copied value. When a mask bit is 0, the corresponding 32-bit
-///    element in operand \a V1 is copied to the same position in the
-///    destination. When a mask bit is 1, the corresponding 32-bit element in
-///    operand \a V2 is copied to the same position in the destination.
-/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_blend_ps(V1, V2, M) \
-  ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
-                                     (__v8sf)(__m256)(V2), (int)(M)))
-
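-// Usage sketch (illustrative; not part of the original header). With
-// M = 0xAA (binary 10101010), odd-indexed elements come from V2 and
-// even-indexed elements from V1:
-//   __m256 r = _mm256_blend_ps(a, b, 0xAA); // a0,b1,a2,b3,a4,b5,a6,b7
-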
-/// Merges 64-bit double-precision data values stored in either of the
-///    two 256-bit vectors of [4 x double], as specified by the 256-bit vector
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDVPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double].
-/// \param __c
-///    A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying
-///    how the values are to be copied. The position of the mask bit corresponds
-///    to the most significant bit of a copied value. When a mask bit is 0, the
-///    corresponding 64-bit element in operand \a __a is copied to the same
-///    position in the destination. When a mask bit is 1, the corresponding
-///    64-bit element in operand \a __b is copied to the same position in the
-///    destination.
-/// \returns A 256-bit vector of [4 x double] containing the copied values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
-{
-  return (__m256d)__builtin_ia32_blendvpd256(
-    (__v4df)__a, (__v4df)__b, (__v4df)__c);
-}
-
-/// Merges 32-bit single-precision data values stored in either of the
-///    two 256-bit vectors of [8 x float], as specified by the 256-bit vector
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDVPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float].
-/// \param __c
-///    A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63,
-///    and 31 specifying how the values are to be copied. The position of the
-///    mask bit corresponds to the most significant bit of a copied value. When
-///    a mask bit is 0, the corresponding 32-bit element in operand \a __a is
-///    copied to the same position in the destination. When a mask bit is 1, the
-///    corresponding 32-bit element in operand \a __b is copied to the same
-///    position in the destination.
-/// \returns A 256-bit vector of [8 x float] containing the copied values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
-{
-  return (__m256)__builtin_ia32_blendvps256(
-    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
-}
-
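-// Usage sketch (illustrative; not part of the original header). The mask is
-// typically produced by a comparison; here it selects the smaller element of
-// each pair, i.e. a per-lane minimum (_mm256_cmp_pd and _CMP_LT_OQ are
-// defined later in this header):
-//   __m256d m = _mm256_cmp_pd(x, y, _CMP_LT_OQ); // all-1s where x < y
-//   __m256d r = _mm256_blendv_pd(y, x, m);       // min(x, y) per lane
-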
-/* Vector Dot Product */
-/// Computes two dot products in parallel, using the lower and upper
-///    halves of two [8 x float] vectors as input to the two computations, and
-///    returning the two dot products in the lower and upper halves of the
-///    [8 x float] result.
-///
-///    The immediate integer operand controls which input elements will
-///    contribute to the dot product, and where the final results are returned.
-///    In general, for each dot product, the four corresponding elements of the
-///    input vectors are multiplied; the first two and second two products are
-///    summed, then the two sums are added to form the final result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VDPPS </c> instruction.
-///
-/// \param V1
-///    A vector of [8 x float] values, treated as two [4 x float] vectors.
-/// \param V2
-///    A vector of [8 x float] values, treated as two [4 x float] vectors.
-/// \param M
-///    An immediate integer argument. Bits [7:4] determine which elements of
-///    the input vectors are used, with bit [4] corresponding to the lowest
-///    element and bit [7] corresponding to the highest element of each [4 x
-///    float] subvector. If a bit is set, the corresponding elements from the
-///    two input vectors are used as an input for dot product; otherwise that
-///    input is treated as zero. Bits [3:0] determine which elements of the
-///    result will receive a copy of the final dot product, with bit [0]
-///    corresponding to the lowest element and bit [3] corresponding to the
-///    highest element of each [4 x float] subvector. If a bit is set, the dot
-///    product is returned in the corresponding element; otherwise that element
-///    is set to zero. The bitmask is applied in the same way to each of the
-///    two parallel dot product computations.
-/// \returns A 256-bit vector of [8 x float] containing the two dot products.
-#define _mm256_dp_ps(V1, V2, M) \
-  ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
-                                  (__v8sf)(__m256)(V2), (M)))
-
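-// Usage sketch (illustrative; not part of the original header). With
-// M = 0xF1, bits [7:4] select all four inputs of each half and bit [0]
-// places each dot product in the lowest element of its half:
-//   __m256 r = _mm256_dp_ps(a, b, 0xF1);
-//   // r[0] = a0*b0 + a1*b1 + a2*b2 + a3*b3, r[4] = the same over lanes 4..7,
-//   // and all other elements are zero.
-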
-/* Vector shuffle */
-/// Selects 8 float values from the 256-bit operands of [8 x float], as
-///    specified by the immediate value operand.
-///
-///    The four selected elements in each operand are copied to the destination
-///    according to the bits specified in the immediate operand. The selected
-///    elements from the first 256-bit operand are copied to bits [63:0] and
-///    bits [191:128] of the destination, and the selected elements from the
-///    second 256-bit operand are copied to bits [127:64] and bits [255:192] of
-///    the destination. For example, if bits [7:0] of the immediate operand
-///    contain a value of 0xFF, the 256-bit destination vector would contain the
-///    following values: b[7], b[7], a[7], a[7], b[3], b[3], a[3], a[3].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPS </c> instruction.
-///
-/// \param a
-///    A 256-bit vector of [8 x float]. The four selected elements in this
-///    operand are copied to bits [63:0] and bits [191:128] in the destination,
-///    according to the bits specified in the immediate operand.
-/// \param b
-///    A 256-bit vector of [8 x float]. The four selected elements in this
-///    operand are copied to bits [127:64] and bits [255:192] in the
-///    destination, according to the bits specified in the immediate operand.
-/// \param mask
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a and \a b. \n
-///    Bits [3:0] specify the values copied from operand \a a. \n
-///    Bits [7:4] specify the values copied from operand \a b. \n
-///    The elements of the 256-bit destination are assigned values as follows,
-///    according to the bit value assignments described below: \n
-///    Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the
-///    destination. \n
-///    Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the
-///    destination. \n
-///    Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the
-///    destination. \n
-///    Bits [7:6] are used to assign values to bits [127:96] and [255:224] in
-///    the destination. \n
-///    Bit value assignments: \n
-///    00: Bits [31:0] and [159:128] are copied from the selected operand. \n
-///    01: Bits [63:32] and [191:160] are copied from the selected operand. \n
-///    10: Bits [95:64] and [223:192] are copied from the selected operand. \n
-///    11: Bits [127:96] and [255:224] are copied from the selected operand.
-/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
-#define _mm256_shuffle_ps(a, b, mask) \
-  ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
-                                    (__v8sf)(__m256)(b), (int)(mask)))
-
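-// Usage sketch (illustrative; not part of the original header). With
-// mask = 0x44 (two-bit fields 00,01,00,01), each half of the result takes
-// elements 0 and 1 of the first operand followed by elements 0 and 1 of the
-// second:
-//   __m256 r = _mm256_shuffle_ps(a, b, 0x44); // a0,a1,b0,b1,a4,a5,b4,b5
-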
-/// Selects four double-precision values from the 256-bit operands of
-///    [4 x double], as specified by the immediate value operand.
-///
-///    The selected elements from the first 256-bit operand are copied to bits
-///    [63:0] and bits [191:128] in the destination, and the selected elements
-///    from the second 256-bit operand are copied to bits [127:64] and bits
-///    [255:192] in the destination. For example, if bits [3:0] of the immediate
-///    operand contain a value of 0xF, the 256-bit destination vector would
-///    contain the following values: b[3], a[3], b[1], a[1].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPD </c> instruction.
-///
-/// \param a
-///    A 256-bit vector of [4 x double].
-/// \param b
-///    A 256-bit vector of [4 x double].
-/// \param mask
-///    An immediate value, with bits [3:0] specifying which elements to copy
-///    from \a a and \a b: \n
-///    Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the
-///    destination. \n
-///    Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the
-///    destination. \n
-///    Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the
-///    destination. \n
-///    Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the
-///    destination. \n
-///    Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the
-///    destination. \n
-///    Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the
-///    destination. \n
-///    Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the
-///    destination. \n
-///    Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the
-///    destination.
-/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
-#define _mm256_shuffle_pd(a, b, mask) \
-  ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
-                                     (__v4df)(__m256d)(b), (int)(mask)))
-
-/* Compare */
-#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
-#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling)  */
-#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling)  */
-#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling)  */
-#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling)  */
-#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling)  */
-#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling)  */
-#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling)   */
-#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling)  */
-#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unordered, signaling)  */
-#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling)  */
-#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling)  */
-#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling)  */
-#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling)  */
-#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling)  */
-#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling)  */
-#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling)  */
-#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling)  */
-#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling)  */
-#define _CMP_UNORD_S  0x13 /* Unordered (signaling)  */
-#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling)  */
-#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling)  */
-#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unordered, non-signaling)  */
-#define _CMP_ORD_S    0x17 /* Ordered (signaling)  */
-#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling)  */
-#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unordered, non-signaling)  */
-#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling)  */
-#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling)  */
-#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling)  */
-#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling)  */
-#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
-#define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */
-
-/// Compares each of the corresponding double-precision values of two
-///    128-bit vectors of [2 x double], using the operation specified by the
-///    immediate integer operand.
-///
-///    Returns a [2 x double] vector consisting of two doubles corresponding to
-///    the two comparison results: zero if the comparison is false, and all 1's
-///    if the comparison is true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPPD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_pd(a, b, c) \
-  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
-                                 (__v2df)(__m128d)(b), (c)))
-
-/// Compares each of the corresponding values of two 128-bit vectors of
-///    [4 x float], using the operation specified by the immediate integer
-///    operand.
-///
-///    Returns a [4 x float] vector consisting of four floats corresponding to
-///    the four comparison results: zero if the comparison is false, and all 1's
-///    if the comparison is true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPPS </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [4 x float].
-/// \param b
-///    A 128-bit vector of [4 x float].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ps(a, b, c) \
-  ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
-                                (__v4sf)(__m128)(b), (c)))
-
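-// Usage sketch (illustrative; not part of the original header). The predicate
-// macros above name the 32 comparison encodings; the resulting all-0s/all-1s
-// lanes can be collapsed to a bitmask with the SSE movemask intrinsic:
-//   __m128 m = _mm_cmp_ps(a, b, _CMP_LT_OQ); // all-1s where a < b
-//   int bits = _mm_movemask_ps(m);           // one bit per lane
-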
-/// Compares each of the corresponding double-precision values of two
-///    256-bit vectors of [4 x double], using the operation specified by the
-///    immediate integer operand.
-///
-///    Returns a [4 x double] vector consisting of four doubles corresponding to
-///    the four comparison results: zero if the comparison is false, and all 1's
-///    if the comparison is true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPPD </c> instruction.
-///
-/// \param a
-///    A 256-bit vector of [4 x double].
-/// \param b
-///    A 256-bit vector of [4 x double].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 256-bit vector of [4 x double] containing the comparison results.
-#define _mm256_cmp_pd(a, b, c) \
-  ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
-                                    (__v4df)(__m256d)(b), (c)))
-
-/// Compares each of the corresponding values of two 256-bit vectors of
-///    [8 x float], using the operation specified by the immediate integer
-///    operand.
-///
-///    Returns an [8 x float] vector consisting of eight floats corresponding to
-///    the eight comparison results: zero if the comparison is false, and all
-///    1's if the comparison is true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPPS </c> instruction.
-///
-/// \param a
-///    A 256-bit vector of [8 x float].
-/// \param b
-///    A 256-bit vector of [8 x float].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 256-bit vector of [8 x float] containing the comparison results.
-#define _mm256_cmp_ps(a, b, c) \
-  ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
-                                   (__v8sf)(__m256)(b), (c)))
-
-/// Compares each of the corresponding scalar double-precision values of
-///    two 128-bit vectors of [2 x double], using the operation specified by the
-///    immediate integer operand.
-///
-///    If the result is true, all 64 bits of the lower element of the
-///    destination vector are set; otherwise they are cleared. Bits [127:64] of
-///    the destination are copied from operand \a a.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPSD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_sd(a, b, c) \
-  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
-                                 (__v2df)(__m128d)(b), (c)))
-
-/// Compares each of the corresponding scalar values of two 128-bit
-///    vectors of [4 x float], using the operation specified by the immediate
-///    integer operand.
-///
-///    If the result is true, all 32 bits of the lowest element of the
-///    destination vector are set; otherwise they are cleared. Bits [127:32] of
-///    the destination are copied from operand \a a.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCMPSS </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [4 x float].
-/// \param b
-///    A 128-bit vector of [4 x float].
-/// \param c
-///    An immediate integer operand, with bits [4:0] specifying which comparison
-///    operation to use: \n
-///    0x00: Equal (ordered, non-signaling) \n
-///    0x01: Less-than (ordered, signaling) \n
-///    0x02: Less-than-or-equal (ordered, signaling) \n
-///    0x03: Unordered (non-signaling) \n
-///    0x04: Not-equal (unordered, non-signaling) \n
-///    0x05: Not-less-than (unordered, signaling) \n
-///    0x06: Not-less-than-or-equal (unordered, signaling) \n
-///    0x07: Ordered (non-signaling) \n
-///    0x08: Equal (unordered, non-signaling) \n
-///    0x09: Not-greater-than-or-equal (unordered, signaling) \n
-///    0x0A: Not-greater-than (unordered, signaling) \n
-///    0x0B: False (ordered, non-signaling) \n
-///    0x0C: Not-equal (ordered, non-signaling) \n
-///    0x0D: Greater-than-or-equal (ordered, signaling) \n
-///    0x0E: Greater-than (ordered, signaling) \n
-///    0x0F: True (unordered, non-signaling) \n
-///    0x10: Equal (ordered, signaling) \n
-///    0x11: Less-than (ordered, non-signaling) \n
-///    0x12: Less-than-or-equal (ordered, non-signaling) \n
-///    0x13: Unordered (signaling) \n
-///    0x14: Not-equal (unordered, signaling) \n
-///    0x15: Not-less-than (unordered, non-signaling) \n
-///    0x16: Not-less-than-or-equal (unordered, non-signaling) \n
-///    0x17: Ordered (signaling) \n
-///    0x18: Equal (unordered, signaling) \n
-///    0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
-///    0x1A: Not-greater-than (unordered, non-signaling) \n
-///    0x1B: False (ordered, signaling) \n
-///    0x1C: Not-equal (ordered, signaling) \n
-///    0x1D: Greater-than-or-equal (ordered, non-signaling) \n
-///    0x1E: Greater-than (ordered, non-signaling) \n
-///    0x1F: True (unordered, signaling)
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ss(a, b, c) \
-  ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
-                                (__v4sf)(__m128)(b), (c)))
-
-/// Takes an [8 x i32] vector and returns the vector element value
-///    indexed by the immediate constant operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x i32].
-/// \param __imm
-///    An immediate integer operand with bits [2:0] determining which vector
-///    element is extracted and returned.
-/// \returns A 32-bit integer containing the extracted 32-bit element.
-#define _mm256_extract_epi32(X, N) \
-  ((int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N)))
-
-/// Takes a [16 x i16] vector and returns the vector element value
-///    indexed by the immediate constant operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A 256-bit integer vector of [16 x i16].
-/// \param __imm
-///    An immediate integer operand with bits [3:0] determining which vector
-///    element is extracted and returned.
-/// \returns A 32-bit integer containing the extracted 16-bit element,
-///    zero-extended to 32 bits.
-#define _mm256_extract_epi16(X, N) \
-  ((int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
-                                                     (int)(N)))
-
-/// Takes a [32 x i8] vector and returns the vector element value
-///    indexed by the immediate constant operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A 256-bit integer vector of [32 x i8].
-/// \param __imm
-///    An immediate integer operand with bits [4:0] determining which vector
-///    element is extracted and returned.
-/// \returns A 32-bit integer containing the extracted 8-bit element,
-///    zero-extended to 32 bits.
-#define _mm256_extract_epi8(X, N) \
-  ((int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
-                                                    (int)(N)))
-
-#ifdef __x86_64__
-/// Takes a [4 x i64] vector and returns the vector element value
-///    indexed by the immediate constant operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A 256-bit integer vector of [4 x i64].
-/// \param __imm
-///    An immediate integer operand with bits [1:0] determining which vector
-///    element is extracted and returned.
-/// \returns A 64-bit integer containing the extracted 64-bit element.
-#define _mm256_extract_epi64(X, N) \
-  ((long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N)))
-#endif
-
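-// Usage sketch (illustrative; not part of the original header):
-//   __m256i v = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
-//   int e = _mm256_extract_epi32(v, 5); // e == 15
-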
-/// Takes an [8 x i32] vector and replaces the vector element value
-///    indexed by the immediate constant operand with a new value. Returns the
-///    modified vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A vector of [8 x i32] to be used by the insert operation.
-/// \param __b
-///    An integer value. The replacement value for the insert operation.
-/// \param __imm
-///    An immediate integer specifying the index of the vector element to be
-///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
-#define _mm256_insert_epi32(X, I, N) \
-  ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
-                                        (int)(I), (int)(N)))
-
-
-/// Takes a [16 x i16] vector and replaces the vector element value
-///    indexed by the immediate constant operand with a new value. Returns the
-///    modified vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A vector of [16 x i16] to be used by the insert operation.
-/// \param __b
-///    An i16 integer value. The replacement value for the insert operation.
-/// \param __imm
-///    An immediate integer specifying the index of the vector element to be
-///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
-#define _mm256_insert_epi16(X, I, N) \
-  ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
-                                         (int)(I), (int)(N)))
-
-/// Takes a [32 x i8] vector and replaces the vector element value
-///    indexed by the immediate constant operand with a new value. Returns the
-///    modified vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A vector of [32 x i8] to be used by the insert operation.
-/// \param __b
-///    An i8 integer value. The replacement value for the insert operation.
-/// \param __imm
-///    An immediate integer specifying the index of the vector element to be
-///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
-#define _mm256_insert_epi8(X, I, N) \
-  ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
-                                         (int)(I), (int)(N)))
-
-#ifdef __x86_64__
-/// Takes a [4 x i64] vector and replaces the vector element value
-///    indexed by the immediate constant operand with a new value. Returns the
-///    modified vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
-///   instruction.
-///
-/// \param __a
-///    A vector of [4 x i64] to be used by the insert operation.
-/// \param __b
-///    A 64-bit integer value. The replacement value for the insert operation.
-/// \param __imm
-///    An immediate integer specifying the index of the vector element to be
-///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///     \a __imm with \a __b.
-#define _mm256_insert_epi64(X, I, N) \
-  ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
-                                        (long long)(I), (int)(N)))
-#endif
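-
-// Usage sketch (illustrative; not part of the original header). The insert
-// forms return a modified copy; the source vector is unchanged:
-//   __m256i w = _mm256_insert_epi16(v, 42, 3); // copy of v with element 3 == 42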
-
-/* Conversion */
-/// Converts a vector of [4 x i32] into a vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32].
-/// \returns A 256-bit vector of [4 x double] containing the converted values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_cvtepi32_pd(__m128i __a)
-{
-  return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
-}
-
-/// Converts a vector of [8 x i32] into a vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PS </c> instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 256-bit vector of [8 x float] containing the converted values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_cvtepi32_ps(__m256i __a)
-{
-  return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);
-}
-
-/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2PS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline __m128 __DEFAULT_FN_ATTRS
-_mm256_cvtpd_ps(__m256d __a)
-{
-  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
-}
-
-/// Converts a vector of [8 x float] into a vector of [8 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit integer vector containing the converted values.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_cvtps_epi32(__m256 __a)
-{
-  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
-}
-
-/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4
-///    x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 256-bit vector of [4 x double] containing the converted values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_cvtps_pd(__m128 __a)
-{
-  return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
-}
-
-/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
-///    x i32], truncating the result by rounding towards zero when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 128-bit integer vector containing the converted values.
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm256_cvttpd_epi32(__m256d __a)
-{
-  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
-}
-
-/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
-///    x i32]. When a conversion is inexact, the value returned is rounded
-///    according to the rounding control bits in the MXCSR register.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 128-bit integer vector containing the converted values.
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm256_cvtpd_epi32(__m256d __a)
-{
-  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
-}
-
-/// Converts a vector of [8 x float] into a vector of [8 x i32],
-///    truncating the result by rounding towards zero when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 256-bit integer vector containing the converted values.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_cvttps_epi32(__m256 __a)
-{
-  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
-}
-
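-// Usage sketch (illustrative; not part of the original header), contrasting
-// the truncating and MXCSR-rounded conversions:
-//   __m256 f = _mm256_set1_ps(2.7f);
-//   __m256i t = _mm256_cvttps_epi32(f); // truncates toward zero: all 2
-//   __m256i n = _mm256_cvtps_epi32(f);  // default rounding (nearest): all 3
-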
-/// Returns the first element of the input vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 64-bit double containing the first element of the input vector.
-static __inline double __DEFAULT_FN_ATTRS
-_mm256_cvtsd_f64(__m256d __a)
-{
- return __a[0];
-}
-
-/// Returns the first element of the input vector of [8 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x i32].
-/// \returns A 32-bit integer containing the first element of the input vector.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_cvtsi256_si32(__m256i __a)
-{
- __v8si __b = (__v8si)__a;
- return __b[0];
-}
-
-/// Returns the first element of the input vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 32-bit float containing the first element of the input vector.
-static __inline float __DEFAULT_FN_ATTRS
-_mm256_cvtss_f32(__m256 __a)
-{
- return __a[0];
-}
-
-/* Vector replicate */
-/// Moves and duplicates odd-indexed values from a 256-bit vector of
-///    [8 x float] to float values in a 256-bit vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSHDUP </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [255:224] of \a __a are written to bits [255:224] and [223:192] of
-///    the return value. \n
-///    Bits [191:160] of \a __a are written to bits [191:160] and [159:128] of
-///    the return value. \n
-///    Bits [127:96] of \a __a are written to bits [127:96] and [95:64] of the
-///    return value. \n
-///    Bits [63:32] of \a __a are written to bits [63:32] and [31:0] of the
-///    return value.
-/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
-///    values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_movehdup_ps(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
-}
-
-/// Moves and duplicates even-indexed values from a 256-bit vector of
-///    [8 x float] to float values in a 256-bit vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSLDUP </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [223:192] of \a __a are written to bits [255:224] and [223:192] of
-///    the return value. \n
-///    Bits [159:128] of \a __a are written to bits [191:160] and [159:128] of
-///    the return value. \n
-///    Bits [95:64] of \a __a are written to bits [127:96] and [95:64] of the
-///    return value. \n
-///    Bits [31:0] of \a __a are written to bits [63:32] and [31:0] of the
-///    return value.
-/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
-///    values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_moveldup_ps(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
-}
-
-/// Moves and duplicates double-precision floating point values from a
-///    256-bit vector of [4 x double] to double-precision values in a 256-bit
-///    vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double]. \n
-///    Bits [63:0] of \a __a are written to bits [127:64] and [63:0] of the
-///    return value. \n
-///    Bits [191:128] of \a __a are written to bits [255:192] and [191:128] of
-///    the return value.
-/// \returns A 256-bit vector of [4 x double] containing the moved and
-///    duplicated values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_movedup_pd(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
-}
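-
-// Usage sketch (illustrative; not part of the original header):
-//   __m256 v = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
-//   __m256 h = _mm256_movehdup_ps(v); // 2,2,4,4,6,6,8,8
-//   __m256 l = _mm256_moveldup_ps(v); // 1,1,3,3,5,5,7,7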
-
-/* Unpack and Interleave */
-/// Unpacks the odd-indexed vector elements from two 256-bit vectors of
-///    [4 x double] and interleaves them into a 256-bit vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double]. \n
-///    Bits [127:64] are written to bits [63:0] of the return value. \n
-///    Bits [255:192] are written to bits [191:128] of the return value. \n
-/// \param __b
-///    A 256-bit floating-point vector of [4 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the return value. \n
-///    Bits [255:192] are written to bits [255:192] of the return value. \n
-/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpackhi_pd(__m256d __a, __m256d __b)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
-}
-
-/// Unpacks the even-indexed vector elements from two 256-bit vectors of
-///    [4 x double] and interleaves them into a 256-bit vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the return value. \n
-///    Bits [191:128] are written to bits [191:128] of the return value.
-/// \param __b
-///    A 256-bit floating-point vector of [4 x double]. \n
-///    Bits [63:0] are written to bits [127:64] of the return value. \n
-///    Bits [191:128] are written to bits [255:192] of the return value. \n
-/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpacklo_pd(__m256d __a, __m256d __b)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
-}
-
-/// Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the
-///    two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
-///    vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [95:64] are written to bits [31:0] of the return value. \n
-///    Bits [127:96] are written to bits [95:64] of the return value. \n
-///    Bits [223:192] are written to bits [159:128] of the return value. \n
-///    Bits [255:224] are written to bits [223:192] of the return value.
-/// \param __b
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [95:64] are written to bits [63:32] of the return value. \n
-///    Bits [127:96] are written to bits [127:96] of the return value. \n
-///    Bits [223:192] are written to bits [191:160] of the return value. \n
-///    Bits [255:224] are written to bits [255:224] of the return value.
-/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpackhi_ps(__m256 __a, __m256 __b)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
-}
-
-/// Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the
-///    two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
-///    vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [31:0] are written to bits [31:0] of the return value. \n
-///    Bits [63:32] are written to bits [95:64] of the return value. \n
-///    Bits [159:128] are written to bits [159:128] of the return value. \n
-///    Bits [191:160] are written to bits [223:192] of the return value.
-/// \param __b
-///    A 256-bit vector of [8 x float]. \n
-///    Bits [31:0] are written to bits [63:32] of the return value. \n
-///    Bits [63:32] are written to bits [127:96] of the return value. \n
-///    Bits [159:128] are written to bits [191:160] of the return value. \n
-///    Bits [191:160] are written to bits [255:224] of the return value.
-/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpacklo_ps(__m256 __a, __m256 __b)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
-}
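-
-/* User-level sketch (assumes <immintrin.h>): the float unpacks interleave
-   within each 128-bit half, combining elements 0,1,4,5 (lo) or 2,3,6,7 (hi)
-   of the two sources. */
-static inline __m256 unpack_ps_demo(void)
-{
-  __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
-  __m256 b = _mm256_setr_ps(10, 11, 12, 13, 14, 15, 16, 17);
-  return _mm256_unpacklo_ps(a, b);  /* {0,10,1,11,4,14,5,15} */
-}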
-
-/* Bit Test */
-/// Given two 128-bit floating-point vectors of [2 x double], perform an
-///    element-by-element comparison of the double-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the ZF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns the ZF flag in the EFLAGS register.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testz_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Given two 128-bit floating-point vectors of [2 x double], perform an
-///    element-by-element comparison of the double-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the CF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns the CF flag in the EFLAGS register.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testc_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Given two 128-bit floating-point vectors of [2 x double], perform an
-///    element-by-element comparison of the double-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns 1 if both the ZF and CF flags are set to 0,
-///    otherwise it returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testnzc_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
-}
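-
-/* User-level sketch (assumes <immintrin.h>): the VTESTPD family inspects
-   only sign bits. testz answers "is there no lane that is negative in both
-   inputs?", and testc(a, b) answers "is every negative lane of b also
-   negative in a?". */
-static inline int no_shared_negative(__m128d a, __m128d b)
-{
-  return _mm_testz_pd(a, b);  /* ZF: 1 iff no lane has both sign bits set */
-}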
-
-/// Given two 128-bit floating-point vectors of [4 x float], perform an
-///    element-by-element comparison of the single-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the ZF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns the ZF flag.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testz_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Given two 128-bit floating-point vectors of [4 x float], perform an
-///    element-by-element comparison of the single-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the CF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns the CF flag.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testc_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Given two 128-bit floating-point vectors of [4 x float], perform an
-///    element-by-element comparison of the single-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns 1 if both the ZF and CF flags are set to 0,
-///    otherwise it returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS128
-_mm_testnzc_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [4 x double], perform an
-///    element-by-element comparison of the double-precision elements in the
-///    first source vector and the corresponding elements in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the ZF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double].
-/// \returns the ZF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testz_pd(__m256d __a, __m256d __b)
-{
-  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [4 x double], perform an
-///    element-by-element comparison of the double-precision elements in the
-///    first source vector and the corresponding elements in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the CF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double].
-/// \returns the CF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testc_pd(__m256d __a, __m256d __b)
-{
-  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [4 x double], perform an
-///    element-by-element comparison of the double-precision elements in the
-///    first source vector and the corresponding elements in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of double-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns 1 if both the ZF and CF flags are set to 0,
-///    otherwise it returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \param __b
-///    A 256-bit vector of [4 x double].
-/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testnzc_pd(__m256d __a, __m256d __b)
-{
-  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [8 x float], perform an
-///    element-by-element comparison of the single-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the ZF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float].
-/// \returns the ZF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testz_ps(__m256 __a, __m256 __b)
-{
-  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [8 x float], perform an
-///    element-by-element comparison of the single-precision element in the
-///    first source vector and the corresponding element in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the CF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float].
-/// \returns the CF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testc_ps(__m256 __a, __m256 __b)
-{
-  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/// Given two 256-bit floating-point vectors of [8 x float], perform an
-///    element-by-element comparison of the single-precision elements in the
-///    first source vector and the corresponding elements in the second source
-///    vector.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
-///    ZF flag is set to 1. \n
-///    If there is at least one pair of single-precision elements where the
-///    sign-bit of the first element is 0 and the sign-bit of the second element
-///    is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns 1 if both the ZF and CF flags are set to 0,
-///    otherwise it returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \param __b
-///    A 256-bit vector of [8 x float].
-/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testnzc_ps(__m256 __a, __m256 __b)
-{
-  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
-}
-
-/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
-///    of the two source vectors.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of bits where both bits are 1, the ZF flag
-///    is set to 0. Otherwise the ZF flag is set to 1. \n
-///    If there is at least one pair of bits where the bit from the first source
-///    vector is 0 and the bit from the second source vector is 1, the CF flag
-///    is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the ZF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \param __b
-///    A 256-bit integer vector.
-/// \returns the ZF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testz_si256(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
-}
-
-/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
-///    of the two source vectors.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of bits where both bits are 1, the ZF flag
-///    is set to 0. Otherwise the ZF flag is set to 1. \n
-///    If there is at least one pair of bits where the bit from the first source
-///    vector is 0 and the bit from the second source vector is 1, the CF flag
-///    is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns the value of the CF flag.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \param __b
-///    A 256-bit integer vector.
-/// \returns the CF flag.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testc_si256(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
-}
-
-/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
-///    of the two source vectors.
-///
-///    The EFLAGS register is updated as follows: \n
-///    If there is at least one pair of bits where both bits are 1, the ZF flag
-///    is set to 0. Otherwise the ZF flag is set to 1. \n
-///    If there is at least one pair of bits where the bit from the first source
-///    vector is 0 and the bit from the second source vector is 1, the CF flag
-///    is set to 0. Otherwise the CF flag is set to 1. \n
-///    This intrinsic returns 1 if both the ZF and CF flags are set to 0,
-///    otherwise it returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \param __b
-///    A 256-bit integer vector.
-/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_testnzc_si256(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
-}
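-
-/* User-level sketch (assumes <immintrin.h>): the classic uses of VPTEST.
-   testz(v, v) checks a vector for all-zero bits; testc(a, mask) checks that
-   every bit set in mask is also set in a. */
-static inline int is_all_zero(__m256i v)
-{
-  return _mm256_testz_si256(v, v);      /* ZF: 1 iff (v & v) == 0 */
-}
-static inline int has_all_bits(__m256i a, __m256i mask)
-{
-  return _mm256_testc_si256(a, mask);   /* CF: 1 iff (~a & mask) == 0 */
-}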
-
-/* Vector extract sign mask */
-/// Extracts the sign bits of double-precision floating point elements
-///    in a 256-bit vector of [4 x double] and writes them to the lower order
-///    bits of the return value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPD </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the double-precision
-///    floating point values with sign bits to be extracted.
-/// \returns The sign bits from the operand, written to bits [3:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_pd(__m256d __a)
-{
-  return __builtin_ia32_movmskpd256((__v4df)__a);
-}
-
-/// Extracts the sign bits of single-precision floating point elements
-///    in a 256-bit vector of [8 x float] and writes them to the lower order
-///    bits of the return value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPS </c> instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the single-precision floating
-///    point values with sign bits to be extracted.
-/// \returns The sign bits from the operand, written to bits [7:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_ps(__m256 __a)
-{
-  return __builtin_ia32_movmskps256((__v8sf)__a);
-}
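-
-/* User-level sketch (assumes <immintrin.h>): the sign mask packs one bit per
-   element, so scalar bit tricks can answer per-lane questions cheaply. */
-static inline int count_negative_lanes(__m256 v)
-{
-  int m = _mm256_movemask_ps(v);   /* bit i = sign bit of element i */
-  return __builtin_popcount(m);    /* number of lanes with sign bit set */
-}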
-
-/* Vector zero */
-/// Zeroes the contents of all XMM or YMM registers.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VZEROALL </c> instruction.
-static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
-_mm256_zeroall(void)
-{
-  __builtin_ia32_vzeroall();
-}
-
-/// Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VZEROUPPER </c> instruction.
-static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
-_mm256_zeroupper(void)
-{
-  __builtin_ia32_vzeroupper();
-}
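-
-/* User-level sketch: clearing the upper YMM state before calling code
-   compiled for legacy SSE avoids AVX/SSE transition penalties on some
-   microarchitectures. legacy_sse_routine is a hypothetical placeholder. */
-void legacy_sse_routine(void);  /* assumed to be compiled without AVX */
-static inline void finish_avx_section(void)
-{
-  _mm256_zeroupper();  /* bits 255:128 of every YMM register -> 0 */
-  legacy_sse_routine();
-}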
-
-/* Vector load with broadcast */
-/// Loads a scalar single-precision floating point value from the
-///    memory location pointed to by \a __a and broadcasts it to the
-///    elements of a [4 x float] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTSS </c> instruction.
-///
-/// \param __a
-///    A pointer to the single-precision floating point value to be broadcast.
-/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set
-///    equal to the broadcast value.
-static __inline __m128 __DEFAULT_FN_ATTRS128
-_mm_broadcast_ss(float const *__a)
-{
-  float __f = *__a;
-  return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
-}
-
-/// Loads a scalar double-precision floating point value from the
-///    memory location pointed to by \a __a and broadcasts it to the
-///    elements of a [4 x double] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTSD </c> instruction.
-///
-/// \param __a
-///    A pointer to the double-precision floating point value to be broadcast.
-/// \returns A 256-bit vector of [4 x double] whose 64-bit elements are set
-///    equal to the broadcast value.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_broadcast_sd(double const *__a)
-{
-  double __d = *__a;
-  return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
-}
-
-/// Loads a scalar single-precision floating point value from the
-///    memory location pointed to by \a __a and broadcasts it to the
-///    elements of an [8 x float] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTSS </c> instruction.
-///
-/// \param __a
-///    A pointer to the single-precision floating point value to be broadcast.
-/// \returns A 256-bit vector of [8 x float] whose 32-bit elements are set
-///    equal to the broadcast value.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_broadcast_ss(float const *__a)
-{
-  float __f = *__a;
-  return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
-}
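-
-/* User-level sketch (assumes <immintrin.h>): broadcasting a scalar is the
-   usual way to apply a single coefficient to every lane. */
-static inline __m256 scale8(__m256 v, const float *coeff)
-{
-  return _mm256_mul_ps(v, _mm256_broadcast_ss(coeff));
-}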
-
-/// Loads the data from a 128-bit vector of [2 x double] at the memory
-///    location pointed to by \a __a and broadcasts it to the 128-bit
-///    elements of a 256-bit vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTF128 </c> instruction.
-///
-/// \param __a
-///    A pointer to the 128-bit vector of [2 x double] to be broadcast.
-/// \returns A 256-bit vector of [4 x double] whose 128-bit elements are set
-///    equal to the broadcast value.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_broadcast_pd(__m128d const *__a)
-{
-  __m128d __b = _mm_loadu_pd((const double *)__a);
-  return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b,
-                                          0, 1, 0, 1);
-}
-
-/// Loads the data from a 128-bit vector of [4 x float] at the memory
-///    location pointed to by \a __a and broadcasts it to the 128-bit
-///    elements of a 256-bit vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTF128 </c> instruction.
-///
-/// \param __a
-///    A pointer to the 128-bit vector of [4 x float] to be broadcast.
-/// \returns A 256-bit vector of [8 x float] whose 128-bit elements are set
-///    equal to the broadcast value.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_broadcast_ps(__m128 const *__a)
-{
-  __m128 __b = _mm_loadu_ps((const float *)__a);
-  return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
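-
-/* User-level sketch (assumes <immintrin.h>): VBROADCASTF128 duplicates a
-   whole 128-bit block, which is handy for reusing an SSE constant in both
-   halves of a YMM register. */
-static inline __m256 duplicate_xmm(const __m128 *p)
-{
-  return _mm256_broadcast_ps(p);  /* low half = *p, high half = *p */
-}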
-
-/* SIMD load ops */
-/// Loads 4 double-precision floating point values from a 32-byte aligned
-///    memory location pointed to by \a __p into a vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a memory location containing
-///    double-precision floating point values.
-/// \returns A 256-bit vector of [4 x double] containing the moved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_load_pd(double const *__p)
-{
-  return *(const __m256d *)__p;
-}
-
-/// Loads 8 single-precision floating point values from a 32-byte aligned
-///    memory location pointed to by \a __p into a vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a memory location containing float values.
-/// \returns A 256-bit vector of [8 x float] containing the moved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_load_ps(float const *__p)
-{
-  return *(const __m256 *)__p;
-}
-
-/// Loads 4 double-precision floating point values from an unaligned
-///    memory location pointed to by \a __p into a vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing double-precision floating
-///    point values.
-/// \returns A 256-bit vector of [4 x double] containing the moved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_loadu_pd(double const *__p)
-{
-  struct __loadu_pd {
-    __m256d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__p)->__v;
-}
-
-/// Loads 8 single-precision floating point values from an unaligned
-///    memory location pointed to by \a __p into a vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing single-precision floating
-///    point values.
-/// \returns A 256-bit vector of [8 x float] containing the moved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_loadu_ps(float const *__p)
-{
-  struct __loadu_ps {
-    __m256_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
-
-/// Loads 256 bits of integer data from a 32-byte aligned memory
-///    location pointed to by \a __p into elements of a 256-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a 256-bit integer vector containing integer
-///    values.
-/// \returns A 256-bit integer vector containing the moved values.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_load_si256(__m256i const *__p)
-{
-  return *__p;
-}
-
-/// Loads 256 bits of integer data from an unaligned memory location
-///    pointed to by \a __p into a 256-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a 256-bit integer vector containing integer values.
-/// \returns A 256-bit integer vector containing the moved values.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_loadu_si256(__m256i_u const *__p)
-{
-  struct __loadu_si256 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si256*)__p)->__v;
-}
-
-/// Loads 256 bits of integer data from an unaligned memory location
-///    pointed to by \a __p into a 256-bit integer vector. This intrinsic may
-///    perform better than \c _mm256_loadu_si256 when the data crosses a cache
-///    line boundary.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VLDDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a 256-bit integer vector containing integer values.
-/// \returns A 256-bit integer vector containing the moved values.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_lddqu_si256(__m256i const *__p)
-{
-  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
-}
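-
-/* User-level sketch (assumes <immintrin.h>): load and loadu differ only in
-   the alignment contract. Passing a misaligned pointer to _mm256_load_ps may
-   fault, while _mm256_loadu_ps accepts any address at a possible performance
-   cost. */
-static inline __m256 load_any_alignment(const float *p)
-{
-  return _mm256_loadu_ps(p);  /* no 32-byte alignment requirement */
-}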
-
-/* SIMD store ops */
-/// Stores double-precision floating point values from a 256-bit vector
-///    of [4 x double] to a 32-byte aligned memory location pointed to by
-///    \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a memory location that will receive the
-///    double-precision floating point values.
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_store_pd(double *__p, __m256d __a)
-{
-  *(__m256d *)__p = __a;
-}
-
-/// Stores single-precision floating point values from a 256-bit vector
-///    of [8 x float] to a 32-byte aligned memory location pointed to by \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a memory location that will receive the
-///    float values.
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_store_ps(float *__p, __m256 __a)
-{
-  *(__m256 *)__p = __a;
-}
-
-/// Stores double-precision floating point values from a 256-bit vector
-///    of [4 x double] to an unaligned memory location pointed to by \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the double-precision
-///    floating point values.
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu_pd(double *__p, __m256d __a)
-{
-  struct __storeu_pd {
-    __m256d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__p)->__v = __a;
-}
-
-/// Stores single-precision floating point values from a 256-bit vector
-///    of [8 x float] to an unaligned memory location pointed to by \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the float values.
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu_ps(float *__p, __m256 __a)
-{
-  struct __storeu_ps {
-    __m256_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__p)->__v = __a;
-}
-
-/// Stores integer values from a 256-bit integer vector to a 32-byte
-///    aligned memory location pointed to by \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA </c> instruction.
-///
-/// \param __p
-///    A 32-byte aligned pointer to a memory location that will receive the
-///    integer values.
-/// \param __a
-///    A 256-bit integer vector containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_store_si256(__m256i *__p, __m256i __a)
-{
-  *__p = __a;
-}
-
-/// Stores integer values from a 256-bit integer vector to an unaligned
-///    memory location pointed to by \a __p.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the integer values.
-/// \param __a
-///    A 256-bit integer vector containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu_si256(__m256i_u *__p, __m256i __a)
-{
-  struct __storeu_si256 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si256*)__p)->__v = __a;
-}
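-
-/* User-level sketch (assumes <immintrin.h>): a 32-byte memcpy built from the
-   unaligned load/store pair; neither pointer needs 32-byte alignment. */
-static inline void copy_256bits(void *dst, const void *src)
-{
-  _mm256_storeu_si256((__m256i_u *)dst,
-                      _mm256_loadu_si256((const __m256i_u *)src));
-}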
-
-/* Conditional load ops */
-/// Conditionally loads double-precision floating point elements from a
-///    memory location pointed to by \a __p into a 128-bit vector of
-///    [2 x double], depending on the mask bits associated with each data
-///    element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that contains the double-precision
-///    floating point values.
-/// \param __m
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each data element represents the mask bits. If a mask bit is zero, the
-///    corresponding value in the memory location is not loaded and the
-///    corresponding field in the return value is set to zero.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline __m128d __DEFAULT_FN_ATTRS128
-_mm_maskload_pd(double const *__p, __m128i __m)
-{
-  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
-}
-
-/// Conditionally loads double-precision floating point elements from a
-///    memory location pointed to by \a __p into a 256-bit vector of
-///    [4 x double], depending on the mask bits associated with each data
-///    element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that contains the double-precision
-///    floating point values.
-/// \param __m
-///    A 256-bit integer vector of [4 x quadword] containing the mask. The most
-///    significant bit of each quadword element represents the mask bits. If a
-///    mask bit is zero, the corresponding value in the memory location is not
-///    loaded and the corresponding field in the return value is set to zero.
-/// \returns A 256-bit vector of [4 x double] containing the loaded values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_maskload_pd(double const *__p, __m256i __m)
-{
-  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
-                                               (__v4di)__m);
-}
-
-/// Conditionally loads single-precision floating point elements from a
-///    memory location pointed to by \a __p into a 128-bit vector of
-///    [4 x float], depending on the mask bits associated with each data
-///    element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that contains the single-precision
-///    floating point values.
-/// \param __m
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each data element represents the mask bits. If a mask bit is zero, the
-///    corresponding value in the memory location is not loaded and the
-///    corresponding field in the return value is set to zero.
-/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline __m128 __DEFAULT_FN_ATTRS128
-_mm_maskload_ps(float const *__p, __m128i __m)
-{
-  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
-}
-
-/// Conditionally loads single-precision floating point elements from a
-///    memory location pointed to by \a __p into a 256-bit vector of
-///    [8 x float], depending on the mask bits associated with each data
-///    element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that contains the single-precision
-///    floating point values.
-/// \param __m
-///    A 256-bit integer vector of [8 x dword] containing the mask. The most
-///    significant bit of each dword element represents the mask bits. If a mask
-///    bit is zero, the corresponding value in the memory location is not loaded
-///    and the corresponding field in the return value is set to zero.
-/// \returns A 256-bit vector of [8 x float] containing the loaded values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_maskload_ps(float const *__p, __m256i __m)
-{
-  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
-}
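-
-/* User-level sketch (assumes <immintrin.h>): loading only the first n < 8
-   floats of a buffer. Lanes whose mask MSB is clear are returned as zero and
-   are never touched in memory, so reading near the end of an array is safe. */
-static inline __m256 load_first_n(const float *p, int n)
-{
-  __m256i mask = _mm256_setr_epi32(
-      n > 0 ? -1 : 0, n > 1 ? -1 : 0, n > 2 ? -1 : 0, n > 3 ? -1 : 0,
-      n > 4 ? -1 : 0, n > 5 ? -1 : 0, n > 6 ? -1 : 0, n > 7 ? -1 : 0);
-  return _mm256_maskload_ps(p, mask);
-}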
-
-/* Conditional store ops */
-/// Moves single-precision floating point values from a 256-bit vector
-///    of [8 x float] to a memory location pointed to by \a __p, according to
-///    the specified mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the float values.
-/// \param __m
-///    A 256-bit integer vector of [8 x dword] containing the mask. The most
-///    significant bit of each dword element in the mask vector represents the
-///    mask bits. If a mask bit is zero, the corresponding value from vector
-///    \a __a is not stored and the corresponding field in the memory location
-///    pointed to by \a __p is not changed.
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
-{
-  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
-}
-
-/// Moves double-precision values from a 128-bit vector of [2 x double]
-///    to a memory location pointed to by \a __p, according to the specified
-///    mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the double-precision
-///    floating point values.
-/// \param __m
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each field in the mask vector represents the mask bits. If a mask bit is
-///    zero, the corresponding value from vector \a __a is not stored and the
-///    corresponding field in the memory location pointed to by \a __p is not
-///    changed.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
-{
-  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
-}
-
-/// Moves double-precision values from a 256-bit vector of [4 x double]
-///    to a memory location pointed to by \a __p, according to the specified
-///    mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the double-precision
-///    floating point values.
-/// \param __m
-///    A 256-bit integer vector of [4 x quadword] containing the mask. The most
-///    significant bit of each quadword element in the mask vector represents
-///    the mask bits. If a mask bit is zero, the corresponding value from vector
-///    \a __a is not stored and the corresponding field in the memory location
-///    pointed to by \a __p is not changed.
-/// \param __a
-///    A 256-bit vector of [4 x double] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
-{
-  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
-}
-
-/// Moves single-precision floating point values from a 128-bit vector
-///    of [4 x float] to a memory location pointed to by \a __p, according to
-///    the specified mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the float values.
-/// \param __m
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each field in the mask vector represents the mask bits. If a mask bit is
-///    zero, the corresponding value from vector \a __a is not stored and the
-///    corresponding field in the memory location pointed to by \a __p is not
-///    changed.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
-{
-  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
-}
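-
-/* User-level sketch (assumes <immintrin.h>): the store-side counterpart of
-   the maskload example above; only the first n lanes reach memory, the rest
-   of the destination is left unchanged. */
-static inline void store_first_n(float *p, __m256 v, int n)
-{
-  __m256i mask = _mm256_setr_epi32(
-      n > 0 ? -1 : 0, n > 1 ? -1 : 0, n > 2 ? -1 : 0, n > 3 ? -1 : 0,
-      n > 4 ? -1 : 0, n > 5 ? -1 : 0, n > 6 ? -1 : 0, n > 7 ? -1 : 0);
-  _mm256_maskstore_ps(p, mask, v);
-}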
-
-/* Cacheability support ops */
-/// Moves integer data from a 256-bit integer vector to a 32-byte
-///    aligned memory location. To minimize caching, the data is flagged as
-///    non-temporal (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQ </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-byte aligned memory location that will receive the
-///    integer values.
-/// \param __b
-///    A 256-bit integer vector containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_si256(__m256i *__a, __m256i __b)
-{
-  typedef __v4di __v4di_aligned __attribute__((aligned(32)));
-  __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);
-}
-
-/// Moves double-precision values from a 256-bit vector of [4 x double]
-///    to a 32-byte aligned memory location. To minimize caching, the data is
-///    flagged as non-temporal (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPD </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-byte aligned memory location that will receive the
-///    double-precision floating-point values.
-/// \param __b
-///    A 256-bit vector of [4 x double] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_pd(double *__a, __m256d __b)
-{
-  typedef __v4df __v4df_aligned __attribute__((aligned(32)));
-  __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);
-}
-
-/// Moves single-precision floating point values from a 256-bit vector
-///    of [8 x float] to a 32-byte aligned memory location. To minimize
-///    caching, the data is flagged as non-temporal (unlikely to be used again
-///    soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-byte aligned memory location that will receive the
-///    single-precision floating point values.
-/// \param __a
-///    A 256-bit vector of [8 x float] containing the values to be moved.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_ps(float *__p, __m256 __a)
-{
-  typedef __v8sf __v8sf_aligned __attribute__((aligned(32)));
-  __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p);
-}
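-
-/* User-level sketch (assumes <immintrin.h> and a 32-byte aligned dst):
-   non-temporal stores bypass the cache, so a large fill should end with a
-   store fence before other code reads the buffer. */
-static inline void fill_nt(float *dst, float value, unsigned long nvec)
-{
-  __m256 v = _mm256_set1_ps(value);
-  for (unsigned long i = 0; i != nvec; ++i)
-    _mm256_stream_ps(dst + 8 * i, v);  /* each chunk is 32-byte aligned */
-  _mm_sfence();  /* order the non-temporal stores (SSE intrinsic) */
-}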
-
-/* Create vectors */
-/// Create a 256-bit vector of [4 x double] with undefined values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 256-bit vector of [4 x double] containing undefined values.
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_undefined_pd(void)
-{
-  return (__m256d)__builtin_ia32_undef256();
-}
-
-/// Create a 256-bit vector of [8 x float] with undefined values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 256-bit vector of [8 x float] containing undefined values.
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_undefined_ps(void)
-{
-  return (__m256)__builtin_ia32_undef256();
-}
-
-/// Create a 256-bit integer vector with undefined values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 256-bit integer vector containing undefined values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_undefined_si256(void)
-{
-  return (__m256i)__builtin_ia32_undef256();
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double]
-///    initialized with the specified double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __a
-///    A double-precision floating-point value used to initialize bits [255:192]
-///    of the result.
-/// \param __b
-///    A double-precision floating-point value used to initialize bits [191:128]
-///    of the result.
-/// \param __c
-///    A double-precision floating-point value used to initialize bits [127:64]
-///    of the result.
-/// \param __d
-///    A double-precision floating-point value used to initialize bits [63:0]
-///    of the result.
-/// \returns An initialized 256-bit floating-point vector of [4 x double].
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_set_pd(double __a, double __b, double __c, double __d)
-{
-  return __extension__ (__m256d){ __d, __c, __b, __a };
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] initialized
-///    with the specified single-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __a
-///    A single-precision floating-point value used to initialize bits [255:224]
-///    of the result.
-/// \param __b
-///    A single-precision floating-point value used to initialize bits [223:192]
-///    of the result.
-/// \param __c
-///    A single-precision floating-point value used to initialize bits [191:160]
-///    of the result.
-/// \param __d
-///    A single-precision floating-point value used to initialize bits [159:128]
-///    of the result.
-/// \param __e
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \param __f
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __g
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __h
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \returns An initialized 256-bit floating-point vector of [8 x float].
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_set_ps(float __a, float __b, float __c, float __d,
-              float __e, float __f, float __g, float __h)
-{
-  return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
-}
-
-/// Constructs a 256-bit integer vector initialized with the specified
-///    32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [255:224] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [223:192] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [191:160] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [159:128] of the result.
-/// \param __i4
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \param __i5
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i6
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i7
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
-                 int __i4, int __i5, int __i6, int __i7)
-{
-  return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
-}
-
-/// Constructs a 256-bit integer vector initialized with the specified
-///    16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __w15
-///    A 16-bit integral value used to initialize bits [255:240] of the result.
-/// \param __w14
-///    A 16-bit integral value used to initialize bits [239:224] of the result.
-/// \param __w13
-///    A 16-bit integral value used to initialize bits [223:208] of the result.
-/// \param __w12
-///    A 16-bit integral value used to initialize bits [207:192] of the result.
-/// \param __w11
-///    A 16-bit integral value used to initialize bits [191:176] of the result.
-/// \param __w10
-///    A 16-bit integral value used to initialize bits [175:160] of the result.
-/// \param __w09
-///    A 16-bit integral value used to initialize bits [159:144] of the result.
-/// \param __w08
-///    A 16-bit integral value used to initialize bits [143:128] of the result.
-/// \param __w07
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \param __w06
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w05
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w04
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w03
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w02
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w01
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w00
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
-                 short __w11, short __w10, short __w09, short __w08,
-                 short __w07, short __w06, short __w05, short __w04,
-                 short __w03, short __w02, short __w01, short __w00)
-{
-  return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
-    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
-}
-
-/// Constructs a 256-bit integer vector initialized with the specified
-///    8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __b31
-///    An 8-bit integral value used to initialize bits [255:248] of the result.
-/// \param __b30
-///    An 8-bit integral value used to initialize bits [247:240] of the result.
-/// \param __b29
-///    An 8-bit integral value used to initialize bits [239:232] of the result.
-/// \param __b28
-///    An 8-bit integral value used to initialize bits [231:224] of the result.
-/// \param __b27
-///    An 8-bit integral value used to initialize bits [223:216] of the result.
-/// \param __b26
-///    An 8-bit integral value used to initialize bits [215:208] of the result.
-/// \param __b25
-///    An 8-bit integral value used to initialize bits [207:200] of the result.
-/// \param __b24
-///    An 8-bit integral value used to initialize bits [199:192] of the result.
-/// \param __b23
-///    An 8-bit integral value used to initialize bits [191:184] of the result.
-/// \param __b22
-///    An 8-bit integral value used to initialize bits [183:176] of the result.
-/// \param __b21
-///    An 8-bit integral value used to initialize bits [175:168] of the result.
-/// \param __b20
-///    An 8-bit integral value used to initialize bits [167:160] of the result.
-/// \param __b19
-///    An 8-bit integral value used to initialize bits [159:152] of the result.
-/// \param __b18
-///    An 8-bit integral value used to initialize bits [151:144] of the result.
-/// \param __b17
-///    An 8-bit integral value used to initialize bits [143:136] of the result.
-/// \param __b16
-///    An 8-bit integral value used to initialize bits [135:128] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b09
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b08
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b07
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b06
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b05
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b04
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b03
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b02
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b01
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b00
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
-                char __b27, char __b26, char __b25, char __b24,
-                char __b23, char __b22, char __b21, char __b20,
-                char __b19, char __b18, char __b17, char __b16,
-                char __b15, char __b14, char __b13, char __b12,
-                char __b11, char __b10, char __b09, char __b08,
-                char __b07, char __b06, char __b05, char __b04,
-                char __b03, char __b02, char __b01, char __b00)
-{
-  return __extension__ (__m256i)(__v32qi){
-    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
-    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
-    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
-    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
-  };
-}
-
-/// Constructs a 256-bit integer vector initialized with the specified
-///    64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __a
-///    A 64-bit integral value used to initialize bits [255:192] of the result.
-/// \param __b
-///    A 64-bit integral value used to initialize bits [191:128] of the result.
-/// \param __c
-///    A 64-bit integral value used to initialize bits [127:64] of the result.
-/// \param __d
-///    A 64-bit integral value used to initialize bits [63:0] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
-{
-  return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a };
-}
-
-/* Create vectors with elements in reverse order */
-/// Constructs a 256-bit floating-point vector of [4 x double],
-///    initialized in reverse order with the specified double-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __a
-///    A double-precision floating-point value used to initialize bits [63:0]
-///    of the result.
-/// \param __b
-///    A double-precision floating-point value used to initialize bits [127:64]
-///    of the result.
-/// \param __c
-///    A double-precision floating-point value used to initialize bits [191:128]
-///    of the result.
-/// \param __d
-///    A double-precision floating-point value used to initialize bits [255:192]
-///    of the result.
-/// \returns An initialized 256-bit floating-point vector of [4 x double].
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_setr_pd(double __a, double __b, double __c, double __d)
-{
-  return _mm256_set_pd(__d, __c, __b, __a);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float],
-///    initialized in reverse order with the specified single-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __a
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \param __b
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __c
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __d
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \param __e
-///    A single-precision floating-point value used to initialize bits [159:128]
-///    of the result.
-/// \param __f
-///    A single-precision floating-point value used to initialize bits [191:160]
-///    of the result.
-/// \param __g
-///    A single-precision floating-point value used to initialize bits [223:192]
-///    of the result.
-/// \param __h
-///    A single-precision floating-point value used to initialize bits [255:224]
-///    of the result.
-/// \returns An initialized 256-bit floating-point vector of [8 x float].
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_setr_ps(float __a, float __b, float __c, float __d,
-               float __e, float __f, float __g, float __h)
-{
-  return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a);
-}
-
-/// Constructs a 256-bit integer vector, initialized in reverse order
-///    with the specified 32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \param __i4
-///    A 32-bit integral value used to initialize bits [159:128] of the result.
-/// \param __i5
-///    A 32-bit integral value used to initialize bits [191:160] of the result.
-/// \param __i6
-///    A 32-bit integral value used to initialize bits [223:192] of the result.
-/// \param __i7
-///    A 32-bit integral value used to initialize bits [255:224] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
-                  int __i4, int __i5, int __i6, int __i7)
-{
-  return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0);
-}
-
-/// Constructs a 256-bit integer vector, initialized in reverse order
-///    with the specified 16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __w15
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \param __w14
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w13
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w12
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w11
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w10
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w09
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w08
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \param __w07
-///    A 16-bit integral value used to initialize bits [143:128] of the result.
-/// \param __w06
-///    A 16-bit integral value used to initialize bits [159:144] of the result.
-/// \param __w05
-///    A 16-bit integral value used to initialize bits [175:160] of the result.
-/// \param __w04
-///    A 16-bit integral value used to initialize bits [191:176] of the result.
-/// \param __w03
-///    A 16-bit integral value used to initialize bits [207:192] of the result.
-/// \param __w02
-///    A 16-bit integral value used to initialize bits [223:208] of the result.
-/// \param __w01
-///    A 16-bit integral value used to initialize bits [239:224] of the result.
-/// \param __w00
-///    A 16-bit integral value used to initialize bits [255:240] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
-                  short __w11, short __w10, short __w09, short __w08,
-                  short __w07, short __w06, short __w05, short __w04,
-                  short __w03, short __w02, short __w01, short __w00)
-{
-  return _mm256_set_epi16(__w00, __w01, __w02, __w03,
-                          __w04, __w05, __w06, __w07,
-                          __w08, __w09, __w10, __w11,
-                          __w12, __w13, __w14, __w15);
-}
-
-/// Constructs a 256-bit integer vector, initialized in reverse order
-///    with the specified 8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///   instruction.
-///
-/// \param __b31
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \param __b30
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b29
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b28
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b27
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b26
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b25
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b24
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b23
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b22
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b21
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b20
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b19
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b18
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b17
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b16
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [135:128] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [143:136] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [151:144] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [159:152] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [167:160] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [175:168] of the result.
-/// \param __b09
-///    An 8-bit integral value used to initialize bits [183:176] of the result.
-/// \param __b08
-///    An 8-bit integral value used to initialize bits [191:184] of the result.
-/// \param __b07
-///    An 8-bit integral value used to initialize bits [199:192] of the result.
-/// \param __b06
-///    An 8-bit integral value used to initialize bits [207:200] of the result.
-/// \param __b05
-///    An 8-bit integral value used to initialize bits [215:208] of the result.
-/// \param __b04
-///    An 8-bit integral value used to initialize bits [223:216] of the result.
-/// \param __b03
-///    An 8-bit integral value used to initialize bits [231:224] of the result.
-/// \param __b02
-///    An 8-bit integral value used to initialize bits [239:232] of the result.
-/// \param __b01
-///    An 8-bit integral value used to initialize bits [247:240] of the result.
-/// \param __b00
-///    An 8-bit integral value used to initialize bits [255:248] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
-                 char __b27, char __b26, char __b25, char __b24,
-                 char __b23, char __b22, char __b21, char __b20,
-                 char __b19, char __b18, char __b17, char __b16,
-                 char __b15, char __b14, char __b13, char __b12,
-                 char __b11, char __b10, char __b09, char __b08,
-                 char __b07, char __b06, char __b05, char __b04,
-                 char __b03, char __b02, char __b01, char __b00)
-{
-  return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
-                         __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
-                         __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
-                         __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31);
-}
-
-/// Constructs a 256-bit integer vector, initialized in reverse order
-///    with the specified 64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __a
-///    A 64-bit integral value used to initialize bits [63:0] of the result.
-/// \param __b
-///    A 64-bit integral value used to initialize bits [127:64] of the result.
-/// \param __c
-///    A 64-bit integral value used to initialize bits [191:128] of the result.
-/// \param __d
-///    A 64-bit integral value used to initialize bits [255:192] of the result.
-/// \returns An initialized 256-bit integer vector.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
-{
-  return _mm256_set_epi64x(__d, __c, __b, __a);
-}
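-
-/// Editorial usage sketch (not part of the original header): the
-/// _mm256_setr_* variants take the same values in memory order, so these
-/// two calls build the same vector.
-/// \code
-/// __m256i a = _mm256_set_epi64x(4, 3, 2, 1);
-/// __m256i b = _mm256_setr_epi64x(1, 2, 3, 4);  // element 0 first
-/// \endcode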
-
-/* Create vectors with repeated elements */
-/// Constructs a 256-bit floating-point vector of [4 x double], with each
-///    of the four double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP+VINSERTF128 </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 256-bit floating-point vector of [4 x double].
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_set1_pd(double __w)
-{
-  return _mm256_set_pd(__w, __w, __w, __w);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float], with each
-///    of the eight single-precision floating-point vector elements set to the
-///    specified single-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 256-bit floating-point vector of [8 x float].
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_set1_ps(float __w)
-{
-  return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Constructs a 256-bit integer vector of [8 x i32], with each of the
-///    32-bit integral vector elements set to the specified 32-bit integral
-///    value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS+VINSERTF128 </c>
-///   instruction.
-///
-/// \param __i
-///    A 32-bit integral value used to initialize each vector element of the
-///    result.
-/// \returns An initialized 256-bit integer vector of [8 x i32].
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set1_epi32(int __i)
-{
-  return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i);
-}
-
-/// Constructs a 256-bit integer vector of [16 x i16], with each of the
-///    16-bit integral vector elements set to the specified 16-bit integral
-///    value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSHUFB+VINSERTF128 </c> instruction.
-///
-/// \param __w
-///    A 16-bit integral value used to initialize each vector element of the
-///    result.
-/// \returns An initialized 256-bit integer vector of [16 x i16].
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set1_epi16(short __w)
-{
-  return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w,
-                          __w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Constructs a 256-bit integer vector of [32 x i8], with each of the
-///    8-bit integral vector elements set to the specified 8-bit integral value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSHUFB+VINSERTF128 </c> instruction.
-///
-/// \param __b
-///    An 8-bit integral value used to initialize each vector element of the
-///    result.
-/// \returns An initialized 256-bit integer vector of [32 x i8].
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set1_epi8(char __b)
-{
-  return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b,
-                         __b, __b, __b, __b, __b, __b, __b, __b,
-                         __b, __b, __b, __b, __b, __b, __b, __b,
-                         __b, __b, __b, __b, __b, __b, __b, __b);
-}
-
-/// Constructs a 256-bit integer vector of [4 x i64], with each of the
-///    64-bit integral vector elements set to the specified 64-bit integral
-///    value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP+VINSERTF128 </c> instruction.
-///
-/// \param __q
-///    A 64-bit integral value used to initialize each vector element of the
-///    result.
-/// \returns An initialized 256-bit integer vector of [4 x i64].
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set1_epi64x(long long __q)
-{
-  return _mm256_set_epi64x(__q, __q, __q, __q);
-}
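-
-/// Editorial usage sketch (not part of the original header): the
-/// _mm256_set1_* intrinsics broadcast one scalar into every element.
-/// \code
-/// __m256  f = _mm256_set1_ps(3.0f);   // eight copies of 3.0f
-/// __m256i w = _mm256_set1_epi16(-1);  // sixteen copies of 0xFFFF
-/// \endcode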
-
-/* Create zeroed vectors */
-/// Constructs a 256-bit floating-point vector of [4 x double] with all
-///    vector elements initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
-///
-/// \returns A 256-bit vector of [4 x double] with all elements set to zero.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_setzero_pd(void)
-{
-  return __extension__ (__m256d){ 0, 0, 0, 0 };
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] with all
-///    vector elements initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
-///
-/// \returns A 256-bit vector of [8 x float] with all elements set to zero.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_setzero_ps(void)
-{
-  return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-/// Constructs a 256-bit integer vector initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
-///
-/// \returns A 256-bit integer vector initialized to zero.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setzero_si256(void)
-{
-  return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };
-}
-
-/* Cast between vector types */
-/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
-///    floating-point vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double].
-/// \returns A 256-bit floating-point vector of [8 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_castpd_ps(__m256d __a)
-{
-  return (__m256)__a;
-}
-
-/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double].
-/// \returns A 256-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_castpd_si256(__m256d __a)
-{
-  return (__m256i)__a;
-}
-
-/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
-///    floating-point vector of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [8 x float].
-/// \returns A 256-bit floating-point vector of [4 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_castps_pd(__m256 __a)
-{
-  return (__m256d)__a;
-}
-
-/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [8 x float].
-/// \returns A 256-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_castps_si256(__m256 __a)
-{
-  return (__m256i)__a;
-}
-
-/// Casts a 256-bit integer vector into a 256-bit floating-point vector
-///    of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_castsi256_ps(__m256i __a)
-{
-  return (__m256)__a;
-}
-
-/// Casts a 256-bit integer vector into a 256-bit floating-point vector
-///    of [4 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_castsi256_pd(__m256i __a)
-{
-  return (__m256d)__a;
-}
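-
-/// Editorial usage sketch (not part of the original header): the cast
-/// intrinsics only reinterpret the 256 bits, so a round trip is free and
-/// emits no instructions.
-/// \code
-/// __m256  f    = _mm256_set1_ps(1.0f);
-/// __m256i bits = _mm256_castps_si256(f);    // same bit pattern, new type
-/// __m256  back = _mm256_castsi256_ps(bits); // identical to f
-/// \endcode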
-
-/// Returns the lower 128 bits of a 256-bit floating-point vector of
-///    [4 x double] as a 128-bit floating-point vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double].
-/// \returns A 128-bit floating-point vector of [2 x double] containing the
-///    lower 128 bits of the parameter.
-static __inline __m128d __DEFAULT_FN_ATTRS
-_mm256_castpd256_pd128(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
-}
-
-/// Returns the lower 128 bits of a 256-bit floating-point vector of
-///    [8 x float] as a 128-bit floating-point vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit floating-point vector of [8 x float].
-/// \returns A 128-bit floating-point vector of [4 x float] containing the
-///    lower 128 bits of the parameter.
-static __inline __m128 __DEFAULT_FN_ATTRS
-_mm256_castps256_ps128(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
-}
-
-/// Truncates a 256-bit integer vector into a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 128-bit integer vector containing the lower 128 bits of the
-///    parameter.
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm256_castsi256_si128(__m256i __a)
-{
-  return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
-}
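-
-/// Editorial usage sketch (not part of the original header): the 256-to-128
-/// casts keep only the low half; no data is moved.
-/// \code
-/// __m256 v  = _mm256_set1_ps(2.0f);
-/// __m128 lo = _mm256_castps256_ps128(v);  // bits [127:0] of v
-/// \endcode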
-
-/// Constructs a 256-bit floating-point vector of [4 x double] from a
-///    128-bit floating-point vector of [2 x double].
-///
-///    The lower 128 bits contain the value of the source vector. The contents
-///    of the upper 128 bits are undefined.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits
-///    contain the value of the parameter. The contents of the upper 128 bits
-///    are undefined.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_castpd128_pd256(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] from a
-///    128-bit floating-point vector of [4 x float].
-///
-///    The lower 128 bits contain the value of the source vector. The contents
-///    of the upper 128 bits are undefined.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits
-///    contain the value of the parameter. The contents of the upper 128 bits
-///    are undefined.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_castps128_ps256(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-/// Constructs a 256-bit integer vector from a 128-bit integer vector.
-///
-///    The lower 128 bits contain the value of the source vector. The contents
-///    of the upper 128 bits are undefined.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 256-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The contents of the upper 128 bits are undefined.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_castsi128_si256(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double] from a
-///    128-bit floating-point vector of [2 x double]. The lower 128 bits
-///    contain the value of the source vector. The upper 128 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits
-///    contain the value of the parameter. The upper 128 bits are set to zero.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_zextpd128_pd256(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] from a
-///    128-bit floating-point vector of [4 x float]. The lower 128 bits contain
-///    the value of the source vector. The upper 128 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits
-///    contain the value of the parameter. The upper 128 bits are set to zero.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_zextps128_ps256(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 256-bit integer vector from a 128-bit integer vector.
-///    The lower 128 bits contain the value of the source vector. The upper
-///    128 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 256-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The upper 128 bits are set to zero.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_zextsi128_si256(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3);
-}
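-
-/// Editorial usage sketch (not part of the original header): unlike the
-/// 128-to-256 casts above, whose upper half is undefined, the zext forms
-/// guarantee zeroed upper bits.
-/// \code
-/// __m128d x = _mm_set1_pd(2.0);
-/// __m256d a = _mm256_castpd128_pd256(x);  // bits [255:128] undefined
-/// __m256d b = _mm256_zextpd128_pd256(x);  // bits [255:128] are zero
-/// \endcode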
-
-/*
-   Vector insert.
-   We use macros rather than inlines because we only want to accept
-   invocations where the immediate M is a constant expression.
-*/
-/// Constructs a new 256-bit vector of [8 x float] by first duplicating
-///    a 256-bit vector of [8 x float] given in the first parameter, and then
-///    replacing either the upper or the lower 128 bits with the contents of a
-///    128-bit vector of [4 x float] in the second parameter.
-///
-///    The immediate integer parameter determines between the upper or the lower
-///    128 bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [8 x float]. This vector is copied to the result
-///    first, and then either the upper or the lower 128 bits of the result will
-///    be replaced by the contents of \a V2.
-/// \param V2
-///    A 128-bit vector of [4 x float]. The contents of this parameter are
-///    written to either the upper or the lower 128 bits of the result depending
-///    on the value of parameter \a M.
-/// \param M
-///    An immediate integer. The least significant bit determines how the values
-///    from the two parameters are interleaved: \n
-///    If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
-///    and bits [255:128] of \a V1 are copied to bits [255:128] of the
-///    result. \n
-///    If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
-///    result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
-///    result.
-/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-#define _mm256_insertf128_ps(V1, V2, M) \
-  ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
-                                            (__v4sf)(__m128)(V2), (int)(M)))
-
-/// Constructs a new 256-bit vector of [4 x double] by first duplicating
-///    a 256-bit vector of [4 x double] given in the first parameter, and then
-///    replacing either the upper or the lower 128 bits with the contents of a
-///    128-bit vector of [2 x double] in the second parameter.
-///
-///    The immediate integer parameter determines between the upper or the lower
-///    128 bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit vector of [4 x double]. This vector is copied to the result
-///    first, and then either the upper or the lower 128 bits of the result will
-///    be replaced by the contents of \a V2.
-/// \param V2
-///    A 128-bit vector of [2 x double]. The contents of this parameter are
-///    written to either the upper or the lower 128 bits of the result depending
-///    on the value of parameter \a M.
-/// \param M
-///    An immediate integer. The least significant bit determines how the values
-///    from the two parameters are interleaved: \n
-///    If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
-///    and bits [255:128] of \a V1 are copied to bits [255:128] of the
-///    result. \n
-///    If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
-///    result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
-///    result.
-/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-#define _mm256_insertf128_pd(V1, V2, M) \
-  ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
-                                             (__v2df)(__m128d)(V2), (int)(M)))
-
-/// Constructs a new 256-bit integer vector by first duplicating a
-///    256-bit integer vector given in the first parameter, and then replacing
-///    either the upper or the lower 128 bits with the contents of a 128-bit
-///    integer vector in the second parameter.
-///
-///    The immediate integer parameter determines between the upper or the lower
-///    128 bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param V1
-///    A 256-bit integer vector. This vector is copied to the result first, and
-///    then either the upper or the lower 128 bits of the result will be
-///    replaced by the contents of \a V2.
-/// \param V2
-///    A 128-bit integer vector. The contents of this parameter are written to
-///    either the upper or the lower 128 bits of the result depending on the
-///    value of parameter \a M.
-/// \param M
-///    An immediate integer. The least significant bit determines how the values
-///    from the two parameters are interleaved: \n
-///    If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
-///    and bits [255:128] of \a V1 are copied to bits [255:128] of the
-///    result. \n
-///    If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
-///    result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
-///    result.
-/// \returns A 256-bit integer vector containing the interleaved values.
-#define _mm256_insertf128_si256(V1, V2, M) \
-  ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
-                                             (__v4si)(__m128i)(V2), (int)(M)))
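-
-/// Editorial usage sketch (not part of the original header): M must be a
-/// constant expression; its low bit selects which half is replaced.
-/// \code
-/// __m256 v8  = _mm256_setzero_ps();
-/// __m128 v4  = _mm_set1_ps(1.0f);
-/// __m256 hi1 = _mm256_insertf128_ps(v8, v4, 1);  // upper half = v4
-/// \endcode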
-
-/*
-   Vector extract.
-   We use macros rather than inlines because we only want to accept
-   invocations where the immediate M is a constant expression.
-*/
-/// Extracts either the upper or the lower 128 bits from a 256-bit vector
-///    of [8 x float], as determined by the immediate integer parameter, and
-///    returns the extracted bits as a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm256_extractf128_ps(__m256 V, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [8 x float].
-/// \param M
-///    An immediate integer. The least significant bit determines which bits are
-///    extracted from the first parameter: \n
-///    If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
-///    result. \n
-///    If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
-/// \returns A 128-bit vector of [4 x float] containing the extracted bits.
-#define _mm256_extractf128_ps(V, M) \
-  ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M)))
-
-/// Extracts either the upper or the lower 128 bits from a 256-bit vector
-///    of [4 x double], as determined by the immediate integer parameter, and
-///    returns the extracted bits as a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm256_extractf128_pd(__m256d V, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
-///
-/// \param V
-///    A 256-bit vector of [4 x double].
-/// \param M
-///    An immediate integer. The least significant bit determines which bits are
-///    extracted from the first parameter: \n
-///    If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
-///    result. \n
-///    If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
-/// \returns A 128-bit vector of [2 x double] containing the extracted bits.
-#define _mm256_extractf128_pd(V, M) \
-  ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M)))
-
-/// Extracts either the upper or the lower 128 bits from a 256-bit
-///    integer vector, as determined by the immediate integer parameter, and
-///    returns the extracted bits as a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm256_extractf128_si256(__m256i V, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
-///
-/// \param V
-///    A 256-bit integer vector.
-/// \param M
-///    An immediate integer. The least significant bit determines which bits are
-///    extracted from the first parameter:  \n
-///    If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
-///    result. \n
-///    If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
-/// \returns A 128-bit integer vector containing the extracted bits.
-#define _mm256_extractf128_si256(V, M) \
-  ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M)))
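-
-/// Editorial usage sketch (not part of the original header): as with insert,
-/// M must be a constant expression; 0 selects the lower half, 1 the upper.
-/// \code
-/// __m256d v  = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
-/// __m128d hi = _mm256_extractf128_pd(v, 1);  // {3.0, 4.0}
-/// \endcode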
-
-/// Constructs a 256-bit floating-point vector of [8 x float] by
-///    concatenating two 128-bit floating-point vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-///    A 128-bit floating-point vector of [4 x float] to be copied to the upper
-///    128 bits of the result.
-/// \param __lo
-///    A 128-bit floating-point vector of [4 x float] to be copied to the lower
-///    128 bits of the result.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the
-///    concatenated result.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_set_m128 (__m128 __hi, __m128 __lo)
-{
-  return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double] by
-///    concatenating two 128-bit floating-point vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-///    A 128-bit floating-point vector of [2 x double] to be copied to the upper
-///    128 bits of the result.
-/// \param __lo
-///    A 128-bit floating-point vector of [2 x double] to be copied to the lower
-///    128 bits of the result.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the
-///    concatenated result.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_set_m128d (__m128d __hi, __m128d __lo)
-{
-  return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
-}
-
-/// Constructs a 256-bit integer vector by concatenating two 128-bit
-///    integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-///    A 128-bit integer vector to be copied to the upper 128 bits of the
-///    result.
-/// \param __lo
-///    A 128-bit integer vector to be copied to the lower 128 bits of the
-///    result.
-/// \returns A 256-bit integer vector containing the concatenated result.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_m128i (__m128i __hi, __m128i __lo)
-{
-  return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] by
-///    concatenating two 128-bit floating-point vectors of [4 x float]. This is
-///    similar to _mm256_set_m128, but the order of the input parameters is
-///    swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-///    A 128-bit floating-point vector of [4 x float] to be copied to the lower
-///    128 bits of the result.
-/// \param __hi
-///    A 128-bit floating-point vector of [4 x float] to be copied to the upper
-///    128 bits of the result.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the
-///    concatenated result.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_setr_m128 (__m128 __lo, __m128 __hi)
-{
-  return _mm256_set_m128(__hi, __lo);
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double] by
-///    concatenating two 128-bit floating-point vectors of [2 x double]. This is
-///    similar to _mm256_set_m128d, but the order of the input parameters is
-///    swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-///    A 128-bit floating-point vector of [2 x double] to be copied to the lower
-///    128 bits of the result.
-/// \param __hi
-///    A 128-bit floating-point vector of [2 x double] to be copied to the upper
-///    128 bits of the result.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the
-///    concatenated result.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_setr_m128d (__m128d __lo, __m128d __hi)
-{
-  return _mm256_set_m128d(__hi, __lo);
-}
-
-/// Constructs a 256-bit integer vector by concatenating two 128-bit
-///    integer vectors. This is similar to _mm256_set_m128i, but the order of
-///    the input parameters is swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-///    A 128-bit integer vector to be copied to the lower 128 bits of the
-///    result.
-/// \param __hi
-///    A 128-bit integer vector to be copied to the upper 128 bits of the
-///    result.
-/// \returns A 256-bit integer vector containing the concatenated result.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_m128i (__m128i __lo, __m128i __hi)
-{
-  return _mm256_set_m128i(__hi, __lo);
-}
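-
-/// Editorial usage sketch (not part of the original header): set_m128 takes
-/// (hi, lo) while setr_m128 takes (lo, hi); both concatenate two halves.
-/// \code
-/// __m128 lo = _mm_set1_ps(1.0f), hi = _mm_set1_ps(2.0f);
-/// __m256 a  = _mm256_set_m128(hi, lo);
-/// __m256 b  = _mm256_setr_m128(lo, hi);  // same result as a
-/// \endcode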
-
-/* SIMD load ops (unaligned) */
-/// Loads two 128-bit floating-point vectors of [4 x float] from
-///    unaligned memory locations and constructs a 256-bit floating-point vector
-///    of [8 x float] by concatenating the two 128-bit vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to load instructions followed by the
-///   <c> VINSERTF128 </c> instruction.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location containing 4 consecutive
-///    single-precision floating-point values. These values are to be copied to
-///    bits[255:128] of the result. The address of the memory location does not
-///    have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location containing 4 consecutive
-///    single-precision floating-point values. These values are to be copied to
-///    bits[127:0] of the result. The address of the memory location does not
-///    have to be aligned.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the
-///    concatenated result.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
-{
-  return _mm256_set_m128(_mm_loadu_ps(__addr_hi), _mm_loadu_ps(__addr_lo));
-}
-
-/// Loads two 128-bit floating-point vectors of [2 x double] from
-///    unaligned memory locations and constructs a 256-bit floating-point vector
-///    of [4 x double] by concatenating the two 128-bit vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to load instructions followed by the
-///   <c> VINSERTF128 </c> instruction.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location containing two consecutive
-///    double-precision floating-point values. These values are to be copied to
-///    bits[255:128] of the result. The address of the memory location does not
-///    have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location containing two consecutive
-///    double-precision floating-point values. These values are to be copied to
-///    bits[127:0] of the result. The address of the memory location does not
-///    have to be aligned.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the
-///    concatenated result.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
-{
-  return _mm256_set_m128d(_mm_loadu_pd(__addr_hi), _mm_loadu_pd(__addr_lo));
-}
-
-/// Loads two 128-bit integer vectors from unaligned memory locations and
-///    constructs a 256-bit integer vector by concatenating the two 128-bit
-///    vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to load instructions followed by the
-///   <c> VINSERTF128 </c> instruction.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location containing a 128-bit integer
-///    vector. This vector is to be copied to bits[255:128] of the result. The
-///    address of the memory location does not have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location containing a 128-bit integer
-///    vector. This vector is to be copied to bits[127:0] of the result. The
-///    address of the memory location does not have to be aligned.
-/// \returns A 256-bit integer vector containing the concatenated result.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
-{
-   return _mm256_set_m128i(_mm_loadu_si128(__addr_hi), _mm_loadu_si128(__addr_lo));
-}
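-
-/// Editorial usage sketch (not part of the original header): loads two
-/// possibly unaligned 128-bit halves from independent addresses.
-/// \code
-/// float lo[4] = {1, 2, 3, 4}, hi[4] = {5, 6, 7, 8};
-/// __m256 v = _mm256_loadu2_m128(hi, lo);  // {1..8} in element order
-/// \endcode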
-
-/* SIMD store ops (unaligned) */
-/// Stores the upper and lower 128 bits of a 256-bit floating-point
-///    vector of [8 x float] into two different unaligned memory locations.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
-///   store instructions.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __a
-///    A 256-bit floating-point vector of [8 x float].
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
-{
-  __m128 __v128;
-
-  __v128 = _mm256_castps256_ps128(__a);
-  _mm_storeu_ps(__addr_lo, __v128);
-  __v128 = _mm256_extractf128_ps(__a, 1);
-  _mm_storeu_ps(__addr_hi, __v128);
-}
-
-/// Stores the upper and lower 128 bits of a 256-bit floating-point
-///    vector of [4 x double] into two different unaligned memory locations.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
-///   store instructions.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __a
-///    A 256-bit floating-point vector of [4 x double].
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
-{
-  __m128d __v128;
-
-  __v128 = _mm256_castpd256_pd128(__a);
-  _mm_storeu_pd(__addr_lo, __v128);
-  __v128 = _mm256_extractf128_pd(__a, 1);
-  _mm_storeu_pd(__addr_hi, __v128);
-}
-
-/// Stores the upper and lower 128 bits of a 256-bit integer vector into
-///    two different unaligned memory locations.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
-///   store instructions.
-///
-/// \param __addr_hi
-///    A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __addr_lo
-///    A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
-///    copied to this memory location. The address of this memory location does
-///    not have to be aligned.
-/// \param __a
-///    A 256-bit integer vector.
-static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
-{
-  __m128i __v128;
-
-  __v128 = _mm256_castsi256_si128(__a);
-  _mm_storeu_si128(__addr_lo, __v128);
-  __v128 = _mm256_extractf128_si256(__a, 1);
-  _mm_storeu_si128(__addr_hi, __v128);
-}
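-
-/// Editorial usage sketch (not part of the original header): the mirror-image
-/// store writes the two halves to independent, possibly unaligned addresses.
-/// \code
-/// float lo[4], hi[4];
-/// __m256 v = _mm256_set1_ps(1.0f);
-/// _mm256_storeu2_m128(hi, lo, v); // hi gets bits [255:128], lo bits [127:0]
-/// \endcode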
-
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS128
-
-#endif /* __AVXINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/bmiintrin.h b/linux-x86/lib64/clang/14.0.2/include/bmiintrin.h
deleted file mode 100644
index f583c21..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/bmiintrin.h
+++ /dev/null
@@ -1,427 +0,0 @@
-/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
-#endif
-
-#ifndef __BMIINTRIN_H
-#define __BMIINTRIN_H
-
-/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
-   instruction behaves as BSF on non-BMI targets, there is code that expects
-   to use it as a potentially faster version of BSF. */
-#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 16-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 16-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned short __RELAXED_FN_ATTRS
-__tzcnt_u16(unsigned short __X)
-{
-  return __builtin_ia32_tzcnt_u16(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 32-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned int __RELAXED_FN_ATTRS
-__tzcnt_u32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns A 32-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ int __RELAXED_FN_ATTRS
-_mm_tzcnt_32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
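-
-/// Editorial usage sketch (not part of the original header): TZCNT is well
-/// defined for a zero input, returning the operand width.
-/// \code
-/// unsigned int a = __tzcnt_u32(8);  // 3: bit 3 is the lowest set bit
-/// unsigned int b = __tzcnt_u32(0);  // 32: no bit is set
-/// \endcode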
-
-#ifdef __x86_64__
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 64-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned long long __RELAXED_FN_ATTRS
-__tzcnt_u64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns A 64-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ long long __RELAXED_FN_ATTRS
-_mm_tzcnt_64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
-
-#endif /* __x86_64__ */
-
-#undef __RELAXED_FN_ATTRS
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__BMI__)
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-
-#define _andn_u32(a, b)   (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a)      (__blsi_u32((a)))
-
-#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
-
-#define _blsr_u32(a)      (__blsr_u32((a)))
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing one of the operands.
-/// \param __Y
-///    An unsigned integer containing one of the operands.
-/// \returns An unsigned integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__andn_u32(unsigned int __X, unsigned int __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
-///    specify the index of the least significant bit. Bits [15:8] specify the
-///    number of bits to be extracted.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__bextr_u32(unsigned int __X, unsigned int __Y)
-{
-  return __builtin_ia32_bextr_u32(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR2 */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
-///    specify the index of the least significant bit. Bits [15:8] specify the
-///    number of bits to be extracted.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_bextr2_u32(unsigned int __X, unsigned int __Y) {
-  return __builtin_ia32_bextr_u32(__X, __Y);
-}
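-
-/// Editorial usage sketch (not part of the original header): the
-/// single-underscore form packs start and length for you; the other forms
-/// take the packed control word directly.
-/// \code
-/// unsigned int x = 0xF0;
-/// unsigned int a = _bextr_u32(x, 4, 4);           // 0xF
-/// unsigned int b = __bextr_u32(x, 4 | (4 << 8));  // same: control = len<<8 | start
-/// \endcode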
-
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be cleared.
-/// \returns An unsigned integer containing the result of clearing the bits from
-///    the source operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsi_u32(unsigned int __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned integer used to create the mask.
-/// \returns An unsigned integer containing the newly created mask.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsmsk_u32(unsigned int __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing the operand to be cleared.
-/// \returns An unsigned integer containing the result of clearing the source
-///    operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsr_u32(unsigned int __X)
-{
-  return __X & (__X - 1);
-}
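-
-/// Editorial usage sketch (not part of the original header): the classic
-/// bit-manipulation identities these intrinsics implement.
-/// \code
-/// unsigned int x = 0x0C;                    // 0b1100
-/// unsigned int i = __blsi_u32(x);           // 0b0100: isolate lowest set bit
-/// unsigned int m = __blsmsk_u32(x);         // 0b0111: mask up through that bit
-/// unsigned int r = __blsr_u32(x);           // 0b1000: clear that bit
-/// unsigned int n = __andn_u32(0x0F, 0xFF);  // 0xF0: ~X & Y
-/// \endcode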
-
-#ifdef __x86_64__
-
-#define _andn_u64(a, b)   (__andn_u64((a), (b)))
-
-/* _bextr_u64 != __bextr_u64 */
-#define _blsi_u64(a)      (__blsi_u64((a)))
-
-#define _blsmsk_u64(a)    (__blsmsk_u64((a)))
-
-#define _blsr_u64(a)      (__blsr_u64((a)))
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing one of the operands.
-/// \param __Y
-///    An unsigned 64-bit integer containing one of the operands.
-/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__andn_u64 (unsigned long long __X, unsigned long long __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
-///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
-///    the number of bits to be extracted.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__bextr_u64(unsigned long long __X, unsigned long long __Y)
-{
-  return __builtin_ia32_bextr_u64(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u64(__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
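-
-/* Illustrative sketch, not part of the original header: the Intel-style
- * spelling takes the start index and field length as separate arguments:
- *
- *   unsigned long long b1 = _bextr_u64(0xff00ull, 8, 8);  // b1 == 0xff
- */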
-
-/* Intel-specified, single-leading-underscore version of BEXTR2 */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
-///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
-///    the number of bits to be extracted.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
-  return __builtin_ia32_bextr_u64(__X, __Y);
-}
-
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    bits from the source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsi_u64(unsigned long long __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer used to create the mask.
-/// \returns An unsigned 64-bit integer containing the newly created mask.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsmsk_u64(unsigned long long __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing the operand to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsr_u64(unsigned long long __X)
-{
-  return __X & (__X - 1);
-}
-
-#endif /* __x86_64__ */
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules)   \
-          || defined(__BMI__) */
-
-#endif /* __BMIINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/cetintrin.h b/linux-x86/lib64/clang/14.0.2/include/cetintrin.h
deleted file mode 100644
index 4290e9d..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*===---- cetintrin.h - CET intrinsic --------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <cetintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __CETINTRIN_H
-#define __CETINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS                                                     \
-  __attribute__((__always_inline__, __nodebug__, __target__("shstk")))
-
-static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
-  __builtin_ia32_incsspd(__a);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) {
-  __builtin_ia32_incsspq(__a);
-}
-#endif /* __x86_64__ */
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspq(__a);
-}
-#else /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspd((int)__a);
-}
-#endif /* __x86_64__ */
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
-  return __builtin_ia32_rdsspd(__a);
-}
-
-#ifdef __x86_64__
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
-  return __builtin_ia32_rdsspq(__a);
-}
-#endif /* __x86_64__ */
-
-#ifdef __x86_64__
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) {
-  return __builtin_ia32_rdsspq(0);
-}
-#else /* __x86_64__ */
-static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
-  return __builtin_ia32_rdsspd(0);
-}
-#endif /* __x86_64__ */
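-
-/* Illustrative sketch, not part of the original header; it is only
- * meaningful on hardware with CET shadow stacks enabled at run time
- * (RDSSP executes as a NOP otherwise, which is why _get_ssp() seeds the
- * builtin with 0):
- *
- *   unsigned long long ssp = _get_ssp();  // 0 if shadow stacks are inactive
- *   _inc_ssp(1);                          // advance past one 8-byte entry (x86-64)
- */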
-
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
-  __builtin_ia32_saveprevssp();
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _rstorssp(void * __p) {
-  __builtin_ia32_rstorssp(__p);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _wrssd(unsigned int __a, void * __p) {
-  __builtin_ia32_wrssd(__a, __p);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _wrssq(unsigned long long __a, void * __p) {
-  __builtin_ia32_wrssq(__a, __p);
-}
-#endif /* __x86_64__ */
-
-static __inline__ void __DEFAULT_FN_ATTRS _wrussd(unsigned int __a, void * __p) {
-  __builtin_ia32_wrussd(__a, __p);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void * __p) {
-  __builtin_ia32_wrussq(__a, __p);
-}
-#endif /* __x86_64__ */
-
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
-  __builtin_ia32_setssbsy();
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _clrssbsy(void * __p) {
-  __builtin_ia32_clrssbsy(__p);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __CETINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/emmintrin.h b/linux-x86/lib64/clang/14.0.2/include/emmintrin.h
deleted file mode 100644
index 6e9c303..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ /dev/null
@@ -1,4985 +0,0 @@
-/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __EMMINTRIN_H
-#define __EMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architecture"
-#endif
-
-#include <xmmintrin.h>
-
-typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
-typedef short __v8hi __attribute__((__vector_size__(16)));
-typedef char __v16qi __attribute__((__vector_size__(16)));
-
-/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
-typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
-
-/* We need an explicitly signed variant for char. Note that this shouldn't
- * appear in the interface though. */
-typedef signed char __v16qs __attribute__((__vector_size__(16)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
-
-/// Adds lower double-precision values in both operands and returns the
-///    sum in the lower 64 bits of the result. The upper 64 bits of the result
-///    are copied from the upper double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSD / ADDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
-///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPD / ADDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the sums of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a + (__v2df)__b);
-}
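-
-/* Illustrative sketch, not part of the original header (_mm_set_pd is
- * declared later in this file): packed versus scalar addition. Note that
- * _mm_set_pd lists the high element first:
- *
- *   __m128d a = _mm_set_pd(2.0, 1.0);  // a = { 1.0, 2.0 }
- *   __m128d b = _mm_set_pd(4.0, 3.0);  // b = { 3.0, 4.0 }
- *   __m128d v = _mm_add_pd(a, b);      // v = { 4.0, 6.0 }
- *   __m128d s = _mm_add_sd(a, b);      // s = { 4.0, 2.0 }, upper lane from a
- */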
-
-/// Subtracts the lower double-precision value of the second operand
-///    from the lower double-precision value of the first operand and returns
-///    the difference in the lower 64 bits of the result. The upper 64 bits of
-///    the result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSD / SUBSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    difference of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPD / SUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] containing the differences between
-///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a - (__v2df)__b);
-}
-
-/// Multiplies lower double-precision values in both operands and returns
-///    the product in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSD / MULSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    product of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPD / MULPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the products of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a * (__v2df)__b);
-}
-
-/// Divides the lower double-precision value of the first operand by the
-///    lower double-precision value of the second operand and returns the
-///    quotient in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSD / DIVSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    quotient of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Performs an element-by-element division of two 128-bit vectors of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPD / DIVPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] containing the quotients of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a / (__v2df)__b);
-}
-
-/// Calculates the square root of the lower double-precision value of
-///    the second operand and returns it in the lower 64 bits of the result.
-///    The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSD / SQRTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    upper 64 bits of this operand are copied to the upper 64 bits of the
-///    result.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    square root is calculated using the lower 64 bits of this operand.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    square root of the lower 64 bits of operand \a __b, and whose upper 64
-///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
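-
-/* Illustrative sketch, not part of the original header: the upper lane of
- * the _mm_sqrt_sd result comes from the first argument, not from the
- * operand whose square root is taken:
- *
- *   __m128d a = _mm_set_pd(7.0, 1.0);  // a = { 1.0, 7.0 }
- *   __m128d b = _mm_set_pd(0.0, 9.0);  // b = { 9.0, 0.0 }
- *   __m128d r = _mm_sqrt_sd(a, b);     // r = { 3.0, 7.0 }
- */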
-
-/// Calculates the square root of each of the two values stored in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPD / SQRTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [2 x double] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
-  return __builtin_ia32_sqrtpd((__v2df)__a);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the lesser of the pair of values in the lower 64 bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    minimum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the lesser of each pair of
-///    values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the minimum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the greater of the pair of values in the lower 64 bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    maximum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the greater of each pair
-///    of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the maximum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
-}
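-
-/* Illustrative sketch, not part of the original header: a per-lane clamp
- * built from the packed min/max pair (assumes lo <= hi and no NaN inputs):
- *
- *   static __inline__ __m128d clamp_pd(__m128d __x, __m128d __lo, __m128d __hi)
- *   {
- *     return _mm_min_pd(_mm_max_pd(__x, __lo), __hi);
- *   }
- */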
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the right source operand.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values in the second operand and the one's complement of the first
-///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)(~(__v2du)__a & (__v2du)__b);
-}
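-
-/* Illustrative sketch, not part of the original header: ANDN with a
- * sign-bit mask gives the classic absolute-value idiom for [2 x double]
- * (_mm_set1_pd is declared later in this file):
- *
- *   __m128d __sign = _mm_set1_pd(-0.0);           // only the sign bits set
- *   __m128d __abs  = _mm_andnot_pd(__sign, __x);  // clear each lane's sign bit
- */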
-
-/// Performs a bitwise OR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a ^ (__v2du)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
-///    for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPD / CMPEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than those in the second operand. Each comparison
-///    yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
-}
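-
-/* Illustrative sketch, not part of the original header: packed compares
- * return per-lane bit masks rather than booleans; combined with the bitwise
- * ops above they select between two vectors (here, a branch-free minimum):
- *
- *   __m128d __m = _mm_cmplt_pd(__a, __b);             // all-ones where a < b
- *   __m128d __r = _mm_or_pd(_mm_and_pd(__m, __a),     // take a where a < b,
- *                           _mm_andnot_pd(__m, __b)); // b elsewhere
- */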
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "ordered" with respect to each
-///    other if neither value is a NaN. Each comparison yields 0x0 for false,
-///    0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPD / CMPORDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "unordered" with respect to each
-///    other if one or both values are NaN. Each comparison yields 0x0 for
-///    false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPD / CMPUNORDPD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unequal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPD / CMPNEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSD / CMPEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
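-
-/* Note, not part of the original header: there is no CMPGTSD encoding, so
- * the greater-than forms are synthesized from CMPLTSD with the operands
- * swapped; the explicit merge above restores the convention that the upper
- * 64 bits come from \a __a. For example:
- *
- *   __m128d a = _mm_set_pd(8.0, 2.0);  // a = { 2.0, 8.0 }
- *   __m128d b = _mm_set_pd(0.0, 1.0);  // b = { 1.0, 0.0 }
- *   __m128d r = _mm_cmpgt_sd(a, b);    // lower lane all-ones (2.0 > 1.0),
- *                                      // upper lane 8.0 from a
- */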
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "ordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "ordered" with respect to each other if
-///    neither value is a NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSD / CMPORDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "unordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "unordered" with respect to each other if
-///    one or both values are NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSD / CMPUNORDSD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSD / CMPNEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
-}
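-
-/* Illustrative sketch, not part of the original header: unlike the
- * _mm_cmp*_sd family, the COMI family returns a plain int that can be used
- * directly in control flow:
- *
- *   if (_mm_comieq_sd(a, b)) {
- *     // the lower lanes compared equal, and neither was NaN
- *   }
- */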
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality. The
-///    comparison yields 0 for false, 1 for true.
-///
-///    If either of the two lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
-}
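-
-/* Note, not part of the original header: COMISD and UCOMISD compute the
- * same results and differ only in exception behavior. COMISD signals an
- * invalid-operation exception for any NaN operand, while UCOMISD signals
- * only for signaling NaNs, making the _mm_ucomi* forms the quiet choice. */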
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
-}
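-
-/// A minimal sketch of the NaN behavior described above (illustrative only;
-/// the helper name is ours and an SSE2-enabled build is assumed):
-/// \code
-///   #include <emmintrin.h>
-///   #include <math.h>
-///
-///   int nan_compare_demo(void) {
-///     __m128d qnan = _mm_set_sd((double)NAN);
-///     __m128d one = _mm_set_sd(1.0);
-///     int eq = _mm_ucomieq_sd(qnan, one);   // 0: NaN makes "equal" fail
-///     int neq = _mm_ucomineq_sd(qnan, one); // 1: NaN forces "not equal"
-///     return eq == 0 && neq == 1;           // evaluates to 1
-///   }
-/// \endcode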
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two single-precision floating-point
-///    values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
-///    The upper 64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2PS / CVTPD2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2ps((__v2df)__a);
-}
-
-/// Converts the lower two single-precision floating-point elements of a
-///    128-bit vector of [4 x float] into two double-precision floating-point
-///    values, returned in a 128-bit vector of [2 x double]. The upper two
-///    elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PD / CVTPS2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower two single-precision
-///    floating-point elements are converted to double-precision values. The
-///    upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
-}
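-
-/// A short round-trip sketch of the two conversions above (illustrative; the
-/// helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   __m128d roundtrip_low_pair(void) {
-///     // Lanes after narrowing: {1.0f, 2.0f, 0.0f, 0.0f}.
-///     __m128 lo = _mm_cvtpd_ps(_mm_set_pd(2.0, 1.0));
-///     // Widening reads only the two low lanes: {1.0, 2.0}.
-///     return _mm_cvtps_pd(lo);
-///   }
-/// \endcode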
-
-/// Converts the lower two integer elements of a 128-bit vector of
-///    [4 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-///    The upper two elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PD / CVTDQ2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. The lower two integer elements are
-///    converted to double-precision values.
-///
-///    The upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a 128-bit vector of [2 x double]
-///    into a 32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si((__v2df)__a);
-}
-
-/// Converts the lower double-precision floating-point element of a
-///    128-bit vector of [2 x double], in the second parameter, into a
-///    single-precision floating-point value, returned in the lower 32 bits of a
-///    128-bit vector of [4 x float]. The upper 96 bits of the result vector are
-///    copied from the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SS / CVTSD2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are
-///    copied to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
-///    converted value from the second parameter. The upper 96 bits are copied
-///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
-  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
-}
-
-/// Converts a 32-bit signed integer value, in the second parameter, into
-///    a double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 32-bit signed integer containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the lower single-precision floating-point element of a
-///    128-bit vector of [4 x float], in the second parameter, into a
-///    double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SD / CVTSS2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower single-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPD2DQ / CVTTPD2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
-  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a 128-bit vector of [2 x double] into
-///    a 32-bit signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si((__v2df)__a);
-}
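-
-/// A sketch contrasting the rounding and truncating forms above (illustrative;
-/// the helper name is ours and the default MXCSR rounding mode is assumed):
-/// \code
-///   #include <emmintrin.h>
-///
-///   void round_vs_truncate(int *r, int *t) {
-///     __m128d v = _mm_set_sd(1.5);
-///     *r = _mm_cvtsd_si32(v);  // 2: round-to-nearest-even
-///     *t = _mm_cvttsd_si32(v); // 1: always truncates toward zero
-///   }
-/// \endcode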
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
-}
-
-/// Converts the two signed 32-bit integer elements of a 64-bit vector of
-///    [2 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PD </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32].
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
-  return __builtin_ia32_cvtpi2pd((__v2si)__a);
-}
-
-/// Returns the low-order element of a 128-bit vector of [2 x double] as
-///    a double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
-/// \returns A double-precision floating-point value copied from the lower 64
-///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
-  return __a[0];
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(const __m128d*)__dp;
-}
-
-/// Loads a double-precision floating-point value from a specified memory
-///    location and duplicates it to both vector elements of a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVDDUP </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-/// \returns A 128-bit vector of [2 x double] containing the loaded and
-///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
-  struct __mm_load1_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
-}
-
-#define _mm_load_pd1(dp) _mm_load1_pd(dp)
-
-/// Loads two double-precision values, in reverse order, from an aligned
-///    memory location into a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction
-/// plus the shuffling needed to reverse the elements. In AVX mode, the load
-/// and shuffle may be folded into a single \c VPERMILPD instruction.
-///
-/// \param __dp
-///    A 16-byte aligned pointer to an array of double-precision values to be
-///    loaded in reverse order.
-/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
-///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(const __m128d*)__dp;
-  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
-  struct __loadu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__dp)->__v;
-}
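-
-/// A sketch of an unaligned load feeding a horizontal add (illustrative; the
-/// helper name is ours; _mm_load_pd would require 16-byte alignment instead):
-/// \code
-///   #include <emmintrin.h>
-///
-///   double sum_two(const double *p) {
-///     __m128d v = _mm_loadu_pd(p);             // any alignment is fine
-///     __m128d hi = _mm_unpackhi_pd(v, v);      // broadcast the high lane
-///     return _mm_cvtsd_f64(_mm_add_sd(v, hi)); // p[0] + p[1]
-///   }
-/// \endcode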
-
-/// Loads a 64-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
-  struct __loadu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  long long __u = ((const struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
-}
-
-/// Loads a 32-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
-  struct __loadu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  int __u = ((const struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
-}
-
-/// Loads a 16-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __a
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
-  struct __loadu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  short __u = ((const struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
-}
-
-/// Loads a 64-bit double-precision value to the low element of a 128-bit
-///    vector of [2 x double] and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-///    The address of the memory location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
-  struct __mm_load_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
-}
-
-/// Loads a double-precision value into the high-order bits of a 128-bit
-///    vector of [2 x double]. The low-order bits are copied from the low-order
-///    bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [127:64] of the result. The address of the memory location does not have
-///    to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
-}
-
-/// Loads a double-precision value into the low-order bits of a 128-bit
-///    vector of [2 x double]. The high-order bits are copied from the
-///    high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [63:0] of the result. The address of the memory location does not have to
-///    be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadl_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double] with
-///    unspecified content. This could be used as an argument to another
-///    intrinsic function where the argument is required but the value is not
-///    actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
-///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
-  return (__m128d)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits of the vector are initialized with the specified double-precision
-///    floating-point value. The upper 64 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
-///    lower 64 bits contain the value of the parameter. The upper 64 bits are
-///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
-  return _mm_set1_pd(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized with the specified double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double],
-///    initialized in reverse order with the specified double-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
-}
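-
-/// A sketch of the two argument orders above (illustrative; the helper name
-/// is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   int set_order_demo(void) {
-///     __m128d a = _mm_set_pd(2.0, 1.0);  // high = 2.0, low = 1.0
-///     __m128d b = _mm_setr_pd(1.0, 2.0); // low = 1.0, high = 2.0
-///     return _mm_comieq_sd(a, b);        // 1: the low lanes match
-///   }
-/// \endcode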
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [2 x double] with
-///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits are set to the lower 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits are written to the
-///    upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
-///    lower 64 bits of the result.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
-  struct __mm_store_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
-}
-
-/// Moves packed double-precision values from a 128-bit vector of
-///    [2 x double] to a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to an aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A packed 128-bit vector of [2 x double] containing the values to be
-///    moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to both
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-  _mm_store_pd(__dp, __a);
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to both
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
-  _mm_store1_pd(__dp, __a);
-}
-
-/// Stores a 128-bit vector of [2 x double] into an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
-  struct __storeu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
-}
-
-/// Stores two double-precision values, in reverse order, from a 128-bit
-///    vector of [2 x double] to a 16-byte aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to a shuffling instruction followed by a
-/// <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be reversed and
-///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
-  *(__m128d *)__dp = __a;
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
-}
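-
-/// A sketch using the two scalar stores above to split a vector without any
-/// shuffling (illustrative; the helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   void split_pd(double *lo, double *hi, __m128d v) {
-///     _mm_storel_pd(lo, v); // bits [63:0]
-///     _mm_storeh_pd(hi, v); // bits [127:64]
-///   }
-/// \endcode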
-
-/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
-///    saving the lower 8 bits of each sum in the corresponding element of a
-///    128-bit result vector of [16 x i8].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDB / PADDB </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a + (__v16qu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],
-///    saving the lower 16 bits of each sum in the corresponding element of a
-///    128-bit result vector of [8 x i16].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDW / PADDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-/// \returns A 128-bit vector of [8 x i16] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a + (__v8hu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],
-///    saving the lower 32 bits of each sum in the corresponding element of a
-///    128-bit result vector of [4 x i32].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDD / PADDD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32].
-/// \param __b
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a + (__v4su)__b);
-}
-
-/// Adds two signed or unsigned 64-bit integer values, returning the
-///    lower 64 bits of the sum.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PADDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer.
-/// \param __b
-///    A 64-bit integer.
-/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
-///    saving the lower 64 bits of each sum in the corresponding element of a
-///    128-bit result vector of [2 x i64].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDQ / PADDQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64].
-/// \param __b
-///    A 128-bit vector of [2 x i64].
-/// \returns A 128-bit vector of [2 x i64] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a + (__v2du)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [16 x i8] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
-///    saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSB / PADDSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [16 x i8] vector.
-/// \param __b
-///    A 128-bit signed [16 x i8] vector.
-/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [8 x i16] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-///    are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-///    0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSW / PADDSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [16 x i8] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [16 x i8]. Sums greater than 0xFF are
-///    saturated to 0xFF.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSB / PADDUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [8 x i16] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [8 x i16]. Sums greater than 0xFFFF are
-///    saturated to 0xFFFF.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSW / PADDUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
-}
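-
-/// A sketch contrasting wraparound and saturating byte addition (illustrative;
-/// the helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   void wrap_vs_saturate(__m128i *wrapped, __m128i *clamped) {
-///     __m128i a = _mm_set1_epi8((char)0xFF);
-///     __m128i b = _mm_set1_epi8(1);
-///     *wrapped = _mm_add_epi8(a, b);  // every byte wraps to 0x00
-///     *clamped = _mm_adds_epu8(a, b); // every byte clamps to 0xFF
-///   }
-/// \endcode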
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [16 x i8] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGB / PAVGB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [8 x i16] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGW / PAVGW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, producing eight intermediate 32-bit signed integer products, and
-///    adds the consecutive pairs of 32-bit products to form a 128-bit signed
-///    [4 x i32] vector.
-///
-///    For example, bits [15:0] of both parameters are multiplied producing a
-///    32-bit product, bits [31:16] of both parameters are multiplied producing
-///    a 32-bit product, and the sum of those two products becomes bits [31:0]
-///    of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMADDWD / PMADDWD </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
-}
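-
-/// A sketch of the common dot-product building block described above: with
-/// all-ones weights, the pairwise multiply-add reduces to horizontal pair
-/// sums (illustrative; the helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   __m128i pair_sums(__m128i v) {
-///     // Lane i of the result is v[2*i] + v[2*i+1] as a signed 32-bit sum.
-///     return _mm_madd_epi16(v, _mm_set1_epi16(1));
-///   }
-/// \endcode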
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSW / PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUB / PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSW / PMINSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUB / PMINUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHW / PMULHW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two unsigned [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit unsigned [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHUW / PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
-///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the lower 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULLW / PMULLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a * (__v8hu)__b);
-}
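-
-/// A sketch combining the high and low halves above into full 32-bit signed
-/// products (illustrative; the helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   __m128i full_products_lo(__m128i a, __m128i b) {
-///     __m128i lo = _mm_mullo_epi16(a, b); // low 16 bits of each product
-///     __m128i hi = _mm_mulhi_epi16(a, b); // high 16 bits of each product
-///     // Interleaving reassembles the 32-bit products of lanes 0..3.
-///     return _mm_unpacklo_epi16(lo, hi);
-///   }
-/// \endcode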
-
-/// Multiplies the 32-bit unsigned integer values contained in the lower
-///    32 bits of each of the two 64-bit integer vector operands and returns
-///    the 64-bit unsigned product.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer containing one of the source operands.
-/// \param __b
-///    A 64-bit integer containing one of the source operands.
-/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
-  return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
-}
-
-/// Multiplies the 32-bit unsigned integer values contained in the lower
-///    32 bits of the corresponding elements of two [2 x i64] vectors, and
-///    returns the 64-bit products in the corresponding elements of a
-///    [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULUDQ / PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A [2 x i64] vector containing one of the source operands.
-/// \param __b
-///    A [2 x i64] vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
-}
-
-/// Computes the absolute differences of corresponding 8-bit integer
-///    values in two 128-bit vectors. Sums the first 8 absolute differences, and
-///    separately sums the second 8 absolute differences. Packs these two
-///    unsigned 16-bit integer sums into the upper and lower elements of a
-///    [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSADBW / PSADBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the sums of the sets of absolute
-///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
-}
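-
-/// A sketch reducing the two partial sums above to a single total
-/// (illustrative; the helper name is ours):
-/// \code
-///   #include <emmintrin.h>
-///
-///   int sad16(__m128i a, __m128i b) {
-///     __m128i s = _mm_sad_epu8(a, b); // partial sums in bits [15:0], [79:64]
-///     return _mm_cvtsi128_si32(s) +
-///            _mm_cvtsi128_si32(_mm_srli_si128(s, 8));
-///   }
-/// \endcode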
-
-/// Subtracts the corresponding 8-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBB / PSUBB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a - (__v16qu)__b);
-}
-
-/// Subtracts the corresponding 16-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBW / PSUBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a - (__v8hu)__b);
-}
-
-/// Subtracts the corresponding 32-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBD / PSUBD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a - (__v4su)__b);
-}
-
-/// Subtracts signed or unsigned 64-bit integer values and writes the
-///    difference to the corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the minuend.
-/// \param __b
-///    A 64-bit integer vector containing the subtrahend.
-/// \returns A 64-bit integer vector containing the difference of the values in
-///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
-}
-
-/// Subtracts the corresponding elements of two [2 x i64] vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBQ / PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a - (__v2du)__b);
-}
-
-/// Subtracts corresponding 8-bit signed integer values in the input and
-///    returns the differences in the corresponding bytes in the destination.
-///    Differences greater than 0x7F are saturated to 0x7F, and differences less
-///    than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSB / PSUBSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit signed integer values in the input and
-///    returns the differences in the corresponding elements of the
-///    destination. Differences greater than 0x7FFF are saturated to 0x7FFF,
-///    and differences less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSW / PSUBSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-///    and returns the differences in the corresponding bytes in the
-///    destination. Differences less than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSB / PSUBUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-///    and returns the differences in the corresponding elements of the
-///    destination. Differences less than 0x0000 are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSW / PSUBUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise AND of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors, using the
-///    one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing the left source operand. The one's complement
-///    of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector containing the right source operand.
-/// \returns A 128-bit integer vector containing the bitwise AND of the one's
-///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)(~(__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise OR of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
-///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
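-
-// Illustrative sketch (not part of the original header): SSE2 has no
-// vector NOT instruction, but one can be composed from the logical
-// intrinsics above. The helper name is hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_not_si128(__m128i __a, __m128i __ones)
-{
-  // __ones must be all-ones (e.g. a value compared equal to itself);
-  // XOR with all-ones flips every bit. Equivalently,
-  // _mm_andnot_si128(__a, __ones) computes ~a & ones == ~a.
-  return _mm_xor_si128(__a, __ones);
-}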
-
-/// Left-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_slli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSLLDQ / PSLLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to left-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-#define _mm_bslli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
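-
-// Illustrative note (not part of the original header): _mm_bslli_si128 is
-// an alias for _mm_slli_si128; both shift whole bytes, not bits, and both
-// require a compile-time constant count. The helper name is hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_byteshift(__m128i __a)
-{
-  // Shifting left by 4 bytes moves element i of [4 x i32] to element i+1
-  // and zero-fills the lowest 32-bit lane. Counts above 15 yield zero.
-  return _mm_slli_si128(__a, 4);
-}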
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
-}
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psllqi128((__v2di)__a, __count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
-}
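-
-// Illustrative sketch (not part of the original header): the two shift
-// forms above differ only in how the count is passed. The helper name is
-// hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_shift_forms(__m128i __a, __m128i __vcount)
-{
-  // _mm_slli_epi32 takes the count as an ordinary integer, while
-  // _mm_sll_epi32 reads it from bits [63:0] of a vector operand; both
-  // zero the result lanes when the count exceeds the element width.
-  return _mm_sll_epi32(_mm_slli_epi32(__a, 1), __vcount);
-}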
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_srli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSRLDQ / PSRLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to right-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-#define _mm_bsrli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
-}
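-
-// Illustrative sketch (not part of the original header): the difference
-// between the arithmetic (sra*) and logical (srl*) right shifts above.
-// The helper name is hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_shift_kinds(__m128i __a)
-{
-  __m128i __arith = _mm_srai_epi32(__a, 31); // all-ones lane if negative
-  __m128i __logic = _mm_srli_epi32(__a, 31); // 0 or 1 per lane
-  // XOR of the two isolates the bits that differ due to sign extension.
-  return _mm_xor_si128(__arith, __logic);
-}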
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
-}
-
-/// Compares each of the corresponding 8-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
-///    for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQB / PCMPEQB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qi)__a == (__v16qi)__b);
-}
-
-/// Compares each of the corresponding 16-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQW / PCMPEQW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a == (__v8hi)__b);
-}
-
-/// Compares each of the corresponding 32-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQD / PCMPEQD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a == (__v4si)__b);
-}
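-
-// Illustrative sketch (not part of the original header): comparison
-// results are all-ones/all-zeros masks, which compose with the logical
-// intrinsics above into a branchless select. The helper name is
-// hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_select_eq(__m128i __a, __m128i __b, __m128i __x, __m128i __y)
-{
-  __m128i __mask = _mm_cmpeq_epi32(__a, __b);
-  // (mask & x) | (~mask & y): picks x in lanes where a == b, else y.
-  return _mm_or_si128(_mm_and_si128(__mask, __x),
-                      _mm_andnot_si128(__mask, __y));
-}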
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are
-///    greater than those in the second operand. Each comparison yields 0x0 for
-///    false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
-  /* This function always performs a signed comparison, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)((__v16qs)__a > (__v16qs)__b);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a > (__v8hi)__b);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a > (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are less
-///    than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi8(__b, __a);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi16(__b, __a);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi32(__b, __a);
-}
-
-#ifdef __x86_64__
-/// Converts a 64-bit signed integer value from the second operand into a
-///    double-precision value and returns it in the lower element of a [2 x
-///    double] vector; the upper element of the returned vector is copied from
-///    the upper element of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
-///    copied to the upper 64 bits of the destination.
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, according to the current rounding mode.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si64((__v2df)__a);
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si64((__v2df)__a);
-}
-#endif
-
-/// Converts a vector of [4 x i32] into a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PS / CVTDQ2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit integer vector of [4 x i32] containing the converted
-///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32],
-///    truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPS2DQ / CVTTPS2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
-}
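-
-// Illustrative sketch (not part of the original header): the two
-// float-to-int conversions above round differently. The helper name is
-// hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_round_vs_trunc(__m128 __a)
-{
-  __m128i __rounded   = _mm_cvtps_epi32(__a);  // current rounding mode
-  __m128i __truncated = _mm_cvttps_epi32(__a); // always toward zero
-  // For an input lane of 2.7f: rounded is 3 (default round-to-nearest-
-  // even), truncated is 2. The XOR exposes any lanes that differ.
-  return _mm_xor_si128(__rounded, __truncated);
-}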
-
-/// Returns a vector of [4 x i32] where the lowest element is the input
-///    operand and the remaining elements are zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A 32-bit signed integer operand.
-/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
-}
-
-#ifdef __x86_64__
-/// Returns a vector of [2 x i64] where the lower element is the input
-///    operand and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
-}
-#endif
-
-/// Moves the least significant 32 bits of a vector of [4 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A vector of [4 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
-  __v4si __b = (__v4si)__a;
-  return __b[0];
-}
-
-#ifdef __x86_64__
-/// Moves the least significant 64 bits of a vector of [2 x i64] to a
-///    64-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A vector of [2 x i64]. The least significant 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
-  return __a[0];
-}
-#endif
-
-/// Moves packed integer values from an aligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA / MOVDQA </c> instruction.
-///
-/// \param __p
-///    An aligned pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
-  return *__p;
-}
-
-/// Moves packed integer values from an unaligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU / MOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
-  struct __loadu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si128*)__p)->__v;
-}
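-
-// Illustrative sketch (not part of the original header): _mm_loadu_si128
-// is the safe way to read 16 bytes from arbitrarily aligned memory, e.g.
-// from a raw byte buffer. The helper name is hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_load_bytes(const unsigned char *__buf)
-{
-  // No alignment requirement; _mm_load_si128 would require __buf to be
-  // 16-byte aligned.
-  return _mm_loadu_si128((const __m128i_u *)__buf);
-}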
-
-/// Returns a vector of [2 x i64] where the lower element is taken from
-///    the lower element of the operand, and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of
-///    the destination.
-/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
-///    moved value. The higher order bits are cleared.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
-  struct __mm_loadl_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
-}
-
-/// Generates a 128-bit vector of [4 x i32] with unspecified content.
-///    This could be used as an argument to another intrinsic function where the
-///    argument is required but the value is not actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
-  return (__m128i)__builtin_ia32_undef128();
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
-  return _mm_set_epi64x((long long)__q1, (long long)__q0);
-}
-
-/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
-///    the specified 32-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i3
-///    A 32-bit integer value used to initialize bits [127:96] of the
-///    destination vector.
-/// \param __i2
-///    A 32-bit integer value used to initialize bits [95:64] of the destination
-///    vector.
-/// \param __i1
-///    A 32-bit integer value used to initialize bits [63:32] of the destination
-///    vector.
-/// \param __i0
-///    A 32-bit integer value used to initialize bits [31:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
-}
-
-/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
-///    the specified 16-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w7
-///    A 16-bit integer value used to initialize bits [127:112] of the
-///    destination vector.
-/// \param __w6
-///    A 16-bit integer value used to initialize bits [111:96] of the
-///    destination vector.
-/// \param __w5
-///    A 16-bit integer value used to initialize bits [95:80] of the destination
-///    vector.
-/// \param __w4
-///    A 16-bit integer value used to initialize bits [79:64] of the destination
-///    vector.
-/// \param __w3
-///    A 16-bit integer value used to initialize bits [63:48] of the destination
-///    vector.
-/// \param __w2
-///    A 16-bit integer value used to initialize bits [47:32] of the destination
-///    vector.
-/// \param __w1
-///    A 16-bit integer value used to initialize bits [31:16] of the destination
-///    vector.
-/// \param __w0
-///    A 16-bit integer value used to initialize bits [15:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
-}
-
-/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
-///    the specified 8-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b15
-///    Initializes bits [127:120] of the destination vector.
-/// \param __b14
-///    Initializes bits [119:112] of the destination vector.
-/// \param __b13
-///    Initializes bits [111:104] of the destination vector.
-/// \param __b12
-///    Initializes bits [103:96] of the destination vector.
-/// \param __b11
-///    Initializes bits [95:88] of the destination vector.
-/// \param __b10
-///    Initializes bits [87:80] of the destination vector.
-/// \param __b9
-///    Initializes bits [79:72] of the destination vector.
-/// \param __b8
-///    Initializes bits [71:64] of the destination vector.
-/// \param __b7
-///    Initializes bits [63:56] of the destination vector.
-/// \param __b6
-///    Initializes bits [55:48] of the destination vector.
-/// \param __b5
-///    Initializes bits [47:40] of the destination vector.
-/// \param __b4
-///    Initializes bits [39:32] of the destination vector.
-/// \param __b3
-///    Initializes bits [31:24] of the destination vector.
-/// \param __b2
-///    Initializes bits [23:16] of the destination vector.
-/// \param __b1
-///    Initializes bits [15:8] of the destination vector.
-/// \param __b0
-///    Initializes bits [7:0] of the destination vector.
-/// \returns An initialized 128-bit vector of [16 x i8] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
-}
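-
-// Illustrative note (not part of the original header): the _mm_set_*
-// intrinsics take arguments from the most significant element down to the
-// least significant one. The helper name is hypothetical.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__demo_set_order(void)
-{
-  // Element 0 (bits [31:0]) holds 0 and element 3 (bits [127:96]) holds 3,
-  // so storing this vector yields 0,1,2,3 in ascending address order on
-  // x86's little-endian layout. _mm_setr_epi32(0, 1, 2, 3) is equivalent.
-  return _mm_set_epi32(3, 2, 1, 0);
-}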
-
-/// Initializes both values in a 128-bit integer vector with the
-///    specified 64-bit integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    A 64-bit integer value used to initialize the elements of the
-///    destination integer vector.
-/// \returns An initialized 128-bit integer vector of [2 x i64] with both
-///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
-  return _mm_set_epi64x(__q, __q);
-}
-
-/// Initializes both values in a 128-bit vector of [2 x i64] with the
-///    specified 64-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    A 64-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [2 x i64] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
-  return _mm_set_epi64(__q, __q);
-}
-
-/// Initializes all values in a 128-bit vector of [4 x i32] with the
-///    specified 32-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i
-///    A 32-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
-  return _mm_set_epi32(__i, __i, __i, __i);
-}
-
-/// Initializes all values in a 128-bit vector of [8 x i16] with the
-///    specified 16-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w
-///    A 16-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
-  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Initializes all values in a 128-bit vector of [16 x i8] with the
-///    specified 8-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b
-///    An 8-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [16 x i8] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __q0
-///    A 64-bit integral value used to initialize the lower 64 bits of the
-///    result.
-/// \param __q1
-///    A 64-bit integral value used to initialize the upper 64 bits of the
-///    result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64(__q1, __q0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
-  return _mm_set_epi32(__i3, __i2, __i1, __i0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w0
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \param __w1
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w2
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w3
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w4
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w5
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w6
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w7
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
-  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b0
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \param __b1
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b2
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b3
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b4
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b5
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b6
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b7
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b8
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b9
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
-}
-
-/// Creates a 128-bit integer vector initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit integer vector with all elements set to
-///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
-}
-
-/// Stores a 128-bit integer vector to a memory location aligned on a
-///    128-bit boundary.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location that will receive the integer
-///    values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
-  *__p = __b;
-}
-
-/// Stores a 128-bit integer vector to an unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the integer values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
-  struct __storeu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
-}
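-
-// Illustrative sketch (not part of the original header): a 16-byte
-// memcpy-style round trip using the unaligned load/store pair. The helper
-// name is hypothetical.
-static __inline__ void __DEFAULT_FN_ATTRS
-__demo_copy16(void *__dst, const void *__src)
-{
-  // Both accesses go through __may_alias__ wrappers, so this is safe for
-  // buffers of any type and any alignment.
-  _mm_storeu_si128((__m128i_u *)__dst,
-                   _mm_loadu_si128((const __m128i_u *)__src));
-}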
-
-/// Stores a 64-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
-  struct __storeu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
-}
-
-/// Stores a 32-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
-  struct __storeu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
-}
-
-/// Stores a 16-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __p
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
-  struct __storeu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
-}
-
-/// Moves bytes selected by the mask from the first operand to the
-///    specified unaligned memory location. When a mask bit is 1, the
-///    corresponding byte is written, otherwise it is not written.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon). Exception and trap behavior for elements not selected
-///    for storage to memory are implementation dependent.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVDQU / MASKMOVDQU </c>
-///   instruction.
-///
-/// \param __d
-///    A 128-bit integer vector containing the values to be moved.
-/// \param __n
-///    A 128-bit integer vector containing the mask. The most significant bit
-///    of each byte is the mask bit for the corresponding byte of \a __d.
-/// \param __p
-///    A pointer to an unaligned 128-bit memory location where the specified
-///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
-  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
-}
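-
-// Illustrative sketch (not part of the original header): writing only the
-// bytes selected by a comparison mask. The helper name is hypothetical.
-static __inline__ void __DEFAULT_FN_ATTRS
-__demo_masked_store(__m128i __d, __m128i __a, __m128i __b, char *__p)
-{
-  // Bytes where __a == __b have their mask MSB set and are written from
-  // __d; all other bytes of the destination are left untouched.
-  _mm_maskmoveu_si128(__d, _mm_cmpeq_epi8(__a, __b), __p);
-}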
-
-/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
-///    a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the lower 64 bits
-///    of the integer vector parameter.
-/// \param __a
-///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
-///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
-  struct __mm_storel_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
-///    aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPD / MOVNTPD </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
-}
-
-/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQ / MOVNTDQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
-}
-
-/// Stores a 32-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTI </c> instruction.
-///
-/// \param __p
-///    A pointer to the 32-bit memory location used to store the value.
-/// \param __a
-///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
-  __builtin_ia32_movnti(__p, __a);
-}
-
-#ifdef __x86_64__
-/// Stores a 64-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTIQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 64-bit memory location used to store the value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
-  __builtin_ia32_movnti64(__p, __a);
-}
-#endif
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// The cache line containing \a __p is flushed and invalidated from all
-///    caches in the coherency domain.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CLFLUSH </c> instruction.
-///
-/// \param __p
-///    A pointer to the memory location used to identify the cache line to be
-///    flushed.
-void _mm_clflush(void const * __p);
-
-/// Forces strong memory ordering (serialization) between load
-///    instructions preceding this instruction and load instructions following
-///    this instruction, ensuring the system completes all previous loads before
-///    executing subsequent loads.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> LFENCE </c> instruction.
-///
-void _mm_lfence(void);
-
-/// Forces strong memory ordering (serialization) between load and store
-///    instructions preceding this instruction and load and store instructions
-///    following this instruction, ensuring that the system completes all
-///    previous memory accesses before executing subsequent memory accesses.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MFENCE </c> instruction.
-///
-void _mm_mfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
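-
-// Illustrative sketch (not part of the original header): non-temporal
-// stores are weakly ordered, so a producer publishing data through
-// _mm_stream_si128 should fence before signalling a consumer. Names are
-// hypothetical; __n is assumed to be a multiple of 16 bytes.
-static __inline__ void __DEFAULT_FN_ATTRS
-__demo_stream_fill(__m128i *__dst, __m128i __v, int __n)
-{
-  int __i;
-  for (__i = 0; __i < __n / 16; ++__i)
-    _mm_stream_si128(__dst + __i, __v);   // bypasses the cache
-  _mm_mfence();                           // make the stores visible
-}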
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7F are saturated to 0x7F.
-///    Negative values less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
-///
-/// \param __a
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the lower 64 bits of the result.
-/// \param __b
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Converts 32-bit signed integers from both 128-bit integer vector
-///    operands into 16-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-///    Negative values less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
-}
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit unsigned integers, and packs the results into the
-///    destination. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
-///    the immediate-value parameter as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] select values from \a __a to be assigned
-///    to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
-/// \returns An integer, whose lower 16 bits are selected from the 128-bit
-///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
-                                                    (int)(imm)))
-
-/// Constructs a 128-bit integer vector by first making a copy of the
-///    128-bit integer vector parameter, and then inserting the lower 16 bits
-///    of an integer parameter into an offset specified by the immediate-value
-///    parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
-///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
-///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
-///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
-                                        (int)(imm)))
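-
-/* Editorial sketch: a round trip through _mm_extract_epi16 and
- * _mm_insert_epi16. Both selectors must be compile-time constants; the
- * function name is hypothetical. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_bump_word3(__m128i __v)
-{
-  int __w = _mm_extract_epi16(__v, 3);      /* read element 3, zero-extended */
-  return _mm_insert_epi16(__v, __w + 1, 3); /* write it back, incremented    */
-}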
-
-/// Copies the values of the most significant bits from each 8-bit
-///    element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
-///    value, zero-extends the value, and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVMSKB / PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bits from each 8-bit element in \a __a,
-///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
-  return __builtin_ia32_pmovmskb128((__v16qi)__a);
-}
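-
-/* Editorial sketch: a common use of the mask from _mm_movemask_epi8, testing
- * whether any byte of a vector equals a given byte. Names are hypothetical. */
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_contains_byte(__m128i __v, char __c)
-{
-  __m128i __eq = _mm_cmpeq_epi8(__v, _mm_set1_epi8(__c));
-  return _mm_movemask_epi8(__eq) != 0; /* nonzero iff some byte matched */
-}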
-
-/// Constructs a 128-bit integer vector by shuffling four 32-bit
-///    elements of a 128-bit integer vector parameter, using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the values to be copied.
-/// \param imm
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a. The destinations within the 128-bit destination are assigned
-///    values as follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] of the result. \n
-///    Bits [3:2] are used to assign values to bits [63:32] of the result. \n
-///    Bits [5:4] are used to assign values to bits [95:64] of the result. \n
-///    Bits [7:6] are used to assign values to bits [127:96] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [31:0] of \a a. \n
-///    01: assign values from bits [63:32] of \a a. \n
-///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
-  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
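-
-/* Editorial sketch: building the _mm_shuffle_epi32 immediate with _MM_SHUFFLE
- * from <xmmintrin.h>, which packs four 2-bit selectors, highest element
- * first. _MM_SHUFFLE(0, 1, 2, 3) == 0x1B reverses the four elements. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_reverse_epi32(__m128i __v)
-{
-  return _mm_shuffle_epi32(__v, _MM_SHUFFLE(0, 1, 2, 3));
-}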
-
-/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits
-///    [127:64] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits [1:0] are used to assign values to bits [15:0] of the result. \n
-///    Bits [3:2] are used to assign values to bits [31:16] of the result. \n
-///    Bits [5:4] are used to assign values to bits [47:32] of the result. \n
-///    Bits [7:6] are used to assign values to bits [63:48] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [15:0] of \a a. \n
-///    01: assign values from bits [31:16] of \a a. \n
-///    10: assign values from bits [47:32] of \a a. \n
-///    11: assign values from bits [63:48] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
-  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
-
-/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFHW / PSHUFHW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits
-///    [63:0] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits [1:0] are used to assign values to bits [79:64] of the result. \n
-///    Bits [3:2] are used to assign values to bits [95:80] of the result. \n
-///    Bits [5:4] are used to assign values to bits [111:96] of the result. \n
-///    Bits [7:6] are used to assign values to bits [127:112] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [79:64] of \a a. \n
-///    01: assign values from bits [95:80] of \a a. \n
-///    10: assign values from bits [111:96] of \a a. \n
-///    11: assign values from bits [127:112] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
-  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
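-
-/* Editorial sketch: _mm_shufflelo_epi16 and _mm_shufflehi_epi16 each permute
- * only their own 64-bit half, so swapping adjacent words across the whole
- * vector takes both. _MM_SHUFFLE(2, 3, 0, 1) selects elements 1,0,3,2. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_swap_adjacent_words(__m128i __v)
-{
-  __v = _mm_shufflelo_epi16(__v, _MM_SHUFFLE(2, 3, 0, 1));
-  return _mm_shufflehi_epi16(__v, _MM_SHUFFLE(2, 3, 0, 1));
-}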
-
-/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
-///    of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHBW / PUNPCKHBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-///    Bits [71:64] are written to bits [7:0] of the result. \n
-///    Bits [79:72] are written to bits [23:16] of the result. \n
-///    Bits [87:80] are written to bits [39:32] of the result. \n
-///    Bits [95:88] are written to bits [55:48] of the result. \n
-///    Bits [103:96] are written to bits [71:64] of the result. \n
-///    Bits [111:104] are written to bits [87:80] of the result. \n
-///    Bits [119:112] are written to bits [103:96] of the result. \n
-///    Bits [127:120] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [71:64] are written to bits [15:8] of the result. \n
-///    Bits [79:72] are written to bits [31:24] of the result. \n
-///    Bits [87:80] are written to bits [47:40] of the result. \n
-///    Bits [95:88] are written to bits [63:56] of the result. \n
-///    Bits [103:96] are written to bits [79:72] of the result. \n
-///    Bits [111:104] are written to bits [95:88] of the result. \n
-///    Bits [119:112] are written to bits [111:104] of the result. \n
-///    Bits [127:120] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
-///    [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHWD / PUNPCKHWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [15:0] of the result. \n
-///    Bits [95:80] are written to bits [47:32] of the result. \n
-///    Bits [111:96] are written to bits [79:64] of the result. \n
-///    Bits [127:112] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [31:16] of the result. \n
-///    Bits [95:80] are written to bits [63:48] of the result. \n
-///    Bits [111:96] are written to bits [95:80] of the result. \n
-///    Bits [127:112] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
-}
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHDQ / PUNPCKHDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHQDQ / PUNPCKHQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
-///    [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLBW / PUNPCKLBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [7:0] of the result. \n
-///    Bits [15:8] are written to bits [23:16] of the result. \n
-///    Bits [23:16] are written to bits [39:32] of the result. \n
-///    Bits [31:24] are written to bits [55:48] of the result. \n
-///    Bits [39:32] are written to bits [71:64] of the result. \n
-///    Bits [47:40] are written to bits [87:80] of the result. \n
-///    Bits [55:48] are written to bits [103:96] of the result. \n
-///    Bits [63:56] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-///    Bits [7:0] are written to bits [15:8] of the result. \n
-///    Bits [15:8] are written to bits [31:24] of the result. \n
-///    Bits [23:16] are written to bits [47:40] of the result. \n
-///    Bits [31:24] are written to bits [63:56] of the result. \n
-///    Bits [39:32] are written to bits [79:72] of the result. \n
-///    Bits [47:40] are written to bits [95:88] of the result. \n
-///    Bits [55:48] are written to bits [111:104] of the result. \n
-///    Bits [63:56] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
-}
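-
-/* Editorial sketch: a classic use of the unpack family, widening the low
- * eight unsigned bytes to 16-bit words by interleaving with zeros. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_zero_extend_lo_epi8(__m128i __v)
-{
-  /* Each low-order byte of __v is paired with a zero byte, yielding eight
-   * zero-extended 16-bit values. */
-  return _mm_unpacklo_epi8(__v, _mm_setzero_si128());
-}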
-
-/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
-///    vectors of [8 x i16] and interleaves them into a 128-bit vector of
-///    [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLWD / PUNPCKLWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-///    Bits [15:0] are written to bits [15:0] of the result. \n
-///    Bits [31:16] are written to bits [47:32] of the result. \n
-///    Bits [47:32] are written to bits [79:64] of the result. \n
-///    Bits [63:48] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-///    Bits [15:0] are written to bits [31:16] of the result. \n
-///    Bits [31:16] are written to bits [63:48] of the result. \n
-///    Bits [47:32] are written to bits [95:80] of the result. \n
-///    Bits [63:48] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLDQ / PUNPCKLDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination. \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
-}
-
-/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
-///    integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVDQ2Q </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
-  return (__m64)__a[0];
-}
-
-/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the
-///    upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVD+VMOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit value.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
-}
-
-/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
-///    integer vector, zeroing the upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors
-///    of [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
-}
-
-/// Extracts the sign bits of the double-precision values in the 128-bit
-///    vector of [2 x double], zero-extends the value, and writes it to the
-///    low-order bits of the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPD / MOVMSKPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values with sign bits to
-///    be extracted.
-/// \returns The sign bits from each of the double-precision elements in \a __a,
-///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
-  return __builtin_ia32_movmskpd((__v2df)__a);
-}
-
-
-/// Constructs a 128-bit floating-point vector of [2 x double] from two
-///    128-bit vector parameters of [2 x double], using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPD / SHUFPD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param i
-///    An 8-bit immediate value. The least significant two bits specify which
-///    elements to copy from \a a and \a b: \n
-///    Bit[0] = 0: lower element of \a a copied to lower element of result. \n
-///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
-///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
-///    Bit[1] = 1: upper element of \a b copied to upper element of result. \n
-/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
-                                  (int)(i)))
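-
-/* Editorial sketch: immediate 1 (Bit[0] = 1, Bit[1] = 0) copies the upper
- * element of __a to the low half of the result and the lower element of __b
- * to the high half. _MM_SHUFFLE2, defined near the end of this header, builds
- * the same immediate as _MM_SHUFFLE2(0, 1). */
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-__example_shuffle_pd(__m128d __a, __m128d __b)
-{
-  return _mm_shuffle_pd(__a, __b, 1);
-}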
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    floating-point vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    floating-point vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
-  return (__m128d)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
-  return (__m128d)__a;
-}
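-
-/* Editorial sketch: the six cast intrinsics above reinterpret bits without
- * any conversion and typically generate no instructions, so a round trip
- * through a cast leaves every bit unchanged. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_cast_roundtrip(__m128i __v)
-{
-  return _mm_castpd_si128(_mm_castsi128_pd(__v)); /* bit-identical to __v */
-}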
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Indicates that a spin loop is being executed, so that the processor can
-///    optimize power consumption during the loop.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAUSE </c> instruction.
-///
-void _mm_pause(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
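-
-/* Editorial sketch: _mm_pause() typically sits inside a spin-wait loop such
- * as this one; __example_flag is a hypothetical variable. */
-static __inline__ void
-__example_spin_wait(volatile int *__example_flag)
-{
-  while (*__example_flag == 0)
-    _mm_pause(); /* hint that this is a spin loop, reducing power */
-}
-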
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-
-#define _MM_DENORMALS_ZERO_ON   (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
-
-#define _MM_DENORMALS_ZERO_MASK (0x0040U)
-
-#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
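-
-/* Editorial sketch: enabling denormals-are-zero (DAZ) mode with the macros
- * above; support for DAZ depends on the processor. */
-static __inline__ void
-__example_enable_daz(void)
-{
-  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
-}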
-
-#endif /* __EMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/f16cintrin.h b/linux-x86/lib64/clang/14.0.2/include/f16cintrin.h
deleted file mode 100644
index 13905e6..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/f16cintrin.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __IMMINTRIN_H
-#error "Never use <f16cintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __F16CINTRIN_H
-#define __F16CINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 \
-  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 \
-  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256)))
-
-/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h,
- * but that's because icc can emulate these without f16c using a library call.
- * Since we don't do that, let's leave these in f16cintrin.h.
- */
-
-/// Converts a 16-bit half-precision float value into a 32-bit float
-///    value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
-///
-/// \param __a
-///    A 16-bit half-precision float value.
-/// \returns The converted 32-bit float value.
-static __inline float __DEFAULT_FN_ATTRS128
-_cvtsh_ss(unsigned short __a)
-{
-  __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
-  __v4sf __r = __builtin_ia32_vcvtph2ps(__v);
-  return __r[0];
-}
-
-/// Converts a 32-bit single-precision float value to a 16-bit
-///    half-precision float value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// unsigned short _cvtss_sh(float a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
-///
-/// \param a
-///    A 32-bit single-precision float value to be converted to a 16-bit
-///    half-precision float value.
-/// \param imm
-///    An immediate value controlling rounding using bits [2:0]: \n
-///    000: Nearest \n
-///    001: Down \n
-///    010: Up \n
-///    011: Truncate \n
-///    1XX: Use MXCSR.RC for rounding
-/// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) \
-  ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
-                                                     (imm)))[0]))
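-
-/* Editorial sketch: converting a single float to half precision with explicit
- * round-to-nearest; rounding-control bits 000 select "Nearest" per the table
- * above. The function name is hypothetical. */
-static __inline unsigned short __DEFAULT_FN_ATTRS128
-__example_float_to_half(float __f)
-{
-  return _cvtss_sh(__f, 0); /* 0: round to nearest */
-}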
-
-/// Converts a 128-bit vector containing 32-bit float values into a
-///    128-bit vector containing 16-bit half-precision float values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
-///
-/// \param a
-///    A 128-bit vector containing 32-bit float values.
-/// \param imm
-///    An immediate value controlling rounding using bits [2:0]: \n
-///    000: Nearest \n
-///    001: Down \n
-///    010: Up \n
-///    011: Truncate \n
-///    1XX: Use MXCSR.RC for rounding
-/// \returns A 128-bit vector containing converted 16-bit half-precision float
-///    values. The lower 64 bits are used to store the converted 16-bit
-///    half-precision floating-point values.
-#define _mm_cvtps_ph(a, imm) \
-  ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
-
-/// Converts a 128-bit vector containing 16-bit half-precision float
-///    values into a 128-bit vector containing 32-bit float values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing 16-bit half-precision float values. The lower
-///    64 bits are used in the conversion.
-/// \returns A 128-bit vector of [4 x float] containing converted float values.
-static __inline __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtph_ps(__m128i __a)
-{
-  return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
-}
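-
-/* Editorial sketch: a float -> half -> float round trip using the two 128-bit
- * conversions above. Values outside half-precision range lose precision or
- * overflow; the function name is hypothetical. */
-static __inline __m128 __DEFAULT_FN_ATTRS128
-__example_ph_roundtrip(__m128 __v)
-{
-  return _mm_cvtph_ps(_mm_cvtps_ph(__v, 0)); /* 0: round to nearest */
-}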
-
-/// Converts a 256-bit vector of [8 x float] into a 128-bit vector
-///    containing 16-bit half-precision float values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
-///
-/// \param a
-///    A 256-bit vector containing 32-bit single-precision float values to be
-///    converted to 16-bit half-precision float values.
-/// \param imm
-///    An immediate value controlling rounding using bits [2:0]: \n
-///    000: Nearest \n
-///    001: Down \n
-///    010: Up \n
-///    011: Truncate \n
-///    1XX: Use MXCSR.RC for rounding
-/// \returns A 128-bit vector containing the converted 16-bit half-precision
-///    float values.
-#define _mm256_cvtps_ph(a, imm) \
- ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)))
-
-/// Converts a 128-bit vector containing 16-bit half-precision float
-///    values into a 256-bit vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing 16-bit half-precision float values to be
-///    converted to 32-bit single-precision float values.
-/// \returns A vector of [8 x float] containing the converted 32-bit
-///    single-precision float values.
-static __inline __m256 __DEFAULT_FN_ATTRS256
-_mm256_cvtph_ps(__m128i __a)
-{
-  return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
-}
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif /* __F16CINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h
deleted file mode 100644
index cdffd93..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ /dev/null
@@ -1,8450 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Automatically generated file, do not edit!
-//===----------------------------------------------------------------------===//
-
-
-
-#ifndef __HEXAGON_PROTOS_H_
-#define __HEXAGON_PROTOS_H_ 1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=abs(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_abs_R __builtin_HEXAGON_A2_abs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=abs(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_abs_P __builtin_HEXAGON_A2_absp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=abs(Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RR __builtin_HEXAGON_A2_add
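-
-/* Editorial sketch: these Q6_* macros expand directly to compiler builtins
- * and are used like ordinary functions. Word32 in the prototypes above is a
- * typedef from <hexagon_types.h>; plain int is used here for brevity. */
-static inline int __example_add3(int __a, int __b, int __c)
-{
-  return Q6_R_add_RR(Q6_R_add_RR(__a, __b), __c); /* (__a + __b) + __c */
-}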
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,#s16)
-   C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16)
-   Instruction Type:      ALU32_ADDI
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RI __builtin_HEXAGON_A2_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_add_PP __builtin_HEXAGON_A2_addp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rs32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RR __builtin_HEXAGON_A2_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RI __builtin_HEXAGON_A2_andir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=and(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_and_PP __builtin_HEXAGON_A2_andp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=aslh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asrh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.h,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.h,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(#s8,#S8)
-   C Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew
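-
-/* Editorial sketch: per the combine(Rs32,Rt32) syntax above, the first
- * operand lands in the high word of the 64-bit pair and the second in the
- * low word (assuming the usual combine semantics). The function name is
- * hypothetical. */
-static inline long long __example_make_pair(int __hi, int __lo)
-{
-  return Q6_P_combine_RR(__hi, __lo);
-}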
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=max(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_max_RR __builtin_HEXAGON_A2_max
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=max(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=maxu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=maxu(Rss32,Rtt32)
-   C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=min(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_min_RR __builtin_HEXAGON_A2_min
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=min(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_min_PP __builtin_HEXAGON_A2_minp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=minu(Rt32,Rs32)
-   C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=minu(Rtt32,Rss32)
-   C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=neg(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_neg_R __builtin_HEXAGON_A2_neg
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=neg(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_neg_P __builtin_HEXAGON_A2_negp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=neg(Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=not(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_R __builtin_HEXAGON_A2_not
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=not(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_not_P __builtin_HEXAGON_A2_notp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RR __builtin_HEXAGON_A2_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_or_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RI __builtin_HEXAGON_A2_orir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=or(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_or_PP __builtin_HEXAGON_A2_orp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rss32):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sat(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sat_P __builtin_HEXAGON_A2_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satb_R __builtin_HEXAGON_A2_satb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sath(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sath_R __builtin_HEXAGON_A2_sath
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satub(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satub_R __builtin_HEXAGON_A2_satub
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satuh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl __builtin_HEXAGON_A2_subh_l16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=sub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(#s10,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat
-
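-/* ==========================================================================
-   Usage sketch (illustrative): note the operand order Rt-Rs, and that the
-   :sat form clamps where the plain form would wrap.
-
-     Word32 d = Q6_R_sub_RR(5, 3);                       // d == 2 (Rt - Rs)
-     Word32 s = Q6_R_sub_RR_sat((Word32)0x80000000, 1);  // stays INT32_MIN
-     Word32 n = Q6_R_sub_IR(0, 7);                       // n == -7 (#s10 - Rs)
-   ========================================================================== */
-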
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddh(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vadduh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vavgh(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vavgh(Rs32,Rt32):rnd
-   C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vnavgh(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubh(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubh(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubuh(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs
-
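-/* ==========================================================================
-   Usage sketch (illustrative): the sv* forms treat one 32-bit register as
-   two independent 16-bit lanes; lanes wrap unless :sat is used.
-
-     Word32 a = 0x00010002;              // lanes {1, 2}
-     Word32 b = 0x00030004;              // lanes {3, 4}
-     Word32 s = Q6_R_vaddh_RR(a, b);     // s == 0x00040006, lanes {4, 6}
-     Word32 v = Q6_R_vavgh_RR(s, b);     // per-lane truncating average
-   ========================================================================== */
-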
-/* ==========================================================================
-   Assembly Syntax:       Rd32=swiz(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sxtb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sxth(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=sxtw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw
-
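-/* ==========================================================================
-   Usage sketch (illustrative): swiz reverses byte order (an endian swap),
-   and the sxt* forms sign-extend narrow values.
-
-     Word32 r = Q6_R_swiz_R(0x11223344);   // r == 0x44332211
-     Word32 b = Q6_R_sxtb_R(0x80);         // b == -128 (0xFFFFFF80)
-     Word64 w = Q6_P_sxtw_R(-1);           // w == -1 across all 64 bits
-   ========================================================================== */
-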
-/* ==========================================================================
-   Assembly Syntax:       Rd32=Rs32
-   C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32.h=#u16
-   C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32.l=#u16
-   C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=Rss32
-   C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=#s8
-   C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=#s16
-   C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi
-
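-/* ==========================================================================
-   Usage sketch (illustrative): the tfr forms move values and write halfword
-   immediates; chaining the .l/.h writes builds a full 32-bit constant.
-
-     Word32 r = Q6_R_equals_I(0);          // r == 0
-     r = Q6_Rl_equals_I(r, 0xBEEF);        // low halfword set
-     r = Q6_Rh_equals_I(r, 0xDEAD);        // r == 0xDEADBEEF
-     Word64 p = Q6_P_equals_I(-1);         // #s8 sign-extends to 64 bits
-   ========================================================================== */
-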
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsh(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsw(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsw(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddub(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vadduh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws
-
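-/* ==========================================================================
-   Usage sketch (illustrative): the 64-bit vadd forms operate lane-wise on a
-   register pair; :sat clamps each lane independently.
-
-     Word64 a = 0xF0F0F0F0F0F0F0F0ULL;
-     Word64 b = 0x2020202020202020ULL;
-     Word64 w = Q6_P_vaddub_PP(a, b);       // bytes wrap:     0x1010...10
-     Word64 s = Q6_P_vaddub_PP_sat(a, b);   // bytes saturate: 0xFFFF...FF
-   ========================================================================== */
-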
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32):crnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgub(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguh(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguw(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32):crnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr
-
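-/* ==========================================================================
-   Usage sketch (illustrative): plain vavg truncates (a+b)>>1, :rnd adds a
-   rounding bit first, and :crnd applies convergent (ties-to-even) rounding.
-
-     Word64 a = 0x0003000300030003ULL;      // halfword lanes all 3
-     Word64 b = 0x0004000400040004ULL;      // halfword lanes all 4
-     Word64 t = Q6_P_vavgh_PP(a, b);        // lanes == 3 (truncated)
-     Word64 r = Q6_P_vavgh_PP_rnd(a, b);    // lanes == 4 (rounded)
-   ========================================================================== */
-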
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu
-
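-/* ==========================================================================
-   Usage sketch (illustrative): the vector compares return the 8-bit
-   predicate value, one bit per byte lane (halfword and word compares
-   replicate their result across each lane's bits).
-
-     Word64 a = 0x0102030405060708ULL;
-     Byte   p = Q6_p_vcmpb_eq_PP(a, a);     // every lane matches: p == 0xFF
-     Byte   q = Q6_p_vcmpw_gt_PP(a, a);     // no lane greater:    q == 0x00
-   ========================================================================== */
-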
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vconj(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxuh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxuw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminuh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminuw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw
-
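-/* ==========================================================================
-   Usage sketch (illustrative): composing vmax with vmin clamps each lane to
-   a range without branches.
-
-     Word64 x  = 0xFFFF012300FF7FFFULL;     // lanes {-1, 291, 255, 32767}
-     Word64 lo = 0x0000000000000000ULL;     // per-lane lower bound 0
-     Word64 hi = 0x00FF00FF00FF00FFULL;     // per-lane upper bound 255
-     Word64 c  = Q6_P_vminh_PP(Q6_P_vmaxh_PP(x, lo), hi);
-                                            // c == 0x000000FF00FF00FF
-   ========================================================================== */
-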
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32):crnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32):crnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vraddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vraddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrsadub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrsadub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc
-
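-/* ==========================================================================
-   Usage sketch (illustrative): vrsadub accumulates sums of absolute byte
-   differences, the core of a block-matching SAD kernel; partial sums land
-   in the two words of the accumulator pair.
-
-     // ref and cur: assumed 8-byte-aligned byte buffers of length 64
-     Word64 acc = 0;
-     for (int i = 0; i < 64; i += 8) {
-         Word64 r = *(const Word64 *)(ref + i);
-         Word64 c = *(const Word64 *)(cur + i);
-         acc = Q6_P_vrsadubacc_PP(acc, r, c);
-     }
-     Word32 sad = (Word32)acc + (Word32)(acc >> 32);   // fold partial sums
-   ========================================================================== */
-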
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubh(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubub(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubuh(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubw(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=xor(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=zxtb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=zxth(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rt32,~Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=and(Rtt32,~Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_and_PnP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=bitsplit(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=bitsplit(Rs32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti
-
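-/* ==========================================================================
-   Usage sketch (illustrative): bitsplit extracts the low #u5 bits into the
-   even word of the pair and the remaining upper bits into the odd word,
-   which makes bit-field decoding a single operation.
-
-     Word64 f   = Q6_P_bitsplit_RI(0xABCD, 8);
-     Word32 lo8 = (Word32)f;                // 0xCD, the low 8 bits
-     Word32 hi  = (Word32)(f >> 32);        // 0xAB, the bits above them
-   ========================================================================== */
-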
-/* ==========================================================================
-   Assembly Syntax:       Pd4=boundscheck(Rs32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.eq(Rs32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gt(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gtu(Rs32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.eq(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gt(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gtu(Rs32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(#s8,Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(Rs32,#s8)
-   C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri
-
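-/* ==========================================================================
-   Usage sketch (illustrative): combine packs two 32-bit values into a pair;
-   the first operand lands in the high word, the second in the low word.
-
-     Word64 p = Q6_P_combine_IR(1, 0x12345678);   // 0x00000001_12345678
-     Word64 q = Q6_P_combine_RI(0x12345678, 1);   // 0x12345678_00000001
-   ========================================================================== */
-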
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cround(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cround_RI __builtin_HEXAGON_A4_cround_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cround(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=modwrap(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu
-
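-/* ==========================================================================
-   Usage sketch (illustrative): modwrap keeps an index inside [0,Rt) after a
-   single step past either end, which suits circular-buffer addressing.
-
-     Word32 len = 16;
-     Word32 idx = 15;
-     idx = Q6_R_modwrap_RR(idx + 1, len);   // 16 wraps to 0
-     idx = Q6_R_modwrap_RR(idx - 1, len);   // -1 wraps back to 15
-   ========================================================================== */
-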
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rt32,~Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=or(Rtt32,~Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_cmp_eq_RR __builtin_HEXAGON_A4_rcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmp.eq(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=!cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=!cmp.eq(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,#u5):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat
-
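-/* ==========================================================================
-   Usage sketch (illustrative): round(Rs,#u5) adds half an LSB and shifts
-   right, a round-to-nearest rescale for fixed-point values; cround uses
-   convergent (ties-to-even) rounding instead.
-
-     Word32 q15 = 0xC000;                       // 1.5 in Q15
-     Word32 i   = Q6_R_round_RI(q15, 15);       // i == 2
-     Word32 s   = Q6_R_round_RI_sat(q15, 15);   // same, with a clamped add
-   ========================================================================== */
-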
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tlbmatch(Rss32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=any8(vcmpb.eq(Rss32,Rtt32))
-   C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.eq(Rss32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.eq(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.eq(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui
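-
-/* Illustrative sketch: the vector compares above test all packed lanes
-   at once; combined with any8, a single call reports whether any byte
-   pair of two 64-bit values matches (hypothetical wrapper, hexagon-clang
-   target assumed). */
-static inline int any_byte_equal(long long a, long long b) {
-    return Q6_p_any8_vcmpb_eq_PP(a, b) != 0;
-}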
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxuh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxuw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminuh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminuw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw
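-
-/* Illustrative sketch: the Rxx operand in the vrmax/vrmin prototypes
-   means the intrinsic threads a 64-bit accumulator through each call;
-   pass the previous state as the first argument and keep the result for
-   the next step (names hypothetical). */
-static inline long long vrmaxh_step(long long state, long long vec, int ru) {
-    return Q6_P_vrmaxh_PR(state, vec, ru);
-}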
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddhub(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=all8(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_all8_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_all8_p __builtin_HEXAGON_C2_all8
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Pt4,Ps4)
-   C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_pp __builtin_HEXAGON_C2_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Pt4,!Ps4)
-   C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=any8(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_any8_p __builtin_HEXAGON_C2_any8
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsclr(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsclr(Rs32,#u6)
-   C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri
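-
-/* Illustrative sketch: bitsclr(Rs32,#u6) is true when every bit selected
-   by the 6-bit immediate mask is clear in Rs, i.e. (v & mask) == 0
-   (hypothetical wrapper; the immediate must be a compile-time constant). */
-static inline int low_six_bits_clear(int v) {
-    return Q6_p_bitsclr_RI(v, 0x3F) != 0;
-}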
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsset(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.ge(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.geu(Rs32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rs32,#u9)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.lt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.ltu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mask(Pt4)
-   C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mask_p __builtin_HEXAGON_C2_mask
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,#s8,#S8)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,#s8,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri
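-
-/* Illustrative sketch: mux gives a branch-free select; feeding it a
-   predicate produced by one of the compares above yields, e.g., a max
-   without control flow (hypothetical wrapper). */
-static inline int select_max(int a, int b) {
-    return Q6_R_mux_pRR(Q6_p_cmp_gt_RR(a, b), a, b);
-}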
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=not(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_p __builtin_HEXAGON_C2_not
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Pt4,Ps4)
-   C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_pp __builtin_HEXAGON_C2_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Pt4,!Ps4)
-   C Intrinsic Prototype: Byte Q6_p_or_pnp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=Ps4
-   C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=Ps4
-   C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=Rs32
-   C Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vitpack(Ps4,Pt4)
-   C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmux(Pu4,Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmux_pPP __builtin_HEXAGON_C2_vmux
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=xor(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,and(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,and(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,or(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,or(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gt(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gtu(Rs32,#u9)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.eq(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi
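-
-/* Illustrative sketch: the !cmp.* forms compute a negated compare in a
-   single instruction, so "not equal" needs no separate predicate
-   inversion (hypothetical wrapper). */
-static inline int not_equal(int a, int b) {
-    return Q6_p_not_cmp_eq_RR(a, b) != 0;
-}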
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=fastcorner9(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!fastcorner9(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsclr(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsclr(Rs32,#u6)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsset(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,and(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,and(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,or(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,or(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn
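-
-/* Illustrative sketch: the C4 compound forms fold two predicate
-   operations into one instruction; here or(Ps4,and(Pt4,Pu4)) expresses
-   v == 0 || (lo < v && v < hi) (hypothetical wrapper; the #s10
-   immediate must be a constant). */
-static inline int zero_or_in_range(int v, int lo, int hi) {
-    return Q6_p_or_and_ppp(Q6_p_cmp_eq_RI(v, 0),
-                           Q6_p_cmp_gt_RR(v, lo),
-                           Q6_p_cmp_gt_RR(hi, v)) != 0;
-}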
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_d2df(Rss32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_d2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2d(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2d(Rss32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2ud(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2ud(Rss32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2uw(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2uw(Rss32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2w(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2w(Rss32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2d(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2d(Rs32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2ud(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2ud(Rs32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2uw(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2uw(Rs32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2w(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2w(Rs32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_ud2df(Rss32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_ud2df_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_ud2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_uw2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_uw2sf(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_w2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_w2sf(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf
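-
-/* Illustrative sketch: the :chop conversions truncate toward zero, which
-   matches the semantics of a C cast, while the plain forms round
-   (hypothetical wrappers; hexagon-clang target assumed). */
-static inline int sf_to_w_trunc(float x) {
-    return Q6_R_convert_sf2w_R_chop(x);
-}
-static inline float w_to_sf(int v) {
-    return Q6_R_convert_w2sf_R(v);
-}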
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfclass(Rss32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.ge(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.uo(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo
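-
-/* Illustrative sketch: dfcmp.uo is true when either operand is NaN
-   (unordered), so comparing a value against itself gives an isnan test
-   (hypothetical wrapper). */
-static inline int df_isnan(double x) {
-    return Q6_p_dfcmp_uo_PP(x, x) != 0;
-}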
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmake(#u10):neg
-   C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmake(#u10):pos
-   C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfadd(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfclass(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_eq_RR __builtin_HEXAGON_F2_sfcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.ge(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.uo(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupd(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupn(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupr(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32):lib
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32,Pu4):scale
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=sfmpy(Rs32,Rt32):lib
-   C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmake(#u10):neg
-   C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmake(#u10):pos
-   C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmake_I_pos __builtin_HEXAGON_F2_sfimm_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmax(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmin(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfsub(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub
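-
-/* Illustrative sketch: the Rx32+=sfmpy(...) forms are single-instruction
-   multiply-accumulates; as with the other accumulating intrinsics, the
-   accumulator is passed as the first argument (hypothetical wrapper). */
-static inline float sf_mac(float acc, float a, float b) {
-    return Q6_R_sfmpyacc_RR(acc, a, b);
-}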
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memb(Rx32++#s4:0:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memb(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=memd(Rx32++#s4:3:circ(Mu2))
-   C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=memd(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memh(Rx32++#s4:1:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memh(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memw(Rx32++#s4:2:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memw(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memub(Rx32++#s4:0:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memub(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memuh(Rx32++#s4:1:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memuh(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr
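-
-/* Illustrative sketch of the circular-load calling convention only: the
-   cursor is passed by reference and post-incremented, wrapping within
-   the buffer anchored at BaseAddress; the Mu2 modifier encoding is
-   target-specific and not shown here (hypothetical wrapper). */
-static inline int circ_load_byte(void **cursor, int step, int modifier,
-                                 void *base) {
-    return Q6_R_memb_IM_circ(cursor, step, modifier, base);
-}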
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=add(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyr(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0
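-
-/* Illustrative sketch: cmpy treats each 32-bit source as a packed
-   complex value (16-bit real/imaginary halves); the accumulating :sat
-   form threads the 64-bit accumulator through as the first argument
-   (hypothetical wrapper). */
-static inline long long cmac_step(long long acc, int x, int y) {
-    return Q6_P_cmpyacc_RR_sat(acc, x, y);
-}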
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyr(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32*):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1
-
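-/* Usage sketch: a complex dot product built from the accumulating cmpy
-   forms above.  Each Word32 is assumed to pack one complex sample as two
-   16-bit halfwords; which halfword is real versus imaginary follows the
-   cmpy convention in the Hexagon PRM.  `x', `y', `n' are illustrative.  */
-static Word64 cdot(const Word32 *x, const Word32 *y, int n)
-{
-  Word64 acc = 0;
-  int i;
-  for (i = 0; i < n; ++i)
-    acc = Q6_P_cmpyacc_RR_sat(acc, x[i], y[i]); /* Rxx += cmpy(Rs,Rt):sat  */
-  return acc;                  /* real/imaginary parts in the 64-bit pair  */
-}
-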
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0
-
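-/* Usage sketch: 32x32->64 multiply-accumulate using the widening forms
-   above; the unsigned variants mirror this with Q6_P_mpyuacc_RR.  `a',
-   `b', `n' are illustrative names.  */
-static Word64 mac64(const Word32 *a, const Word32 *b, int n)
-{
-  Word64 acc = 0;
-  int i;
-  for (i = 0; i < n; ++i)
-    acc = Q6_P_mpyacc_RR(acc, a[i], b[i]);      /* Rxx += mpy(Rs,Rt)       */
-  return acc;
-}
-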
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1
-
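-/* Usage sketch: the hmmpy forms above multiply a full 32-bit word by one
-   halfword of Rt.  Reading `sample' as Q31 and `c' as a Q15 coefficient
-   in the low halfword is an illustrative fixed-point interpretation, not
-   mandated by the intrinsics.  */
-static Word32 scale_q31(Word32 sample, Word32 c)
-{
-  return Q6_R_mpy_RRl_s1_rnd_sat(sample, c);  /* mpy(Rs,Rt.l):<<1:rnd:sat  */
-}
-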
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyi(Rs32,#u8)
-   C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyi(Rs32,#u8)
-   C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip
-
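-/* Usage sketch: integer multiply-accumulate in register and immediate
-   (#u8) forms; operand names and constants are illustrative.  */
-static Word32 mac32(Word32 acc, Word32 x, Word32 w, Word32 y)
-{
-  acc = Q6_R_mpyiacc_RR(acc, x, w);  /* acc += x * w                       */
-  acc = Q6_R_mpyiacc_RI(acc, x, 3);  /* acc += x * 3                       */
-  acc = Q6_R_mpyinac_RI(acc, y, 5);  /* acc -= y * 5                       */
-  return acc;
-}
-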
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_s1_sat __builtin_HEXAGON_M2_mmacuhs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1
-
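-/* Usage sketch: the vmpyw[eo][u]h forms above perform two independent
-   word-by-halfword multiplies, one per word of the 64-bit pair, taking
-   the even (..weh) or odd (..woh) halfword of Rtt.  A fractional Q31/Q15
-   reading of `xx' and `yy' is illustrative.  */
-static Word64 vscale(Word64 xx, Word64 yy)
-{
-  return Q6_P_vmpyweh_PP_s1_rnd_sat(xx, yy); /* vmpyweh(Rss,Rtt):<<1:rnd:sat */
-}
-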
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1
-
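-/* Usage sketch: a dot product over 16-bit samples packed two per Word32,
-   built from the halfword multiply-accumulate forms above.  `a', `b',
-   `n' are illustrative names.  */
-static Word32 dot16(const Word32 *a, const Word32 *b, int n)
-{
-  Word32 acc = 0;
-  int i;
-  for (i = 0; i < n; ++i) {
-    acc = Q6_R_mpyacc_RlRl(acc, a[i], b[i]);  /* acc += a.l * b.l          */
-    acc = Q6_R_mpyacc_RhRh(acc, a[i], b[i]);  /* acc += a.h * b.h          */
-  }
-  return acc;
-}
-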
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1
-
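-/* Usage sketch: a rounded, saturating fractional 16x16 multiply.  Per the
-   assembly above, the result is the shifted product of the selected
-   halfwords, rounded and saturated to 32 bits; the Q15 operand naming is
-   illustrative.  */
-static Word32 q15_mul(Word32 a, Word32 b)
-{
-  return Q6_R_mpy_RlRl_s1_rnd_sat(a, b);  /* mpy(Rs.l,Rt.l):<<1:rnd:sat    */
-}
-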
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat
-
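-/* Usage sketch: the three forms above keep the upper word of the 32x32
-   product (the PRM's mpy-to-Rd32 behavior).  With Q31 inputs, the
-   :<<1:sat form gives the usual Q31 fractional product; that fixed-point
-   reading is illustrative.  */
-static Word32 q31_mul(Word32 a, Word32 b)
-{
-  return Q6_R_mpy_RR_s1_sat(a, b);        /* mpy(Rs,Rt):<<1:sat            */
-}
-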
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1
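-
-/* A minimal MAC sketch: the Rxx32+=mpy(...) forms accumulate the widened
-   halfword product into a 64-bit running sum, the classic step of a 16-bit
-   dot product.  Assumes the Word16/Word64 typedefs from hexagon_types.h. */
-static inline Word64 dot16(const Word16 *x, const Word16 *y, int n)
-{
-    Word64 acc = 0;
-    for (int i = 0; i < n; ++i)
-        acc = Q6_P_mpyacc_RlRl(acc, x[i], y[i]);  /* acc += x[i] * y[i] */
-    return acc;
-}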
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh __builtin_HEXAGON_M2_mpyd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1
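-
-/* A minimal sketch of the widening forms: Rdd32=mpy(Rs32.l,Rt32.l) places
-   the sign-extended 16x16-bit halfword product in a 64-bit destination, so
-   overflow is impossible.  Assumes Word16/Word64 from hexagon_types.h. */
-static inline Word64 widen_mul16(Word16 a, Word16 b)
-{
-    return Q6_P_mpy_RlRl(a, b);        /* (Word64)a * (Word64)b */
-}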
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi
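-
-/* A minimal sketch: mpyi keeps only the low 32 bits of the product, so it
-   matches plain C multiplication of 32-bit integers; the intrinsic spelling
-   is mainly useful for symmetry with the other Q6 macros. */
-static inline Word32 mul_lo(Word32 a, Word32 b)
-{
-    return Q6_R_mpyi_RR(a, b);         /* same result as a * b */
-}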
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyi(Rs32,#m9)
-   C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpysu(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1
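-
-/* A minimal sketch of the unsigned forms: mpyu treats the selected
-   halfwords as unsigned, so Q6_R_mpyu_RlRl yields the full 32-bit product
-   of two 16-bit unsigned values with no sign extension.  Assumes
-   UWord16/UWord32 from hexagon_types.h. */
-static inline UWord32 umul16(UWord16 a, UWord16 b)
-{
-    return Q6_R_mpyu_RlRl(a, b);       /* (UWord32)a * (UWord32)b */
-}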
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyud_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyui(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=add(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sub(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw
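-
-/* A minimal sketch: the vabsdiff forms work lane-wise on 64-bit pairs,
-   producing |Rtt.h[i] - Rss.h[i]| per halfword (per word for the w form),
-   e.g. as the inner step of a sum-of-absolute-differences kernel. */
-static inline Word64 absdiff4_h(Word64 a, Word64 b)
-{
-    return Q6_P_vabsdiffh_PP(a, b);    /* four halfword |a - b| lanes */
-}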
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vcmpyi(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vcmpyr(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyi(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyr(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpy(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vdmpy(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpy(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpy(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1
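-
-/* A minimal sketch, per the vdmpy descriptions above: each word lane gets a
-   saturated dot product of the two halfword pairs, i.e. out.w[i] =
-   sat(Rss.h[2i]*Rtt.h[2i] + Rss.h[2i+1]*Rtt.h[2i+1]); the accumulating form
-   is the core step of 16-bit FIR kernels. */
-static inline Word64 dual_mac(Word64 acc, Word64 a, Word64 b)
-{
-    return Q6_P_vdmpyacc_PP_sat(acc, a, b);  /* acc += dual products, sat */
-}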
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyhsu(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyeh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vmpyh(Rs32,Rt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyh(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyhsu(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vraddh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vradduh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh
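-
-/* A minimal sketch (my reading of the vradd forms, so treat the exact lane
-   semantics as an assumption): vraddh reduce-adds the halfword lanes of
-   both source pairs into one 32-bit sum, e.g. to finish a SAD
-   accumulation. */
-static inline Word32 hsum(Word64 a, Word64 b)
-{
-    return Q6_R_vraddh_PP(a, b);       /* sum over all halfword lanes */
-}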
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyi(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyi(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyr(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyr(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyi(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyi(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyr(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyr(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c
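-
-/* A minimal sketch, assuming the usual Hexagon packed-complex layout (real
-   in the low halfword, imaginary in the high; the trailing * above marks a
-   conjugated operand): accumulating the i and r forms separately yields a
-   complex dot product with the second operand conjugated. */
-static inline void cdot_conj(Word64 *re, Word64 *im, Word64 a, Word64 b)
-{
-    *re = Q6_P_vrcmpyracc_PP_conj(*re, a, b);  /* += Re(a * conj(b)) */
-    *im = Q6_P_vrcmpyiacc_PP_conj(*im, a, b);  /* += Im(a * conj(b)) */
-}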
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpys(Rss32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor
-
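-/* Usage sketch (illustrative; flags/enable/mute are hypothetical operands):
-   the logical-logical forms fuse two bitwise operations into one
-   instruction, e.g. narrowing a mask in place:
-
-     Word32 mask = ~0;
-     mask = Q6_R_andand_RR(mask, flags, enable);  // mask &= (flags & enable)
-     mask = Q6_R_andand_RnR(mask, flags, mute);   // mask &= (flags & ~mute)
-*/
-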
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc
-
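-/* Usage sketch (z/w are hypothetical operands; the rwh/iwh suffixes read
-   as the real and imaginary parts of a 32x16 complex product, rounded and
-   saturated):
-
-     Word32 re = Q6_R_cmpyrwh_PR_s1_rnd_sat(z, w);  // Re(z*w), rnd:sat
-     Word32 im = Q6_R_cmpyiwh_PR_s1_rnd_sat(z, w);  // Im(z*w), rnd:sat
-*/
-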
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(#u6,mpyi(Rs32,#U6))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Ru32,mpyi(Rs32,#u6))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Ru32,mpyi(#u6:2,Rs32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(#u6,mpyi(Rs32,Rt32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Ry32=add(Ru32,mpyi(Ry32,Rs32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr
-
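-/* Usage sketch (base/i are hypothetical values): the fused add+mpyi forms
-   compute base-plus-scaled-index arithmetic in one instruction, e.g. the
-   byte offset of record i in an array of 12-byte records:
-
-     Word32 addr = Q6_R_add_mpyi_RRI(base, i, 12);  // base + i*12, #u6 imm
-*/
-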
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=pmpyw(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=pmpyw(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vpmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=vpmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc
-
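-/* Usage sketch (data/poly are hypothetical values): pmpyw is a carry-less
-   (GF(2)) 32x32 multiply, the primitive behind CRCs and polynomial hashes;
-   the ^= form xors successive products into a running value:
-
-     Word64 prod = Q6_P_pmpyw_RR(data, poly);    // carry-less product
-     acc = Q6_P_pmpywxacc_RR(acc, data2, poly);  // acc ^= pmpyw(data2,poly)
-*/
-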
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyweh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyweh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyweh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyweh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpywoh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpywoh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpywoh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpywoh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1
-
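-/* Usage sketch (one plausible reading; acc/v/c are hypothetical operands):
-   the weh/woh pair reduces word-by-halfword products against the even
-   (weh) or odd (woh) halfwords of Rtt, so accumulating both covers all
-   halfword lanes:
-
-     acc = Q6_P_vrmpywehacc_PP(acc, v, c);  // partial sums, even halfwords
-     acc = Q6_P_vrmpywohacc_PP(acc, v, c);  // partial sums, odd halfwords
-*/
-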
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andxacc_RnR __builtin_HEXAGON_M4_xor_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=xor(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpybsu(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpybsu(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpybsu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpybu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpybsu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpybu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpybsu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpybu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpybsu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpybu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu
-
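-/* Usage sketch (load8u() is a hypothetical helper returning 8 packed bytes
-   as a Word64): vrmpybu reduces eight unsigned 8-bit products into a
-   64-bit sum, so a correlation over byte vectors costs one accumulating op
-   per 8 bytes:
-
-     Word64 acc = 0;
-     for (int i = 0; i < n; i += 8)
-       acc = Q6_P_vrmpybuacc_PP(acc, load8u(a + i), load8u(b + i));
-*/
-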
-/* ==========================================================================
-   Assembly Syntax:       Rd32=addasl(Rt32,Rs32,#u3)
-   C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,#u5):sat
-   C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw
-
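-/* Usage sketch (a hypothetical Q-format conversion): the immediate asl
-   forms cover fixed-point scaling, with an optional saturating variant:
-
-     Word32 q8  = Q6_R_asl_RI(x, 8);      // x << 8, wraps on overflow
-     Word32 q8s = Q6_R_asl_RI_sat(x, 8);  // x << 8, saturates instead
-*/
-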
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RR __builtin_HEXAGON_S2_asl_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,#u6):rnd
-   C Intrinsic Prototype: Word64 Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asrrnd(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,#u5):rnd
-   C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asrrnd(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax
-
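-/* Usage sketch (a hypothetical narrowing step): the :rnd forms bias the
-   value by half an LSB before shifting, i.e. round-to-nearest rather than
-   truncation when dropping fraction bits:
-
-     Word32 q0 = Q6_R_asr_RI_rnd(q8, 8);  // roughly (q8 + 0x80) >> 8
-*/
-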
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrw(Rss32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=brev(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_brev_R __builtin_HEXAGON_S2_brev
-
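-/* Usage sketch (log2n is a hypothetical FFT size parameter): brev reverses
-   all 32 bits, so a bit-reversed index over the low log2n bits is the full
-   reversal shifted back down:
-
-     Word32 rev = Q6_R_brev_R(i) >> (32 - log2n);  // reverse low log2n bits
-*/
-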
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=brev(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl0(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl0(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl1(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl1(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clb_R __builtin_HEXAGON_S2_clb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=normamt(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clrbit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clrbit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct0(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct0(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct1(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct1(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p
-
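-/* Usage sketch (a hypothetical block-normalization step): normamt yields
-   the left shift that normalizes a signed value, which pairs with the
-   register-shift form above to produce the normalized result:
-
-     Word32 shift = Q6_R_normamt_R(x);      // redundant sign bits
-     Word32 norm  = Q6_R_asl_RR(x, shift);  // normalized value
-*/
-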
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=deinterleave(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extractu(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extractu(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extractu(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extractu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp
-
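-/* Usage sketch (a hypothetical field layout): extractu takes a width then
-   an offset, pulling an unsigned bitfield out of the source:
-
-     Word32 field = Q6_R_extractu_RII(insn, 5, 11);  // (insn >> 11) & 0x1f
-*/
-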
-/* ==========================================================================
-   Assembly Syntax:       Rx32=insert(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=insert(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=insert(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=insert(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp
-
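-/* Usage sketch (same hypothetical layout as the extractu example): insert
-   is the write-side complement, depositing the low #u5 bits of Rs at
-   offset #U5 in the destination:
-
-     word = Q6_R_insert_RII(word, field, 5, 11);  // replace bits 15:11
-*/
-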
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=interleave(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lfs(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or
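-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  The suffix on these shift-macro families encodes the
-   update applied to the destination register: acc (+=), and (&=),
-   nac (-=), or (|=), xacc/xor (^=).  The helper name below is
-   hypothetical. */
-static inline Word32 lsl_accumulate(Word32 x, Word32 s, Word32 t) {
-  /* Rx32+=lsl(Rs32,Rt32): conceptually x + (s << t) for t in [0,31];
-     see the Hexagon PRM for out-of-range and negative shift counts. */
-  return Q6_R_lslacc_RR(x, s, t);
-}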
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlslh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlslw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc
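-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  The immediate logical-shift-right form is the usual
-   building block for unsigned bit-field extraction; the field
-   position and mask below are arbitrary example values. */
-static inline Word32 extract_byte1(Word32 x) {
-  return Q6_R_lsr_RI(x, 8) & 0xFF;      /* Rd32=lsr(Rs32,#u5) */
-}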
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=packhl(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl
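-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  packhl builds a 64-bit pair from the halfwords of two
-   32-bit sources; the exact lane placement is as specified in the
-   Hexagon PRM (assumed here: high halves in the upper word, low
-   halves in the lower word). */
-static inline Word64 pack_halfwords(Word32 a, Word32 b) {
-  return Q6_P_packhl_RR(a, b);          /* Rdd32=packhl(Rs32,Rt32) */
-}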
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=parity(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=setbit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=setbit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffeb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffeh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffob(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffoh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh
-
-/* ==========================================================================
-   Assembly Syntax:       memb(Rx32++#s4:0:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memb(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memd(Rx32++#s4:3:circ(Mu2))=Rtt32
-   C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memd(Rx32++I:circ(Mu2))=Rtt32
-   C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++#s4:1:circ(Mu2))=Rt32.h
-   C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++I:circ(Mu2))=Rt32.h
-   C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++#s4:1:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memw(Rx32++#s4:2:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memw(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memw_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr
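-
-/* Editor's note: a hedged sketch, not part of the original header.
-   The circular-store intrinsics post-increment a cursor that wraps
-   inside a buffer described by the Mu operand, whose length encoding
-   follows the M-register format in the Hexagon PRM; ring_push and
-   mu_encoding below are hypothetical names, and the immediate is
-   assumed to be the byte increment (a multiple of 4 for memw). */
-static inline void ring_push(void **cursor, Word32 mu_encoding,
-                             Word32 value, void *ring_base) {
-  /* memw(Rx32++#s4:2:circ(Mu2))=Rt32 */
-  Q6_memw_IMR_circ(cursor, 4, mu_encoding, value, ring_base);
-}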
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathub(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxb(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxd(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxh(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxw(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=togglebit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=togglebit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tstbit(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tstbit(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r
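-
-/* Editor's note: an illustrative sketch, not part of the original
-   header, combining the single-bit intrinsics; the predicate from
-   tstbit is non-zero when the selected bit is set. */
-static inline Word32 toggle_or_set(Word32 x, Word32 bit) {
-  if (Q6_p_tstbit_RR(x, bit))           /* Pd4=tstbit(Rs32,Rt32) */
-    return Q6_R_togglebit_RR(x, bit);   /* clears the set bit */
-  return Q6_R_setbit_RR(x, bit);        /* sets the clear bit */
-}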
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=valignb(Rtt32,Rss32,#u3)
-   C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=valignb(Rtt32,Rss32,Pu4)
-   C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb
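-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  valignb extracts an 8-byte window from the 16-byte
-   concatenation {Rtt,Rss}, starting #u3 bytes into Rss, which is the
-   classic primitive for unaligned 64-bit loads; the offset 3 is an
-   arbitrary example. */
-static inline Word64 byte_window(Word64 hi, Word64 lo) {
-  return Q6_P_valignb_PPI(hi, lo, 3);   /* Rdd32=valignb(Rtt32,Rss32,#u3) */
-}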
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcnegh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcrotate(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcnegh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrndwh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrndwh(Rss32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsathb(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathub(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsathub(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsatwh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsatwh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsatwuh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsatwuh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsatwuh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack
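-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  The vsat family saturates each lane to the narrower
-   type's range; the packing forms also compress the lanes into a
-   32-bit result, while the _nopack forms leave them in place. */
-static inline Word32 clamp_halfwords_to_u8(Word64 four_halfwords) {
-  /* Rd32=vsathub(Rss32): four signed halfwords -> four bytes, each
-     saturated to [0,255]. */
-  return Q6_R_vsathub_P(four_halfwords);
-}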
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsplatb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsplatb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsplath(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vspliceb(Rss32,Rtt32,#u3)
-   C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vspliceb(Rss32,Rtt32,Pu4)
-   C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsxtbh(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsxthw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vtrunehb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunewh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vtrunohb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunowh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunowh_PP __builtin_HEXAGON_S2_vtrunowh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vzxtbh(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vzxtbh_R __builtin_HEXAGON_S2_vzxtbh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vzxthw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw
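-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  vsxtbh and vzxtbh widen the four bytes of Rs32 into four
-   halfwords, with sign- and zero-extension respectively; vsxthw and
-   vzxthw do the same for halfwords widening to words. */
-static inline Word64 widen_bytes_unsigned(Word32 packed_bytes) {
-  return Q6_P_vzxtbh_R(packed_bytes);   /* Rdd32=vzxtbh(Rs32) */
-}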
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,add(Ru32,#s6))
-   C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_add_RRI __builtin_HEXAGON_S4_addaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=add(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=add(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=and(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_and_asl_IRI __builtin_HEXAGON_S4_andi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=and(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri
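-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  In the Rx32=op(#u8,shift(Rx32,#U5)) family the register
-   operand is both source and destination, so the macro takes the
-   running value and its result is assigned back; the constants are
-   arbitrary example values. */
-static inline Word32 scale_and_bias(Word32 x) {
-  x = Q6_R_add_asl_IRI(5, x, 2);        /* x = 5 + (x << 2) */
-  return x;
-}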
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(clb(Rs32),#s6)
-   C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(clb(Rss32),#s6)
-   C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=normamt(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extract(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extract(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extract(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extract(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsl(#s6,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!tstbit(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!tstbit(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(Ru32,and(Rx32,#s10))
-   C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=or(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_lsr_IRI __builtin_HEXAGON_S4_ori_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=parity(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,sub(#s6,Ru32))
-   C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=sub(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=sub(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcrotate(Rss32,Rt32,#u2)
-   C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcrotate(Rss32,Rt32,#u2)
-   C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrhub(Rss32,#u4):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrhub(Rss32,#u4):sat
-   C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=popcount(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp
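-
-/* Editor's note: an illustrative sketch, not part of the original
-   header.  popcount counts the set bits across the 64-bit pair; off
-   target it corresponds to the compiler's generic population count,
-   as modelled in the fallback below. */
-#if defined(__hexagon__)
-static inline Word32 count_bits(Word64 v) {
-  return Q6_R_popcount_P(v);            /* Rd32=popcount(Rss32) */
-}
-#else
-static inline int count_bits(long long v) {
-  return __builtin_popcountll((unsigned long long)v); /* model */
-}
-#endif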
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,#u4):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       dccleana(Rs32)
-   C Intrinsic Prototype: void Q6_dccleana_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana
-
-/* ==========================================================================
-   Assembly Syntax:       dccleaninva(Rs32)
-   C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva
-
-/* ==========================================================================
-   Assembly Syntax:       dcfetch(Rs32)
-   C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch
-
-/* ==========================================================================
-   Assembly Syntax:       dcinva(Rs32)
-   C Intrinsic Prototype: void Q6_dcinva_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva
-
-/* ==========================================================================
-   Assembly Syntax:       dczeroa(Rs32)
-   C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa
-
-/* ==========================================================================
-   Assembly Syntax:       l2fetch(Rs32,Rt32)
-   C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch
-
-/* ==========================================================================
-   Assembly Syntax:       l2fetch(Rs32,Rtt32)
-   C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch
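-
-/* Editor's note: a hedged sketch, not part of the original header.
-   l2fetch issues a non-blocking L2 prefetch; the Rt32 (or Rtt32)
-   operand packs an area descriptor (width, height, stride) in the
-   format given by the Hexagon PRM, for which desc below is a
-   placeholder. */
-static inline void prefetch_block(Address addr, Word32 desc) {
-  Q6_l2fetch_AR(addr, desc);            /* l2fetch(Rs32,Rt32) */
-}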
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rd32=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
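The v60 rotate family above pairs a plain rotate with read-modify-write accumulator forms; a small sketch (the shift count must be a compile-time immediate):

#if __HEXAGON_ARCH__ >= 60
static inline unsigned rotl8_and_acc(unsigned x, unsigned acc) {
  unsigned r = Q6_R_rol_RI(x, 8);    /* r = (x << 8) | (x >> 24) */
  acc = Q6_R_rolacc_RI(acc, x, 8);   /* acc += rotl(x, 8) */
  return r ^ acc;
}
#endif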
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsplatb(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunehb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunohb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
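A sketch combining two of the v62 additions above, splatting a constant byte and taking per-byte unsigned absolute differences (a common SAD building block):

#if __HEXAGON_ARCH__ >= 62
static inline long long absdiff_vs_byte(long long pixels, unsigned char k) {
  long long splat = Q6_P_vsplatb_R(k);        /* k replicated into all 8 bytes */
  return Q6_P_vabsdiffub_PP(pixels, splat);   /* |pixels[i] - k| in each byte */
}
#endif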
-#if __HEXAGON_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
-   C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany
-#endif /* __HEXAGON_ARCH__ >= 65 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfadd(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfsub(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mask(#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mask_II __builtin_HEXAGON_S2_mask
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
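A sketch of the v66 additions; `Float64` maps to `double` here, and the operand order of mask(#u5,#U5) (width first, offset second) is an assumption to check against the PRM:

#if __HEXAGON_ARCH__ >= 66
static inline double sum_and_diff(double a, double b, double *diff) {
  *diff = Q6_P_dfsub_PP(a, b);   /* single-instruction f64 subtract */
  return Q6_P_dfadd_PP(a, b);    /* single-instruction f64 add */
}
static inline unsigned nibble_mask(void) {
  return Q6_R_mask_II(4, 8);     /* assumed: 4 one-bits at offset 8 -> 0xF00 */
}
#endif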
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clip(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_clip_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cround(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cround(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vclip(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmax(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmin(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmpyfix(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=dfmpyhh(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyhhacc_PP __builtin_HEXAGON_F2_dfmpyhh
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=dfmpylh(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmpyll(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
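The v67 dfmpy* ops are partial products; one plausible composition of a full double-precision multiply from them, mirroring the usual fixup/ll/lh/hh sequence (an illustration, not a verified lowering):

#if __HEXAGON_ARCH__ >= 67
static inline double df_mul(double a, double b) {
  double af = Q6_P_dfmpyfix_PP(a, b);    /* pre-scale the operands */
  double bf = Q6_P_dfmpyfix_PP(b, a);
  double t  = Q6_P_dfmpyll_PP(af, bf);   /* low  x low  */
  t = Q6_P_dfmpylhacc_PP(t, af, bf);     /* += low  x high */
  t = Q6_P_dfmpylhacc_PP(t, bf, af);     /* += high x low  */
  return Q6_P_dfmpyhhacc_PP(t, af, bf);  /* += high x high, final rounding */
}
#endif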
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyiw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyiw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyiw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyiw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyrw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyrw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyrw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyrw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpyw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpyw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
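A sketch of the audio-extension complex multiplies above; it assumes each Word64 carries one complex sample and keeps separate real/imaginary accumulators:

#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
static inline void cmac_conj(long long *acc_re, long long *acc_im,
                             long long x, long long y) {
  /* acc += x * conj(y): real and imaginary parts accumulate separately. */
  *acc_re = Q6_P_cmpyrwacc_PP_conj(*acc_re, x, y);
  *acc_im = Q6_P_cmpyiwacc_PP_conj(*acc_im, x, y);
}
#endif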
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmlink(Rs32,Rt32)
-   C Intrinsic Prototype: void Q6_dmlink_AA(Address Rs, Address Rt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmpause
-   C Intrinsic Prototype: Word32 Q6_R_dmpause()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmpoll
-   C Intrinsic Prototype: Word32 Q6_R_dmpoll()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmresume(Rs32)
-   C Intrinsic Prototype: void Q6_dmresume_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmstart(Rs32)
-   C Intrinsic Prototype: void Q6_dmstart_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmwait
-   C Intrinsic Prototype: Word32 Q6_R_dmwait()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
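A sketch of driving the v68 user DMA engine with the mappings above; the descriptor format is not defined in this header, so `first_desc` is a placeholder, and the meaning of the returned status word is per the PRM:

#if __HEXAGON_ARCH__ >= 68
static inline unsigned dma_copy(void *first_desc) {
  Q6_dmstart_A(first_desc);   /* kick off the descriptor chain */
  return Q6_R_dmwait();       /* block until idle; status word per the PRM */
}
#endif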
-#include <hexagon_circ_brev_intrinsics.h>
-#ifdef __HVX__
-#include <hvx_hexagon_protos.h>
-#endif /* __HVX__ */
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h b/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h
deleted file mode 100644
index 6958809..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ /dev/null
@@ -1,2653 +0,0 @@
-/******************************************************************************/
-/*   (c) 2020 Qualcomm Innovation Center, Inc. All rights reserved.           */
-/*                                                                            */
-/******************************************************************************/
-#ifndef HEXAGON_TYPES_H
-#define HEXAGON_TYPES_H
-
-#include <hexagon_protos.h>
-
-/* Hexagon names */
-#define HEXAGON_Vect HEXAGON_Vect64
-#define HEXAGON_V_GET_D HEXAGON_V64_GET_D
-#define HEXAGON_V_GET_UD HEXAGON_V64_GET_UD
-#define HEXAGON_V_GET_W0 HEXAGON_V64_GET_W0
-#define HEXAGON_V_GET_W1 HEXAGON_V64_GET_W1
-#define HEXAGON_V_GET_UW0 HEXAGON_V64_GET_UW0
-#define HEXAGON_V_GET_UW1 HEXAGON_V64_GET_UW1
-#define HEXAGON_V_GET_H0 HEXAGON_V64_GET_H0
-#define HEXAGON_V_GET_H1 HEXAGON_V64_GET_H1
-#define HEXAGON_V_GET_H2 HEXAGON_V64_GET_H2
-#define HEXAGON_V_GET_H3 HEXAGON_V64_GET_H3
-#define HEXAGON_V_GET_UH0 HEXAGON_V64_GET_UH0
-#define HEXAGON_V_GET_UH1 HEXAGON_V64_GET_UH1
-#define HEXAGON_V_GET_UH2 HEXAGON_V64_GET_UH2
-#define HEXAGON_V_GET_UH3 HEXAGON_V64_GET_UH3
-#define HEXAGON_V_GET_B0 HEXAGON_V64_GET_B0
-#define HEXAGON_V_GET_B1 HEXAGON_V64_GET_B1
-#define HEXAGON_V_GET_B2 HEXAGON_V64_GET_B2
-#define HEXAGON_V_GET_B3 HEXAGON_V64_GET_B3
-#define HEXAGON_V_GET_B4 HEXAGON_V64_GET_B4
-#define HEXAGON_V_GET_B5 HEXAGON_V64_GET_B5
-#define HEXAGON_V_GET_B6 HEXAGON_V64_GET_B6
-#define HEXAGON_V_GET_B7 HEXAGON_V64_GET_B7
-#define HEXAGON_V_GET_UB0 HEXAGON_V64_GET_UB0
-#define HEXAGON_V_GET_UB1 HEXAGON_V64_GET_UB1
-#define HEXAGON_V_GET_UB2 HEXAGON_V64_GET_UB2
-#define HEXAGON_V_GET_UB3 HEXAGON_V64_GET_UB3
-#define HEXAGON_V_GET_UB4 HEXAGON_V64_GET_UB4
-#define HEXAGON_V_GET_UB5 HEXAGON_V64_GET_UB5
-#define HEXAGON_V_GET_UB6 HEXAGON_V64_GET_UB6
-#define HEXAGON_V_GET_UB7 HEXAGON_V64_GET_UB7
-#define HEXAGON_V_PUT_D HEXAGON_V64_PUT_D
-#define HEXAGON_V_PUT_W0 HEXAGON_V64_PUT_W0
-#define HEXAGON_V_PUT_W1 HEXAGON_V64_PUT_W1
-#define HEXAGON_V_PUT_H0 HEXAGON_V64_PUT_H0
-#define HEXAGON_V_PUT_H1 HEXAGON_V64_PUT_H1
-#define HEXAGON_V_PUT_H2 HEXAGON_V64_PUT_H2
-#define HEXAGON_V_PUT_H3 HEXAGON_V64_PUT_H3
-#define HEXAGON_V_PUT_B0 HEXAGON_V64_PUT_B0
-#define HEXAGON_V_PUT_B1 HEXAGON_V64_PUT_B1
-#define HEXAGON_V_PUT_B2 HEXAGON_V64_PUT_B2
-#define HEXAGON_V_PUT_B3 HEXAGON_V64_PUT_B3
-#define HEXAGON_V_PUT_B4 HEXAGON_V64_PUT_B4
-#define HEXAGON_V_PUT_B5 HEXAGON_V64_PUT_B5
-#define HEXAGON_V_PUT_B6 HEXAGON_V64_PUT_B6
-#define HEXAGON_V_PUT_B7 HEXAGON_V64_PUT_B7
-#define HEXAGON_V_CREATE_D HEXAGON_V64_CREATE_D
-#define HEXAGON_V_CREATE_W HEXAGON_V64_CREATE_W
-#define HEXAGON_V_CREATE_H HEXAGON_V64_CREATE_H
-#define HEXAGON_V_CREATE_B HEXAGON_V64_CREATE_B
-
-#ifdef __cplusplus
-#define HEXAGON_VectC HEXAGON_Vect64C
-#endif /* __cplusplus */
-
-/* 64 Bit Vectors */
-
-typedef long long __attribute__((__may_alias__)) HEXAGON_Vect64;
-
-/* Extract doubleword macros */
-
-#define HEXAGON_V64_GET_D(v) (v)
-#define HEXAGON_V64_GET_UD(v) ((unsigned long long)(v))
-
-/* Extract word macros */
-
-#define HEXAGON_V64_GET_W0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[0];                                                \
-  })
-#define HEXAGON_V64_GET_W1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[1];                                                \
-  })
-#define HEXAGON_V64_GET_UW0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uw[0];                                               \
-  })
-#define HEXAGON_V64_GET_UW1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uw[1];                                               \
-  })
-
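A sketch of the word-extract macros above; on little-endian Hexagon, W0 is the low word of the 64-bit value:

static inline int sum_words(HEXAGON_Vect64 v) {
  return HEXAGON_V64_GET_W0(v) + HEXAGON_V64_GET_W1(v);  /* low + high word */
}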
-/* Extract half word macros */
-
-#define HEXAGON_V64_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[0];                                                \
-  })
-#define HEXAGON_V64_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[1];                                                \
-  })
-#define HEXAGON_V64_GET_H2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[2];                                                \
-  })
-#define HEXAGON_V64_GET_H3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[3];                                                \
-  })
-#define HEXAGON_V64_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[0];                                               \
-  })
-#define HEXAGON_V64_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[1];                                               \
-  })
-#define HEXAGON_V64_GET_UH2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[2];                                               \
-  })
-#define HEXAGON_V64_GET_UH3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[3];                                               \
-  })
-
-/* Extract byte macros */
-
-#define HEXAGON_V64_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[0];                                                \
-  })
-#define HEXAGON_V64_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[1];                                                \
-  })
-#define HEXAGON_V64_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[2];                                                \
-  })
-#define HEXAGON_V64_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[3];                                                \
-  })
-#define HEXAGON_V64_GET_B4(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[4];                                                \
-  })
-#define HEXAGON_V64_GET_B5(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[5];                                                \
-  })
-#define HEXAGON_V64_GET_B6(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[6];                                                \
-  })
-#define HEXAGON_V64_GET_B7(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[7];                                                \
-  })
-#define HEXAGON_V64_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[0];                                               \
-  })
-#define HEXAGON_V64_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[1];                                               \
-  })
-#define HEXAGON_V64_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[2];                                               \
-  })
-#define HEXAGON_V64_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[3];                                               \
-  })
-#define HEXAGON_V64_GET_UB4(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[4];                                               \
-  })
-#define HEXAGON_V64_GET_UB5(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[5];                                               \
-  })
-#define HEXAGON_V64_GET_UB6(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[6];                                               \
-  })
-#define HEXAGON_V64_GET_UB7(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[7];                                               \
-  })
-
-/* NOTE: All set macros return a HEXAGON_Vect64 type */
-
-/* Set doubleword macro */
-
-#define HEXAGON_V64_PUT_D(v, new) (new)
-
-/* Set word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_W0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_W1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_W0(v, new)                                                   \
-  (((v) & 0xffffffff00000000LL) | ((HEXAGON_Vect64)((unsigned int)(new))))
-#define HEXAGON_V64_PUT_W1(v, new)                                                   \
-  (((v) & 0x00000000ffffffffLL) | (((HEXAGON_Vect64)(new)) << 32LL))
-
-#endif /* !__hexagon__ */
-
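Both PUT_W variants (the union form on Hexagon, the mask/shift form elsewhere) are meant to agree; a sketch assembling a vector from two words:

static inline HEXAGON_Vect64 pair_words(int lo, int hi) {
  HEXAGON_Vect64 v = 0;
  v = HEXAGON_V64_PUT_W0(v, lo);  /* replace the low word  */
  v = HEXAGON_V64_PUT_W1(v, hi);  /* replace the high word */
  return v;
}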
-/* Set half word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[2] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[3] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_H0(v, new)                                                   \
-  (((v) & 0xffffffffffff0000LL) | ((HEXAGON_Vect64)((unsigned short)(new))))
-#define HEXAGON_V64_PUT_H1(v, new)                                                   \
-  (((v) & 0xffffffff0000ffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 16LL))
-#define HEXAGON_V64_PUT_H2(v, new)                                                   \
-  (((v) & 0xffff0000ffffffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 32LL))
-#define HEXAGON_V64_PUT_H3(v, new)                                                   \
-  (((v) & 0x0000ffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 48LL))
-
-#endif /* !__hexagon__ */
-
-/* Set byte macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[2] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[3] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B4(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[4] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B5(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[5] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B6(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[6] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B7(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[7] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffffffffffff00LL) | ((HEXAGON_Vect64)((unsigned char)(new))))
-#define HEXAGON_V64_PUT_B1(v, new)                                                   \
-  (((v) & 0xffffffffffff00ffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 8LL))
-#define HEXAGON_V64_PUT_B2(v, new)                                                   \
-  (((v) & 0xffffffffff00ffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 16LL))
-#define HEXAGON_V64_PUT_B3(v, new)                                                   \
-  (((v) & 0xffffffff00ffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 24LL))
-#define HEXAGON_V64_PUT_B4(v, new)                                                   \
-  (((v) & 0xffffff00ffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 32LL))
-#define HEXAGON_V64_PUT_B5(v, new)                                                   \
-  (((v) & 0xffff00ffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 40LL))
-#define HEXAGON_V64_PUT_B6(v, new)                                                   \
-  (((v) & 0xff00ffffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 48LL))
-#define HEXAGON_V64_PUT_B7(v, new)                                                   \
-  (((v) & 0x00ffffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 56LL))
-
-#endif /* !__hexagon__ */
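-
-/*
- * Usage sketch (illustrative): each byte-set fallback clears one 8-bit
- * lane and ORs in the zero-extended replacement, e.g.
- *
- *   HEXAGON_Vect64 v = 0;
- *   v = HEXAGON_V64_PUT_B7(v, 0x7f);   // v == 0x7f00000000000000LL
- *
- * HEXAGON_V64_PUT_B7 alone omits the (unsigned char) cast: after the
- * shift by 56 only the low byte of the operand can land in the result.
- */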
-
-/* NOTE: All create macros return a HEXAGON_Vect64 type */
-
-/* Create from a doubleword */
-
-#define HEXAGON_V64_CREATE_D(d) (d)
-
-/* Create from words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_W(w1, w0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.w[0] = (w0);                                         \
-    _HEXAGON_V64_internal_union.w[1] = (w1);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_W(w1, w0)                                                 \
-  ((((HEXAGON_Vect64)(w1)) << 32LL) | ((HEXAGON_Vect64)((w0) & 0xffffffff)))
-
-#endif /* !__hexagon__ */
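-
-/*
- * Usage sketch (illustrative): HEXAGON_V64_CREATE_W packs w1 into the
- * upper and w0 into the lower 32 bits, with both code paths agreeing:
- *
- *   HEXAGON_Vect64 v = HEXAGON_V64_CREATE_W(0x11112222, 0x33334444);
- *   // v == 0x1111222233334444LL
- *
- * The (w0) & 0xffffffff mask in the fallback keeps a negative w0 from
- * sign-extending into the upper word.
- */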
-
-/* Create from half words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.h[0] = (h0);                                         \
-    _HEXAGON_V64_internal_union.h[1] = (h1);                                         \
-    _HEXAGON_V64_internal_union.h[2] = (h2);                                         \
-    _HEXAGON_V64_internal_union.h[3] = (h3);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0)                                         \
-  ((((HEXAGON_Vect64)(h3)) << 48LL) | (((HEXAGON_Vect64)((h2) & 0xffff)) << 32LL) |        \
-   (((HEXAGON_Vect64)((h1) & 0xffff)) << 16LL) | ((HEXAGON_Vect64)((h0) & 0xffff)))
-
-#endif /* !__hexagon__ */
-
-/* Create from bytes */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.b[0] = (b0);                                         \
-    _HEXAGON_V64_internal_union.b[1] = (b1);                                         \
-    _HEXAGON_V64_internal_union.b[2] = (b2);                                         \
-    _HEXAGON_V64_internal_union.b[3] = (b3);                                         \
-    _HEXAGON_V64_internal_union.b[4] = (b4);                                         \
-    _HEXAGON_V64_internal_union.b[5] = (b5);                                         \
-    _HEXAGON_V64_internal_union.b[6] = (b6);                                         \
-    _HEXAGON_V64_internal_union.b[7] = (b7);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  ((((HEXAGON_Vect64)(b7)) << 56LL) | (((HEXAGON_Vect64)((b6) & 0xff)) << 48LL) |          \
-   (((HEXAGON_Vect64)((b5) & 0xff)) << 40LL) | (((HEXAGON_Vect64)((b4) & 0xff)) << 32LL) | \
-   (((HEXAGON_Vect64)((b3) & 0xff)) << 24LL) | (((HEXAGON_Vect64)((b2) & 0xff)) << 16LL) | \
-   (((HEXAGON_Vect64)((b1) & 0xff)) << 8LL) | ((HEXAGON_Vect64)((b0) & 0xff)))
-
-#endif /* !__hexagon__ */
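-
-/*
- * Usage sketch (illustrative): create/extract round-trip, with the
- * little-endian lane order used on Hexagon, where byte argument bN lands
- * in lane N:
- *
- *   HEXAGON_Vect64 v = HEXAGON_V64_CREATE_B(7, 6, 5, 4, 3, 2, 1, 0);
- *   // v == 0x0706050403020100LL, HEXAGON_V64_GET_B5(v) == 5
- */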
-
-#ifdef __cplusplus
-
-class HEXAGON_Vect64C {
-public:
-  // Constructors
-  HEXAGON_Vect64C(long long d = 0) : data(d) {};
-  HEXAGON_Vect64C(int w1, int w0) : data(HEXAGON_V64_CREATE_W(w1, w0)) {};
-  HEXAGON_Vect64C(short h3, short h2, short h1, short h0)
-      : data(HEXAGON_V64_CREATE_H(h3, h2, h1, h0)) {};
-  HEXAGON_Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
-            signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};
-  HEXAGON_Vect64C(const HEXAGON_Vect64C &v) : data(v.data) {};
-
-  HEXAGON_Vect64C &operator=(const HEXAGON_Vect64C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator long long() {
-    return data;
-  };
-
-  // Extract doubleword methods
-  long long D(void) {
-    return HEXAGON_V64_GET_D(data);
-  };
-  unsigned long long UD(void) {
-    return HEXAGON_V64_GET_UD(data);
-  };
-
-  // Extract word methods
-  int W0(void) {
-    return HEXAGON_V64_GET_W0(data);
-  };
-  int W1(void) {
-    return HEXAGON_V64_GET_W1(data);
-  };
-  unsigned int UW0(void) {
-    return HEXAGON_V64_GET_UW0(data);
-  };
-  unsigned int UW1(void) {
-    return HEXAGON_V64_GET_UW1(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return HEXAGON_V64_GET_H0(data);
-  };
-  short H1(void) {
-    return HEXAGON_V64_GET_H1(data);
-  };
-  short H2(void) {
-    return HEXAGON_V64_GET_H2(data);
-  };
-  short H3(void) {
-    return HEXAGON_V64_GET_H3(data);
-  };
-  unsigned short UH0(void) {
-    return HEXAGON_V64_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return HEXAGON_V64_GET_UH1(data);
-  };
-  unsigned short UH2(void) {
-    return HEXAGON_V64_GET_UH2(data);
-  };
-  unsigned short UH3(void) {
-    return HEXAGON_V64_GET_UH3(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return HEXAGON_V64_GET_B0(data);
-  };
-  signed char B1(void) {
-    return HEXAGON_V64_GET_B1(data);
-  };
-  signed char B2(void) {
-    return HEXAGON_V64_GET_B2(data);
-  };
-  signed char B3(void) {
-    return HEXAGON_V64_GET_B3(data);
-  };
-  signed char B4(void) {
-    return HEXAGON_V64_GET_B4(data);
-  };
-  signed char B5(void) {
-    return HEXAGON_V64_GET_B5(data);
-  };
-  signed char B6(void) {
-    return HEXAGON_V64_GET_B6(data);
-  };
-  signed char B7(void) {
-    return HEXAGON_V64_GET_B7(data);
-  };
-  unsigned char UB0(void) {
-    return HEXAGON_V64_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return HEXAGON_V64_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return HEXAGON_V64_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return HEXAGON_V64_GET_UB3(data);
-  };
-  unsigned char UB4(void) {
-    return HEXAGON_V64_GET_UB4(data);
-  };
-  unsigned char UB5(void) {
-    return HEXAGON_V64_GET_UB5(data);
-  };
-  unsigned char UB6(void) {
-    return HEXAGON_V64_GET_UB6(data);
-  };
-  unsigned char UB7(void) {
-    return HEXAGON_V64_GET_UB7(data);
-  };
-
-  // NOTE: All set methods return a HEXAGON_Vect64C type
-
-  // Set doubleword method
-  HEXAGON_Vect64C D(long long d) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_D(data, d));
-  };
-
-  // Set word methods
-  HEXAGON_Vect64C W0(int w) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_W0(data, w));
-  };
-  HEXAGON_Vect64C W1(int w) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_W1(data, w));
-  };
-
-  // Set half word methods
-  HEXAGON_Vect64C H0(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H0(data, h));
-  };
-  HEXAGON_Vect64C H1(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H1(data, h));
-  };
-  HEXAGON_Vect64C H2(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H2(data, h));
-  };
-  HEXAGON_Vect64C H3(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H3(data, h));
-  };
-
-  // Set byte methods
-  HEXAGON_Vect64C B0(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B0(data, b));
-  };
-  HEXAGON_Vect64C B1(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B1(data, b));
-  };
-  HEXAGON_Vect64C B2(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B2(data, b));
-  };
-  HEXAGON_Vect64C B3(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B3(data, b));
-  };
-  HEXAGON_Vect64C B4(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B4(data, b));
-  };
-  HEXAGON_Vect64C B5(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B5(data, b));
-  };
-  HEXAGON_Vect64C B6(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B6(data, b));
-  };
-  HEXAGON_Vect64C B7(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B7(data, b));
-  };
-
-private:
-  long long data;
-};
-
-#endif /* __cplusplus */
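-
-/*
- * Usage sketch (C++, illustrative): HEXAGON_Vect64C is a thin value
- * wrapper over the macros above; the set methods build a new vector
- * instead of mutating:
- *
- *   HEXAGON_Vect64C v(0x11112222, 0x33334444);   // (w1, w0) constructor
- *   short h = v.H2();                            // h == 0x2222
- *   HEXAGON_Vect64C u = v.B0(0x55);              // u.UB0() == 0x55,
- *                                                // v is unchanged
- */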
-
-/* 32 Bit Vectors */
-
-typedef int HEXAGON_Vect32;
-
-/* Extract word macros */
-
-#define HEXAGON_V32_GET_W(v) (v)
-#define HEXAGON_V32_GET_UW(v) ((unsigned int)(v))
-
-/* Extract half word macros */
-
-#define HEXAGON_V32_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[0];                                                \
-  })
-#define HEXAGON_V32_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[1];                                                \
-  })
-#define HEXAGON_V32_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.uh[0];                                               \
-  })
-#define HEXAGON_V32_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.uh[1];                                               \
-  })
-
-/* Extract byte macros */
-
-#define HEXAGON_V32_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[0];                                                \
-  })
-#define HEXAGON_V32_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[1];                                                \
-  })
-#define HEXAGON_V32_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[2];                                                \
-  })
-#define HEXAGON_V32_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[3];                                                \
-  })
-#define HEXAGON_V32_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[0];                                               \
-  })
-#define HEXAGON_V32_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[1];                                               \
-  })
-#define HEXAGON_V32_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[2];                                               \
-  })
-#define HEXAGON_V32_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[3];                                               \
-  })
-
-/* NOTE: All set macros return a HEXAGON_Vect32 type */
-
-/* Set word macro */
-
-#define HEXAGON_V32_PUT_W(v, new) (new)
-
-/* Set half word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[0] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[1] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_PUT_H0(v, new)                                                   \
-  (((v) & 0xffff0000) | ((HEXAGON_Vect32)((unsigned short)(new))))
-#define HEXAGON_V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((HEXAGON_Vect32)(new)) << 16))
-
-#endif /* !__hexagon__ */
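-
-/*
- * Usage sketch (illustrative): as with the 64-bit forms, these macros
- * return the updated word rather than modifying their argument:
- *
- *   HEXAGON_Vect32 v = 0x11112222;
- *   HEXAGON_Vect32 u = HEXAGON_V32_PUT_H1(v, 0x3333);
- *   // u == 0x33332222, v still == 0x11112222
- */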
-
-/* Set byte macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[0] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[1] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[2] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[3] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffff00) | ((HEXAGON_Vect32)((unsigned char)(new))))
-#define HEXAGON_V32_PUT_B1(v, new)                                                   \
-  (((v) & 0xffff00ff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 8))
-#define HEXAGON_V32_PUT_B2(v, new)                                                   \
-  (((v) & 0xff00ffff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 16))
-#define HEXAGON_V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((HEXAGON_Vect32)(new)) << 24))
-
-#endif /* !__hexagon__ */
-
-/* NOTE: All create macros return a HEXAGON_Vect32 type */
-
-/* Create from a word */
-
-#define HEXAGON_V32_CREATE_W(w) (w)
-
-/* Create from half words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_CREATE_H(h1, h0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.h[0] = (h0);                                         \
-    _HEXAGON_V32_internal_union.h[1] = (h1);                                         \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_CREATE_H(h1, h0)                                                 \
-  ((((HEXAGON_Vect32)(h1)) << 16) | ((HEXAGON_Vect32)((h0) & 0xffff)))
-
-#endif /* !__hexagon__ */
-
-/* Create from bytes */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.b[0] = (b0);                                         \
-    _HEXAGON_V32_internal_union.b[1] = (b1);                                         \
-    _HEXAGON_V32_internal_union.b[2] = (b2);                                         \
-    _HEXAGON_V32_internal_union.b[3] = (b3);                                         \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0)                                         \
-  ((((HEXAGON_Vect32)(b3)) << 24) | (((HEXAGON_Vect32)((b2) & 0xff)) << 16) |              \
-   (((HEXAGON_Vect32)((b1) & 0xff)) << 8) | ((HEXAGON_Vect32)((b0) & 0xff)))
-
-#endif /* !__hexagon__ */
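-
-/*
- * Usage sketch (illustrative): packing halfwords or bytes into a single
- * word; both code paths agree:
- *
- *   HEXAGON_Vect32 h = HEXAGON_V32_CREATE_H(0x1111, 0x2222);  // 0x11112222
- *   HEXAGON_Vect32 b = HEXAGON_V32_CREATE_B(3, 2, 1, 0);      // 0x03020100
- */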
-
-#ifdef __cplusplus
-
-class HEXAGON_Vect32C {
-public:
-  // Constructors
-  HEXAGON_Vect32C(int w = 0) : data(w) {};
-  HEXAGON_Vect32C(short h1, short h0) : data(HEXAGON_V32_CREATE_H(h1, h0)) {};
-  HEXAGON_Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(HEXAGON_V32_CREATE_B(b3, b2, b1, b0)) {};
-  HEXAGON_Vect32C(const HEXAGON_Vect32C &v) : data(v.data) {};
-
-  HEXAGON_Vect32C &operator=(const HEXAGON_Vect32C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator int() {
-    return data;
-  };
-
-  // Extract word methods
-  int W(void) {
-    return HEXAGON_V32_GET_W(data);
-  };
-  unsigned int UW(void) {
-    return HEXAGON_V32_GET_UW(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return HEXAGON_V32_GET_H0(data);
-  };
-  short H1(void) {
-    return HEXAGON_V32_GET_H1(data);
-  };
-  unsigned short UH0(void) {
-    return HEXAGON_V32_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return HEXAGON_V32_GET_UH1(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return HEXAGON_V32_GET_B0(data);
-  };
-  signed char B1(void) {
-    return HEXAGON_V32_GET_B1(data);
-  };
-  signed char B2(void) {
-    return HEXAGON_V32_GET_B2(data);
-  };
-  signed char B3(void) {
-    return HEXAGON_V32_GET_B3(data);
-  };
-  unsigned char UB0(void) {
-    return HEXAGON_V32_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return HEXAGON_V32_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return HEXAGON_V32_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return HEXAGON_V32_GET_UB3(data);
-  };
-
-  // NOTE: All set methods return a HEXAGON_Vect32C type
-
-  // Set word method
-  HEXAGON_Vect32C W(int w) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_W(data, w));
-  };
-
-  // Set half word methods
-  HEXAGON_Vect32C H0(short h) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_H0(data, h));
-  };
-  HEXAGON_Vect32C H1(short h) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_H1(data, h));
-  };
-
-  // Set byte methods
-  HEXAGON_Vect32C B0(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B0(data, b));
-  };
-  HEXAGON_Vect32C B1(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B1(data, b));
-  };
-  HEXAGON_Vect32C B2(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B2(data, b));
-  };
-  HEXAGON_Vect32C B3(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B3(data, b));
-  };
-
-private:
-  int data;
-};
-
-#endif /* __cplusplus */
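-
-/*
- * Usage sketch (C++, illustrative): since every set method returns a
- * fresh HEXAGON_Vect32C, updates can be chained, and operator int()
- * recovers the raw word:
- *
- *   int w = HEXAGON_Vect32C(0).H1(0x1111).H0(0x2222);   // w == 0x11112222
- */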
-
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
-// V65 Vector types
-#if __HVX_ARCH__ >= 65
-#if defined __HVX__ && (__HVX_LENGTH__ == 128)
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define HVX_VectorPred     HEXAGON_VecPred128
-  #define HVX_Vector         HEXAGON_Vect1024
-  #define HVX_VectorPair     HEXAGON_Vect2048
-  #define HVX_UVector        HEXAGON_UVect1024
-  #define HVX_UVectorPair    HEXAGON_UVect2048
-#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#if defined __HVX__ && (__HVX_LENGTH__ == 64)
-  typedef long HEXAGON_VecPred64 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-  typedef long HEXAGON_Vect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_UVect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  #define HVX_VectorPred     HEXAGON_VecPred64
-  #define HVX_Vector         HEXAGON_Vect512
-  #define HVX_VectorPair     HEXAGON_Vect1024
-  #define HVX_UVector        HEXAGON_UVect512
-  #define HVX_UVectorPair    HEXAGON_UVect1024
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
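-
-/*
- * Usage sketch (illustrative): these typedefs make the configured vector
- * width checkable at compile time.  In a translation unit built with HVX
- * enabled at 128 bytes (e.g. -mhvx -mhvx-length=128B):
- *
- *   #if __HVX_ARCH__ >= 65 && defined(__HVX__) && (__HVX_LENGTH__ == 128)
- *   _Static_assert(sizeof(HVX_Vector) == 128, "single vector");
- *   _Static_assert(sizeof(HVX_VectorPair) == 256, "vector pair");
- *   #endif
- *
- * The HVX_UVector variants have the same sizes but only 4-byte alignment,
- * for data that is not vector-aligned.
- */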
-
-/* Predicates */
-
-typedef int HEXAGON_Pred;
-
-/***
- *** backward compatibility aliases
- ***/
-
-/* Old names */
-#define Q6Vect Q6Vect64
-#define Q6V_GET_D Q6V64_GET_D
-#define Q6V_GET_UD Q6V64_GET_UD
-#define Q6V_GET_W0 Q6V64_GET_W0
-#define Q6V_GET_W1 Q6V64_GET_W1
-#define Q6V_GET_UW0 Q6V64_GET_UW0
-#define Q6V_GET_UW1 Q6V64_GET_UW1
-#define Q6V_GET_H0 Q6V64_GET_H0
-#define Q6V_GET_H1 Q6V64_GET_H1
-#define Q6V_GET_H2 Q6V64_GET_H2
-#define Q6V_GET_H3 Q6V64_GET_H3
-#define Q6V_GET_UH0 Q6V64_GET_UH0
-#define Q6V_GET_UH1 Q6V64_GET_UH1
-#define Q6V_GET_UH2 Q6V64_GET_UH2
-#define Q6V_GET_UH3 Q6V64_GET_UH3
-#define Q6V_GET_B0 Q6V64_GET_B0
-#define Q6V_GET_B1 Q6V64_GET_B1
-#define Q6V_GET_B2 Q6V64_GET_B2
-#define Q6V_GET_B3 Q6V64_GET_B3
-#define Q6V_GET_B4 Q6V64_GET_B4
-#define Q6V_GET_B5 Q6V64_GET_B5
-#define Q6V_GET_B6 Q6V64_GET_B6
-#define Q6V_GET_B7 Q6V64_GET_B7
-#define Q6V_GET_UB0 Q6V64_GET_UB0
-#define Q6V_GET_UB1 Q6V64_GET_UB1
-#define Q6V_GET_UB2 Q6V64_GET_UB2
-#define Q6V_GET_UB3 Q6V64_GET_UB3
-#define Q6V_GET_UB4 Q6V64_GET_UB4
-#define Q6V_GET_UB5 Q6V64_GET_UB5
-#define Q6V_GET_UB6 Q6V64_GET_UB6
-#define Q6V_GET_UB7 Q6V64_GET_UB7
-#define Q6V_PUT_D Q6V64_PUT_D
-#define Q6V_PUT_W0 Q6V64_PUT_W0
-#define Q6V_PUT_W1 Q6V64_PUT_W1
-#define Q6V_PUT_H0 Q6V64_PUT_H0
-#define Q6V_PUT_H1 Q6V64_PUT_H1
-#define Q6V_PUT_H2 Q6V64_PUT_H2
-#define Q6V_PUT_H3 Q6V64_PUT_H3
-#define Q6V_PUT_B0 Q6V64_PUT_B0
-#define Q6V_PUT_B1 Q6V64_PUT_B1
-#define Q6V_PUT_B2 Q6V64_PUT_B2
-#define Q6V_PUT_B3 Q6V64_PUT_B3
-#define Q6V_PUT_B4 Q6V64_PUT_B4
-#define Q6V_PUT_B5 Q6V64_PUT_B5
-#define Q6V_PUT_B6 Q6V64_PUT_B6
-#define Q6V_PUT_B7 Q6V64_PUT_B7
-#define Q6V_CREATE_D Q6V64_CREATE_D
-#define Q6V_CREATE_W Q6V64_CREATE_W
-#define Q6V_CREATE_H Q6V64_CREATE_H
-#define Q6V_CREATE_B Q6V64_CREATE_B
-
-#ifdef __cplusplus
-#define Q6VectC Q6Vect64C
-#endif /* __cplusplus */
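-
-/*
- * Usage sketch (illustrative): legacy code keeps compiling through these
- * aliases, e.g.
- *
- *   Q6Vect64 v = Q6V_CREATE_W(1, 2);   // expands to Q6V64_CREATE_W(1, 2)
- *   int lo = Q6V_GET_W0(v);            // lo == 2
- */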
-
-/* 64 Bit Vectors */
-
-typedef long long __attribute__((__may_alias__)) Q6Vect64;
-
-/* Extract doubleword macros */
-
-#define Q6V64_GET_D(v) (v)
-#define Q6V64_GET_UD(v) ((unsigned long long)(v))
-
-/* Extract word macros */
-
-#define Q6V64_GET_W0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[0];                                                \
-  })
-#define Q6V64_GET_W1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[1];                                                \
-  })
-#define Q6V64_GET_UW0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uw[0];                                               \
-  })
-#define Q6V64_GET_UW1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uw[1];                                               \
-  })
-
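-/*
- * Usage sketch (illustrative): the signed and unsigned extractors differ
- * only in the declared lane type (on a little-endian build, w[1] is the
- * upper word):
- *
- *   Q6Vect64 v = Q6V64_CREATE_W(-1, 0);
- *   // Q6V64_GET_W1(v)  == -1
- *   // Q6V64_GET_UW1(v) == 0xffffffffu
- */
-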
-/* Extract half word macros */
-
-#define Q6V64_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[0];                                                \
-  })
-#define Q6V64_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[1];                                                \
-  })
-#define Q6V64_GET_H2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[2];                                                \
-  })
-#define Q6V64_GET_H3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[3];                                                \
-  })
-#define Q6V64_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[0];                                               \
-  })
-#define Q6V64_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[1];                                               \
-  })
-#define Q6V64_GET_UH2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[2];                                               \
-  })
-#define Q6V64_GET_UH3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[3];                                               \
-  })
-
-/* Extract byte macros */
-
-#define Q6V64_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[0];                                                \
-  })
-#define Q6V64_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[1];                                                \
-  })
-#define Q6V64_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[2];                                                \
-  })
-#define Q6V64_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[3];                                                \
-  })
-#define Q6V64_GET_B4(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[4];                                                \
-  })
-#define Q6V64_GET_B5(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[5];                                                \
-  })
-#define Q6V64_GET_B6(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[6];                                                \
-  })
-#define Q6V64_GET_B7(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[7];                                                \
-  })
-#define Q6V64_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[0];                                               \
-  })
-#define Q6V64_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[1];                                               \
-  })
-#define Q6V64_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[2];                                               \
-  })
-#define Q6V64_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[3];                                               \
-  })
-#define Q6V64_GET_UB4(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[4];                                               \
-  })
-#define Q6V64_GET_UB5(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[5];                                               \
-  })
-#define Q6V64_GET_UB6(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[6];                                               \
-  })
-#define Q6V64_GET_UB7(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[7];                                               \
-  })
-
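-/* Usage sketch for the extract macros: lane 0 is the least significant unit,
-   as the little-endian unions above imply.  A minimal illustration; the
-   helper name is illustrative only. */
-static inline void __q6v64_extract_example(void) {
-  Q6Vect64 v = 0x1122334455667788LL;
-  int w1 = Q6V64_GET_W1(v);            /* 0x11223344, high word       */
-  short h0 = Q6V64_GET_H0(v);          /* 0x7788, lowest half word    */
-  unsigned char b7 = Q6V64_GET_UB7(v); /* 0x11, most significant byte */
-  (void)w1; (void)h0; (void)b7;
-}
-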
-/* NOTE: All set macros return a Q6Vect64 type */
-
-/* Set doubleword macro */
-
-#define Q6V64_PUT_D(v, new) (new)
-
-/* Set word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_W0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_W1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_W0(v, new)                                                   \
-  (((v) & 0xffffffff00000000LL) | ((Q6Vect64)((unsigned int)(new))))
-#define Q6V64_PUT_W1(v, new)                                                   \
-  (((v) & 0x00000000ffffffffLL) | (((Q6Vect64)(new)) << 32LL))
-
-#endif /* !__qdsp6__ */
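-
-/* Usage sketch: the put macros return the updated doubleword and leave the
-   input untouched.  A minimal illustration; the helper name is illustrative
-   only. */
-static inline Q6Vect64 __q6v64_put_word_example(Q6Vect64 v) {
-  v = Q6V64_PUT_W0(v, 0x55667788);     /* replace the low word  */
-  return Q6V64_PUT_W1(v, 0x11223344);  /* replace the high word */
-}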
-
-/* Set half word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[2] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[3] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_H0(v, new)                                                   \
-  (((v) & 0xffffffffffff0000LL) | ((Q6Vect64)((unsigned short)(new))))
-#define Q6V64_PUT_H1(v, new)                                                   \
-  (((v) & 0xffffffff0000ffffLL) | (((Q6Vect64)((unsigned short)(new))) << 16LL))
-#define Q6V64_PUT_H2(v, new)                                                   \
-  (((v) & 0xffff0000ffffffffLL) | (((Q6Vect64)((unsigned short)(new))) << 32LL))
-#define Q6V64_PUT_H3(v, new)                                                   \
-  (((v) & 0x0000ffffffffffffLL) | (((Q6Vect64)(new)) << 48LL))
-
-#endif /* !__qdsp6__ */
-
-/* Set byte macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[2] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[3] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B4(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[4] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B5(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[5] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B6(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[6] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B7(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[7] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffffffffffff00LL) | ((Q6Vect64)((unsigned char)(new))))
-#define Q6V64_PUT_B1(v, new)                                                   \
-  (((v) & 0xffffffffffff00ffLL) | (((Q6Vect64)((unsigned char)(new))) << 8LL))
-#define Q6V64_PUT_B2(v, new)                                                   \
-  (((v) & 0xffffffffff00ffffLL) | (((Q6Vect64)((unsigned char)(new))) << 16LL))
-#define Q6V64_PUT_B3(v, new)                                                   \
-  (((v) & 0xffffffff00ffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 24LL))
-#define Q6V64_PUT_B4(v, new)                                                   \
-  (((v) & 0xffffff00ffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 32LL))
-#define Q6V64_PUT_B5(v, new)                                                   \
-  (((v) & 0xffff00ffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 40LL))
-#define Q6V64_PUT_B6(v, new)                                                   \
-  (((v) & 0xff00ffffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 48LL))
-#define Q6V64_PUT_B7(v, new)                                                   \
-  (((v) & 0x00ffffffffffffffLL) | (((Q6Vect64)(new)) << 56LL))
-
-#endif /* !__qdsp6__ */
-
-/* NOTE: All create macros return a Q6Vect64 type */
-
-/* Create from a doubleword */
-
-#define Q6V64_CREATE_D(d) (d)
-
-/* Create from words */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_W(w1, w0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.w[0] = (w0);                                         \
-    _Q6V64_internal_union.w[1] = (w1);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_W(w1, w0)                                                 \
-  ((((Q6Vect64)(w1)) << 32LL) | ((Q6Vect64)((w0) & 0xffffffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from half words */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_H(h3, h2, h1, h0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.h[0] = (h0);                                         \
-    _Q6V64_internal_union.h[1] = (h1);                                         \
-    _Q6V64_internal_union.h[2] = (h2);                                         \
-    _Q6V64_internal_union.h[3] = (h3);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_H(h3, h2, h1, h0)                                         \
-  ((((Q6Vect64)(h3)) << 48LL) | (((Q6Vect64)((h2) & 0xffff)) << 32LL) |        \
-   (((Q6Vect64)((h1) & 0xffff)) << 16LL) | ((Q6Vect64)((h0) & 0xffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from bytes */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.b[0] = (b0);                                         \
-    _Q6V64_internal_union.b[1] = (b1);                                         \
-    _Q6V64_internal_union.b[2] = (b2);                                         \
-    _Q6V64_internal_union.b[3] = (b3);                                         \
-    _Q6V64_internal_union.b[4] = (b4);                                         \
-    _Q6V64_internal_union.b[5] = (b5);                                         \
-    _Q6V64_internal_union.b[6] = (b6);                                         \
-    _Q6V64_internal_union.b[7] = (b7);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  ((((Q6Vect64)(b7)) << 56LL) | (((Q6Vect64)((b6) & 0xff)) << 48LL) |          \
-   (((Q6Vect64)((b5) & 0xff)) << 40LL) | (((Q6Vect64)((b4) & 0xff)) << 32LL) | \
-   (((Q6Vect64)((b3) & 0xff)) << 24LL) | (((Q6Vect64)((b2) & 0xff)) << 16LL) | \
-   (((Q6Vect64)((b1) & 0xff)) << 8LL) | ((Q6Vect64)((b0) & 0xff)))
-
-#endif /* !__qdsp6__ */
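-
-/* Usage sketch: all three create forms below produce the same doubleword;
-   arguments run from the most significant lane down to lane 0.  A minimal
-   illustration; the helper name is illustrative only. */
-static inline void __q6v64_create_example(void) {
-  Q6Vect64 a = Q6V64_CREATE_W(0x11223344, 0x55667788);
-  Q6Vect64 b = Q6V64_CREATE_H(0x1122, 0x3344, 0x5566, 0x7788);
-  Q6Vect64 c = Q6V64_CREATE_B(0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88);
-  (void)a; (void)b; (void)c;           /* all 0x1122334455667788 */
-}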
-
-#ifdef __cplusplus
-
-class Q6Vect64C {
-public:
-  // Constructors
-  Q6Vect64C(long long d = 0) : data(d) {};
-  Q6Vect64C(int w1, int w0) : data(Q6V64_CREATE_W(w1, w0)) {};
-  Q6Vect64C(short h3, short h2, short h1, short h0)
-      : data(Q6V64_CREATE_H(h3, h2, h1, h0)) {};
-  Q6Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
-            signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};
-  Q6Vect64C(const Q6Vect64C &v) : data(v.data) {};
-
-  Q6Vect64C &operator=(const Q6Vect64C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator long long() {
-    return data;
-  };
-
-  // Extract doubleword methods
-  long long D(void) {
-    return Q6V64_GET_D(data);
-  };
-  unsigned long long UD(void) {
-    return Q6V64_GET_UD(data);
-  };
-
-  // Extract word methods
-  int W0(void) {
-    return Q6V64_GET_W0(data);
-  };
-  int W1(void) {
-    return Q6V64_GET_W1(data);
-  };
-  unsigned int UW0(void) {
-    return Q6V64_GET_UW0(data);
-  };
-  unsigned int UW1(void) {
-    return Q6V64_GET_UW1(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return Q6V64_GET_H0(data);
-  };
-  short H1(void) {
-    return Q6V64_GET_H1(data);
-  };
-  short H2(void) {
-    return Q6V64_GET_H2(data);
-  };
-  short H3(void) {
-    return Q6V64_GET_H3(data);
-  };
-  unsigned short UH0(void) {
-    return Q6V64_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return Q6V64_GET_UH1(data);
-  };
-  unsigned short UH2(void) {
-    return Q6V64_GET_UH2(data);
-  };
-  unsigned short UH3(void) {
-    return Q6V64_GET_UH3(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return Q6V64_GET_B0(data);
-  };
-  signed char B1(void) {
-    return Q6V64_GET_B1(data);
-  };
-  signed char B2(void) {
-    return Q6V64_GET_B2(data);
-  };
-  signed char B3(void) {
-    return Q6V64_GET_B3(data);
-  };
-  signed char B4(void) {
-    return Q6V64_GET_B4(data);
-  };
-  signed char B5(void) {
-    return Q6V64_GET_B5(data);
-  };
-  signed char B6(void) {
-    return Q6V64_GET_B6(data);
-  };
-  signed char B7(void) {
-    return Q6V64_GET_B7(data);
-  };
-  unsigned char UB0(void) {
-    return Q6V64_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return Q6V64_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return Q6V64_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return Q6V64_GET_UB3(data);
-  };
-  unsigned char UB4(void) {
-    return Q6V64_GET_UB4(data);
-  };
-  unsigned char UB5(void) {
-    return Q6V64_GET_UB5(data);
-  };
-  unsigned char UB6(void) {
-    return Q6V64_GET_UB6(data);
-  };
-  unsigned char UB7(void) {
-    return Q6V64_GET_UB7(data);
-  };
-
-  // NOTE: All set methods return a Q6Vect64C type
-
-  // Set doubleword method
-  Q6Vect64C D(long long d) {
-    return Q6Vect64C(Q6V64_PUT_D(data, d));
-  };
-
-  // Set word methods
-  Q6Vect64C W0(int w) {
-    return Q6Vect64C(Q6V64_PUT_W0(data, w));
-  };
-  Q6Vect64C W1(int w) {
-    return Q6Vect64C(Q6V64_PUT_W1(data, w));
-  };
-
-  // Set half word methods
-  Q6Vect64C H0(short h) {
-    return Q6Vect64C(Q6V64_PUT_H0(data, h));
-  };
-  Q6Vect64C H1(short h) {
-    return Q6Vect64C(Q6V64_PUT_H1(data, h));
-  };
-  Q6Vect64C H2(short h) {
-    return Q6Vect64C(Q6V64_PUT_H2(data, h));
-  };
-  Q6Vect64C H3(short h) {
-    return Q6Vect64C(Q6V64_PUT_H3(data, h));
-  };
-
-  // Set byte methods
-  Q6Vect64C B0(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B0(data, b));
-  };
-  Q6Vect64C B1(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B1(data, b));
-  };
-  Q6Vect64C B2(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B2(data, b));
-  };
-  Q6Vect64C B3(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B3(data, b));
-  };
-  Q6Vect64C B4(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B4(data, b));
-  };
-  Q6Vect64C B5(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B5(data, b));
-  };
-  Q6Vect64C B6(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B6(data, b));
-  };
-  Q6Vect64C B7(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B7(data, b));
-  };
-
-private:
-  long long data;
-};
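-
-// Usage sketch: setters return a fresh Q6Vect64C rather than mutating in
-// place, so updates chain naturally.  Minimal illustration; the function
-// name is illustrative only.
-inline long long q6vect64c_example() {
-  Q6Vect64C v(0x11223344, 0x55667788); // build from two words
-  return v.H3(0x7fff).B0(0x01).D();    // replace lanes, then extract
-}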
-
-#endif /* __cplusplus */
-
-/* 32 Bit Vectors */
-
-typedef int Q6Vect32;
-
-/* Extract word macros */
-
-#define Q6V32_GET_W(v) (v)
-#define Q6V32_GET_UW(v) ((unsigned int)(v))
-
-/* Extract half word macros */
-
-#define Q6V32_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[0];                                                \
-  })
-#define Q6V32_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[1];                                                \
-  })
-#define Q6V32_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.uh[0];                                               \
-  })
-#define Q6V32_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.uh[1];                                               \
-  })
-
-/* Extract byte macros */
-
-#define Q6V32_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[0];                                                \
-  })
-#define Q6V32_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[1];                                                \
-  })
-#define Q6V32_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[2];                                                \
-  })
-#define Q6V32_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[3];                                                \
-  })
-#define Q6V32_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[0];                                               \
-  })
-#define Q6V32_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[1];                                               \
-  })
-#define Q6V32_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[2];                                               \
-  })
-#define Q6V32_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[3];                                               \
-  })
-
-/* NOTE: All set macros return a Q6Vect32 type */
-
-/* Set word macro */
-
-#define Q6V32_PUT_W(v, new) (new)
-
-/* Set half word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V32_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[0] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[1] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_PUT_H0(v, new)                                                   \
-  (((v) & 0xffff0000) | ((Q6Vect32)((unsigned short)(new))))
-#define Q6V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((Q6Vect32)(new)) << 16))
-
-#endif /* !__qdsp6__ */
-
-/* Set byte macros */
-
-#ifdef __qdsp6__
-
-#define Q6V32_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[0] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[1] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[2] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[3] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffff00) | ((Q6Vect32)((unsigned char)(new))))
-#define Q6V32_PUT_B1(v, new)                                                   \
-  (((v) & 0xffff00ff) | (((Q6Vect32)((unsigned char)(new))) << 8))
-#define Q6V32_PUT_B2(v, new)                                                   \
-  (((v) & 0xff00ffff) | (((Q6Vect32)((unsigned char)(new))) << 16))
-#define Q6V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((Q6Vect32)(new)) << 24))
-
-#endif /* !__qdsp6__ */
-
-/* NOTE: All create macros return a Q6Vect32 type */
-
-/* Create from a word */
-
-#define Q6V32_CREATE_W(w) (w)
-
-/* Create from half words */
-
-#ifdef __qdsp6__
-
-#define Q6V32_CREATE_H(h1, h0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.h[0] = (h0);                                         \
-    _Q6V32_internal_union.h[1] = (h1);                                         \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_CREATE_H(h1, h0)                                                 \
-  ((((Q6Vect32)(h1)) << 16) | ((Q6Vect32)((h0) & 0xffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from bytes */
-#ifdef __qdsp6__
-
-#define Q6V32_CREATE_B(b3, b2, b1, b0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.b[0] = (b0);                                         \
-    _Q6V32_internal_union.b[1] = (b1);                                         \
-    _Q6V32_internal_union.b[2] = (b2);                                         \
-    _Q6V32_internal_union.b[3] = (b3);                                         \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_CREATE_B(b3, b2, b1, b0)                                         \
-  ((((Q6Vect32)(b3)) << 24) | (((Q6Vect32)((b2) & 0xff)) << 16) |              \
-   (((Q6Vect32)((b1) & 0xff)) << 8) | ((Q6Vect32)((b0) & 0xff)))
-
-#endif /* !__qdsp6__ */
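-
-/* Usage sketch: build a word from byte lanes and read lanes back.  A minimal
-   illustration; the helper name is illustrative only. */
-static inline void __q6v32_create_example(void) {
-  Q6Vect32 v = Q6V32_CREATE_B(0x11, 0x22, 0x33, 0x44); /* 0x11223344 */
-  short h1 = Q6V32_GET_H1(v);          /* 0x1122, high half word */
-  unsigned char b0 = Q6V32_GET_UB0(v); /* 0x44, lowest byte      */
-  (void)h1; (void)b0;
-}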
-
-#ifdef __cplusplus
-
-class Q6Vect32C {
-public:
-  // Constructors
-  Q6Vect32C(int w = 0) : data(w) {};
-  Q6Vect32C(short h1, short h0) : data(Q6V32_CREATE_H(h1, h0)) {};
-  Q6Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(Q6V32_CREATE_B(b3, b2, b1, b0)) {};
-  Q6Vect32C(const Q6Vect32C &v) : data(v.data) {};
-
-  Q6Vect32C &operator=(const Q6Vect32C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator int() {
-    return data;
-  };
-
-  // Extract word methods
-  int W(void) {
-    return Q6V32_GET_W(data);
-  };
-  unsigned int UW(void) {
-    return Q6V32_GET_UW(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return Q6V32_GET_H0(data);
-  };
-  short H1(void) {
-    return Q6V32_GET_H1(data);
-  };
-  unsigned short UH0(void) {
-    return Q6V32_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return Q6V32_GET_UH1(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return Q6V32_GET_B0(data);
-  };
-  signed char B1(void) {
-    return Q6V32_GET_B1(data);
-  };
-  signed char B2(void) {
-    return Q6V32_GET_B2(data);
-  };
-  signed char B3(void) {
-    return Q6V32_GET_B3(data);
-  };
-  unsigned char UB0(void) {
-    return Q6V32_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return Q6V32_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return Q6V32_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return Q6V32_GET_UB3(data);
-  };
-
-  // NOTE: All set methods return a Q6Vect32C type
-
-  // Set word method
-  Q6Vect32C W(int w) {
-    return Q6Vect32C(Q6V32_PUT_W(data, w));
-  };
-
-  // Set half word methods
-  Q6Vect32C H0(short h) {
-    return Q6Vect32C(Q6V32_PUT_H0(data, h));
-  };
-  Q6Vect32C H1(short h) {
-    return Q6Vect32C(Q6V32_PUT_H1(data, h));
-  };
-
-  // Set byte methods
-  Q6Vect32C B0(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B0(data, b));
-  };
-  Q6Vect32C B1(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B1(data, b));
-  };
-  Q6Vect32C B2(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B2(data, b));
-  };
-  Q6Vect32C B3(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B3(data, b));
-  };
-
-private:
-  int data;
-};
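-
-// Usage sketch: the 32-bit wrapper mirrors Q6Vect64C; setters return a new
-// value.  Minimal illustration; the function name is illustrative only.
-inline int q6vect32c_example() {
-  Q6Vect32C v(0x1122, 0x3344);         // build from two half words
-  return v.B3(0x7f).W();               // replace byte 3, extract the word
-}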
-
-#endif /* __cplusplus */
-
-// V65 Vector types
-#if __HVX_ARCH__ >= 65
-#if defined __HVX__ && (__HVX_LENGTH__ == 128)
-typedef long Q6VecPred128 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-typedef long Q6Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#if defined __HVX__ && (__HVX_LENGTH__ == 64)
-typedef long Q6VecPred64 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-typedef long Q6Vect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#endif /* __HVX_ARCH__ >= 65 */
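-
-/* Usage sketch: the vector widths encode bits in the type name, so a
-   Q6Vect1024 occupies 128 bytes and a Q6Vect2048 (a vector pair) occupies
-   256.  A compile-time check, assuming a v65+ target built with 128-byte
-   HVX; the typedef names below are illustrative only. */
-#if __HVX_ARCH__ >= 65 && defined __HVX__ && (__HVX_LENGTH__ == 128)
-typedef char __q6vect1024_size_check[sizeof(Q6Vect1024) == 128 ? 1 : -1];
-typedef char __q6vect2048_size_check[sizeof(Q6Vect2048) == 256 ? 1 : -1];
-#endif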
-
-/* Predicates */
-
-typedef int Q6Pred;
-
-
-#ifdef __HVX__
-
-// Extract HVX VectorPair macro.
-#define HEXAGON_HVX_GET_W(v) (v)
-
-// Extract HVX Vector macros.
-#define HEXAGON_HVX_GET_V0(v)                                                  \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[0];                                          \
-  })
-#define HEXAGON_HVX_GET_V1(v)                                                  \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[1];                                          \
-  })
-#define HEXAGON_HVX_GET_P(v)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_VectorPred P[2];                                                     \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.P[0];                                          \
-  })
-
-// Set HVX VectorPair macro.
-#define HEXAGON_HVX_PUT_W(v, new) (new)
-
-// Set HVX Vector macros.
-#define HEXAGON_HVX_PUT_V0(v, new)                                             \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[0] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-#define HEXAGON_HVX_PUT_V1(v, new)                                             \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[1] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-#define HEXAGON_HVX_PUT_P(v, new)                                              \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_VectorPred P[2];                                                     \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.P[0] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-
-#define HEXAGON_HVX_CREATE_W(v1, v0)                                           \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.V[0] = (v0);                                   \
-    _HEXAGON_HVX_internal_union.V[1] = (v1);                                   \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
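-
-/* Usage sketch: split a pair into its two vectors and reassemble them
-   swapped.  A minimal illustration (HVX-enabled build assumed); the helper
-   name is illustrative only.  Note CREATE_W takes (v1, v0). */
-static inline HVX_VectorPair __hexagon_hvx_swap_example(HVX_VectorPair w) {
-  HVX_Vector v0 = HEXAGON_HVX_GET_V0(w);
-  HVX_Vector v1 = HEXAGON_HVX_GET_V1(w);
-  return HEXAGON_HVX_CREATE_W(v0, v1); /* v0 becomes the high vector */
-}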
-
-#ifdef __cplusplus
-
-class HVX_Vect {
-public:
-  // Constructors.
-  // Default.
-  HVX_Vect() : data(Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero())){};
-
-  // Custom constructors.
-  HVX_Vect(HVX_VectorPair W) : data(W){};
-  HVX_Vect(HVX_Vector v1, HVX_Vector v0) : data(HEXAGON_HVX_CREATE_W(v1, v0)){};
-
-  // Copy constructor.
-  HVX_Vect(const HVX_Vect &W) = default;
-
-  // Move constructor.
-  HVX_Vect(HVX_Vect &&W) = default;
-
-  // Assignment operator.
-  HVX_Vect &operator=(const HVX_Vect &W) = default;
-
-  operator HVX_VectorPair() { return data; };
-
-  // Extract VectorPair method.
-  HVX_VectorPair W(void) { return HEXAGON_HVX_GET_W(data); };
-
-  // Extract Vector methods.
-  HVX_Vector V0(void) { return HEXAGON_HVX_GET_V0(data); };
-  HVX_Vector V1(void) { return HEXAGON_HVX_GET_V1(data); };
-  HVX_VectorPred P(void) { return HEXAGON_HVX_GET_P(data); };
-
-  // NOTE: All set methods return a HVX_Vect type.
-  // Set HVX VectorPair method.
-  HVX_Vect W(HVX_VectorPair w) { return HVX_Vect(HEXAGON_HVX_PUT_W(data, w)); };
-
-  // Set HVX Vector methods.
-  HVX_Vect V0(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V0(data, v)); };
-  HVX_Vect V1(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V1(data, v)); };
-  HVX_Vect P(HVX_VectorPred p) { return HVX_Vect(HEXAGON_HVX_PUT_P(data, p)); };
-
-private:
-  HVX_VectorPair data;
-};
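-
-// Usage sketch: HVX_Vect carries a vector pair by value; accessors read
-// lanes, setters return a new HVX_Vect.  Minimal illustration (HVX-enabled
-// C++ build assumed); the function name is illustrative only.
-inline HVX_Vector hvx_vect_example(HVX_VectorPair w) {
-  HVX_Vect pair(w);                        // wrap an existing pair
-  HVX_Vect swapped(pair.V0(), pair.V1());  // constructor order is (v1, v0)
-  return swapped.V1();                     // equals the original V0
-}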
-
-#endif /* __cplusplus */
-#endif /* __HVX__ */
-
-#define HEXAGON_UDMA_DM0_STATUS_IDLE             0x00000000
-#define HEXAGON_UDMA_DM0_STATUS_RUN              0x00000001
-#define HEXAGON_UDMA_DM0_STATUS_ERROR            0x00000002
-#define HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE      0
-#define HEXAGON_UDMA_DESC_DSTATE_COMPLETE        1
-#define HEXAGON_UDMA_DESC_ORDER_NOORDER          0
-#define HEXAGON_UDMA_DESC_ORDER_ORDER            1
-#define HEXAGON_UDMA_DESC_BYPASS_OFF             0
-#define HEXAGON_UDMA_DESC_BYPASS_ON              1
-#define HEXAGON_UDMA_DESC_COMP_NONE              0
-#define HEXAGON_UDMA_DESC_COMP_DLBC              1
-#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE0         0
-#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE1         1
-
-typedef struct hexagon_udma_descriptor_type0_s
-{
-    void *next;
-    unsigned int length:24;
-    unsigned int desctype:2;
-    unsigned int dstcomp:1;
-    unsigned int srccomp:1;
-    unsigned int dstbypass:1;
-    unsigned int srcbypass:1;
-    unsigned int order:1;
-    unsigned int dstate:1;
-    void *src;
-    void *dst;
-} hexagon_udma_descriptor_type0_t;
-
-typedef struct hexagon_udma_descriptor_type1_s
-{
-    void *next;
-    unsigned int length:24;
-    unsigned int desctype:2;
-    unsigned int dstcomp:1;
-    unsigned int srccomp:1;
-    unsigned int dstbypass:1;
-    unsigned int srcbypass:1;
-    unsigned int order:1;
-    unsigned int dstate:1;
-    void *src;
-    void *dst;
-    unsigned int allocation:28;
-    unsigned int padding:4;
-    unsigned int roiwidth:16;
-    unsigned int roiheight:16;
-    unsigned int srcstride:16;
-    unsigned int dststride:16;
-    unsigned int srcwidthoffset:16;
-    unsigned int dstwidthoffset:16;
-} hexagon_udma_descriptor_type1_t;
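-
-/* Usage sketch: filling a linear (type 0) descriptor for one transfer.  A
-   minimal illustration; the helper name and field choices are illustrative,
-   not a documented driver API. */
-static inline void hexagon_udma_fill_type0_example(
-    hexagon_udma_descriptor_type0_t *d, void *src, void *dst, unsigned len)
-{
-    d->next = 0;                                /* end of descriptor chain */
-    d->length = len;                            /* transfer size, 24 bits  */
-    d->desctype = HEXAGON_UDMA_DESC_DESCTYPE_TYPE0;
-    d->dstcomp = HEXAGON_UDMA_DESC_COMP_NONE;
-    d->srccomp = HEXAGON_UDMA_DESC_COMP_NONE;
-    d->dstbypass = HEXAGON_UDMA_DESC_BYPASS_OFF;
-    d->srcbypass = HEXAGON_UDMA_DESC_BYPASS_OFF;
-    d->order = HEXAGON_UDMA_DESC_ORDER_NOORDER;
-    d->dstate = HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE;
-    d->src = src;
-    d->dst = dst;
-}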
-
-#endif /* !HEXAGON_TYPES_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/hresetintrin.h b/linux-x86/lib64/clang/14.0.2/include/hresetintrin.h
deleted file mode 100644
index 13e31a2..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hresetintrin.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*===---------------- hresetintrin.h - HRESET intrinsics -------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __X86GPRINTRIN_H
-#error "Never use <hresetintrin.h> directly; include <x86gprintrin.h> instead."
-#endif
-
-#ifndef __HRESETINTRIN_H
-#define __HRESETINTRIN_H
-
-#if __has_extension(gnu_asm)
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
-  __attribute__((__always_inline__, __nodebug__, __target__("hreset")))
-
-/// Provides a hint to the processor to selectively reset the prediction
-///    history of the current logical processor specified by a 32-bit integer
-///    value \a __eax.
-///
-/// This intrinsic corresponds to the <c> HRESET </c> instruction.
-///
-/// \operation
-///    IF __eax == 0
-///      // nop
-///    ELSE
-///      FOR i := 0 to 31
-///        IF __eax[i]
-///          ResetPredictionFeature(i)
-///        FI
-///      ENDFOR
-///    FI
-/// \endoperation
-static __inline void __DEFAULT_FN_ATTRS
-_hreset(int __eax)
-{
-  __asm__ ("hreset $0" :: "a"(__eax));
-}
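-
-/* Usage sketch: per the operation above, a zero argument is a no-op and each
-   set bit in __eax selects one prediction feature to reset.  The wrapper
-   below is purely illustrative (not an Intel intrinsic); which bits are
-   valid is enumerated by CPUID, not by this header. */
-static __inline void __DEFAULT_FN_ATTRS
-_hreset_example(void)
-{
-  _hreset(1 << 0); /* reset prediction-history feature 0 only */
-}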
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __has_extension(gnu_asm) */
-
-#endif /* __HRESETINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
deleted file mode 100644
index 41ce7a6..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ /dev/null
@@ -1,4392 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Automatically generated file, do not edit!
-//===----------------------------------------------------------------------===//
-
-
-
-#ifndef _HVX_HEXAGON_PROTOS_H_
-#define _HVX_HEXAGON_PROTOS_H_ 1
-
-#ifdef __HVX__
-#if __HVX_LENGTH__ == 128
-#define __BUILTIN_VECTOR_WRAP(a) a ## _128B
-#else
-#define __BUILTIN_VECTOR_WRAP(a) a
-#endif
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vextract(Vu32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs)
-   Instruction Type:      LD
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=hi(Vss32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=lo(Vss32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
-#endif /* __HVX_ARCH__ >= 60 */
-
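-/* Usage sketch combining the wrappers above: broadcast a scalar into every
-   32-bit lane, then take the upper half of a vector pair.  A minimal
-   illustration; assumes an HVX-enabled build with the HVX_Vector typedefs
-   in scope, and the helper name is illustrative only. */
-#if __HVX_ARCH__ >= 60
-static inline HVX_Vector __q6_hvx_proto_example(HVX_VectorPair Vss, int r) {
-  HVX_Vector splat = Q6_V_vsplat_R(r); /* every word lane = r      */
-  HVX_Vector hi = Q6_V_hi_W(Vss);      /* upper vector of the pair */
-  (void)splat;
-  return hi;
-}
-#endif /* __HVX_ARCH__ >= 60 */
-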
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=and(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=and(Qs4,!Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=not(Qs4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=or(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=or(Qs4,!Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vsetq(Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=xor(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
-#endif /* __HVX_ARCH__ >= 60 */
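A sketch of predicate algebra with the boolean forms above; qa and qb stand for masks produced elsewhere (for instance by the vcmp intrinsics later in this header).

static HVX_VectorPred combine_masks(HVX_VectorPred qa, HVX_VectorPred qb)
{
    HVX_VectorPred both   = Q6_Q_and_QQ(qa, qb);   /* qa & qb          */
    HVX_VectorPred only_a = Q6_Q_and_QQn(qa, qb);  /* qa & !qb         */
    return Q6_Q_or_QQ(both, only_a);               /* equivalent to qa */
}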
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) vmem(Rt32+#s4)=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) vmem(Rt32+#s4):nt=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) vmem(Rt32+#s4):nt=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) vmem(Rt32+#s4)=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
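A sketch of a masked store built on the predicated-store forms above; dst is assumed vector-aligned, and the :nt variants add a non-temporal hint for streaming writes.

static void store_masked(HVX_VectorPred q, HVX_Vector *dst, HVX_Vector v)
{
    Q6_vmem_QRIV(q, dst, v);       /* lanes with q set receive v        */
    /* Q6_vmem_QnRIV(q, dst, v) would write the complementary lanes.    */
}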
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vabsdiff(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vabsdiff(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
-#endif /* __HVX_ARCH__ >= 60 */
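A sketch of the per-lane absolute difference these forms provide, the core step of SAD-style kernels; here on unsigned bytes.

static HVX_Vector absdiff_u8(HVX_Vector a, HVX_Vector b)
{
    return Q6_Vub_vabsdiff_VubVub(a, b);  /* |a - b| in each byte lane */
}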
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vabs(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vabs(Vu32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vabs(Vu32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vabs(Vu32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vadd(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vadd(Vuu32.b,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.b+=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.b+=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vuu32.h,Vvv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.h+=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.h+=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vuu32.w,Vvv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.w+=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.w+=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
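A sketch contrasting the saturating and widening 16-bit adds above; the widening forms return an HVX_VectorPair that Q6_V_lo_W / Q6_V_hi_W (defined earlier) split back into single vectors.

static HVX_Vector add_h_saturating(HVX_Vector a, HVX_Vector b)
{
    return Q6_Vh_vadd_VhVh_sat(a, b);         /* clamps to the 16-bit range */
}

static HVX_Vector add_h_widening_lo(HVX_Vector a, HVX_Vector b)
{
    HVX_VectorPair w = Q6_Ww_vadd_VhVh(a, b); /* 16-bit + 16-bit -> 32-bit  */
    return Q6_V_lo_W(w);                      /* low half of the pair       */
}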
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=valign(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=valign(Vu32,Vv32,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
-#endif /* __HVX_ARCH__ >= 60 */
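A sketch of the classic unaligned-load idiom valign enables: load the two aligned vectors that straddle an address and shift the wanted bytes into place. The 128-byte vector length and the exact offset semantics of valign follow the HVX manual; treat this as a pattern, not a drop-in routine.

#include <stdint.h>

static HVX_Vector load_unaligned(const unsigned char *p)
{
    const HVX_Vector *a =
        (const HVX_Vector *)((uintptr_t)p & ~(uintptr_t)127);
    return Q6_V_valign_VVR(a[1], a[0], (int)(uintptr_t)p);
}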
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32|=vand(Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vand(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vand(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
-#endif /* __HVX_ARCH__ >= 60 */
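A sketch of the predicate <-> vector conversion idiom these vand forms are commonly used for, with -1 as the all-ones mask (exact per-byte behavior per the HVX manual).

static HVX_Vector pred_to_vector(HVX_VectorPred q)
{
    return Q6_V_vand_QR(q, -1);   /* bytes with q set become 0xFF         */
}

static HVX_VectorPred vector_to_pred(HVX_Vector v)
{
    return Q6_Q_vand_VR(v, -1);   /* q set where the masked byte is nonzero */
}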
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasl(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasl(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasl(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vasl(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasl(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasr(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vasr(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasr(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
-#endif /* __HVX_ARCH__ >= 60 */
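A sketch of the fixed-point narrowing step the shift-and-pack forms above exist for: two 32-bit vectors are shifted right, rounded, saturated, and packed into one 16-bit vector. Which argument feeds the even and which the odd lanes follows the HVX manual.

static HVX_Vector requantize_w_to_h(HVX_Vector src_a, HVX_Vector src_b,
                                    int shift)
{
    return Q6_Vh_vasr_VwVwR_rnd_sat(src_a, src_b, shift);
}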
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=Vu32
-   C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=Vuu32
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vavg(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vavg(Vu32.h,Vv32.h):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vavg(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vavg(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vavg(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vavg(Vu32.w,Vv32.w):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vcl0(Vu32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vcl0(Vu32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vcombine(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=#0
-   C Intrinsic Prototype: HVX_Vector Q6_V_vzero()
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
-#endif /* __HVX_ARCH__ >= 60 */
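A sketch of the accumulator setup these two provide: a zeroed vector widened into a zeroed pair, the usual starting state for the accumulating (_acc) intrinsics below.

static HVX_VectorPair zero_pair(void)
{
    HVX_Vector z = Q6_V_vzero();       /* Vd = #0       */
    return Q6_W_vcombine_VV(z, z);     /* Vdd = {z, z}  */
}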
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vdeal(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vdeale(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vdeal(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vdeal(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vdelta(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vdmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vdmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
-#endif /* __HVX_ARCH__ >= 60 */
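A sketch of the multiply-accumulate pattern the _acc forms enable: one step of a two-tap byte filter whose two signed-byte coefficients are packed into the scalar operand.

static HVX_Vector filter_step(HVX_Vector acc, HVX_Vector pixels, int coeffs)
{
    return Q6_Vh_vdmpyacc_VhVubRb(acc, pixels, coeffs);  /* acc += vdmpy */
}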
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vdmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
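-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch (not part of the generated API above): builds a
-   per-element predicate that is true where each signed halfword of 'v' lies
-   strictly inside (lo, hi), by AND-accumulating two greater-than compares.
-   Function and parameter names are illustrative assumptions; the HVX_*
-   typedefs come from hexagon_types.h.
-   ========================================================================== */
-static inline HVX_VectorPred example_q_in_range_Vh(HVX_Vector v, HVX_Vector lo,
-                                                   HVX_Vector hi)
-{
-  HVX_VectorPred q = Q6_Q_vcmp_gt_VhVh(v, lo); /* q   :=  v > lo  */
-  return Q6_Q_vcmp_gtand_QVhVh(q, hi, v);      /* q  &=  hi > v   */
-}
-#endif /* __HVX_ARCH__ >= 60 */
-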
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w=vinsert(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vlalign(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vlalign(Vu32,Vv32,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
-#endif /* __HVX_ARCH__ >= 60 */
-
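-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: vlalign is typically used to stitch two adjacent
-   full-vector loads into one vector spanning a misaligned boundary. The
-   exact byte selection is defined by the Hexagon PRM; 'offset' (the
-   misalignment in bytes) and the function name are assumptions of this
-   example.
-   ========================================================================== */
-static inline HVX_Vector example_lalign_pair(HVX_Vector hi_part,
-                                             HVX_Vector lo_part, Word32 offset)
-{
-  return Q6_V_vlalign_VVR(hi_part, lo_part, offset);
-}
-#endif /* __HVX_ARCH__ >= 60 */
-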
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vlsr(Vu32.uh,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vlsr(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vlsr(Vu32.uw,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vlsr(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
-#endif /* __HVX_ARCH__ >= 60 */
-
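-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: vlsr performs a logical (zero-filling) right
-   shift, so it divides unsigned lanes by a power of two. A minimal example,
-   assuming 'k' is a shift count valid for 16-bit lanes (0..15):
-   ========================================================================== */
-static inline HVX_Vector example_div_pow2_Vuh(HVX_Vector v, Word32 k)
-{
-  return Q6_Vuh_vlsr_VuhR(v, k); /* each uh lane becomes lane >> k */
-}
-#endif /* __HVX_ARCH__ >= 60 */
-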
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
-#endif /* __HVX_ARCH__ >= 60 */
-
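-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: vlut32 looks each byte of 'idx' up in a 32-entry
-   slice of the table vector 'table'; 'bank' selects which 32-entry slice,
-   per the Hexagon PRM. Larger tables are built by OR-accumulating the
-   *_oracc form across banks. Names here are illustrative assumptions.
-   ========================================================================== */
-static inline HVX_Vector example_lut32_Vb(HVX_Vector idx, HVX_Vector table,
-                                          Word32 bank)
-{
-  return Q6_Vb_vlut32_VbVbR(idx, table, bank);
-}
-#endif /* __HVX_ARCH__ >= 60 */
-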
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmax(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vmax(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vmax(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmax(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmin(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vmin(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vmin(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmin(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
-#endif /* __HVX_ARCH__ >= 60 */
-
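-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: composing vmax and vmin gives a per-element clamp
-   of signed halfwords into [lo, hi]. Names are illustrative assumptions.
-   ========================================================================== */
-static inline HVX_Vector example_clamp_Vh(HVX_Vector v, HVX_Vector lo,
-                                          HVX_Vector hi)
-{
-  return Q6_Vh_vmin_VhVh(Q6_Vh_vmax_VhVh(v, lo), hi);
-}
-#endif /* __HVX_ARCH__ >= 60 */
-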
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpa(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpa(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
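-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: vmpa multiplies the two vectors of a pair by the
-   two packed scalar coefficients in Rt and sums the products per lane,
-   which maps naturally onto a 2-tap filter. The packing of the byte
-   coefficients into 'coeffs' follows the Hexagon PRM; names here are
-   illustrative assumptions.
-   ========================================================================== */
-static inline HVX_VectorPair example_2tap_Wh(HVX_VectorPair samples,
-                                             Word32 coeffs)
-{
-  return Q6_Wh_vmpa_WubRb(samples, coeffs);
-}
-#endif /* __HVX_ARCH__ >= 60 */
-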
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpye(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Rt32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyieo(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyie(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyie(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyie(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpyi(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vmpyi(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpyi(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vmpyi(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyio(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
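-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: the widening multiplies return an HVX_VectorPair
-   holding the full-width products of the even and odd input lanes (per the
-   PRM's lane convention); the halves are recovered with Q6_V_lo_W and
-   Q6_V_hi_W, defined earlier in this header. The function name and the
-   out-parameter convention are illustrative assumptions.
-   ========================================================================== */
-static inline void example_widen_mul_Vuh(HVX_Vector a, HVX_Vector b,
-                                         HVX_Vector *even_uw,
-                                         HVX_Vector *odd_uw)
-{
-  HVX_VectorPair p = Q6_Wuw_vmpy_VuhVuh(a, b);
-  *even_uw = Q6_V_lo_W(p);
-  *odd_uw  = Q6_V_hi_W(p);
-}
-#endif /* __HVX_ARCH__ >= 60 */
-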
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmux(Qt4,Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
-#endif /* __HVX_ARCH__ >= 60 */
-
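-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: a compare plus vmux implements a branch-free
-   per-element select; picking the greater word of each lane pair is shown
-   below. The function name is an illustrative assumption.
-   ========================================================================== */
-static inline HVX_Vector example_select_max_Vw(HVX_Vector a, HVX_Vector b)
-{
-  return Q6_V_vmux_QVV(Q6_Q_vcmp_gt_VwVw(a, b), a, b); /* a>b ? a : b */
-}
-#endif /* __HVX_ARCH__ >= 60 */
-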
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vnavg(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vnavg(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vnavg(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vnormamt(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vnormamt(Vu32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vnot(Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vor(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
-#endif /* __HVX_ARCH__ >= 60 */
-
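-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Editorial usage sketch: the bitwise ops compose in the obvious way, e.g.
-   NOR of two vectors. The function name is an illustrative assumption.
-   ========================================================================== */
-static inline HVX_Vector example_nor_V(HVX_Vector a, HVX_Vector b)
-{
-  return Q6_V_vnot_V(Q6_V_vor_VV(a, b));
-}
-#endif /* __HVX_ARCH__ >= 60 */
-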
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpacke(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpacke(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpack(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vpack(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpacko(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpacko(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpack(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vpack(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
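-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): the saturating packs chain naturally
-   to narrow data in stages. A sketch narrowing four word vectors to one
-   byte vector (the lane ordering shown is assumed for illustration): */
-static inline HVX_Vector example_narrow_w_to_ub(HVX_Vector w0, HVX_Vector w1,
-                                                HVX_Vector w2, HVX_Vector w3)
-{
-  HVX_Vector h01 = Q6_Vh_vpack_VwVw_sat(w1, w0);   /* words -> halfwords */
-  HVX_Vector h23 = Q6_Vh_vpack_VwVw_sat(w3, w2);
-  return Q6_Vub_vpack_VhVh_sat(h23, h01);          /* halfwords -> bytes */
-}
-#endif /* __HVX_ARCH__ >= 60 */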
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpopcount(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
-#endif /* __HEXAGON_ARCH___ >= 60 */
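-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): popcount pairs with XOR (defined later
-   in this header) to give a per-lane Hamming distance: */
-static inline HVX_Vector example_hamming_h(HVX_Vector a, HVX_Vector b)
-{
-  HVX_Vector diff = Q6_V_vxor_VV(a, b);  /* differing bits           */
-  return Q6_Vh_vpopcount_Vh(diff);       /* count them per halfword  */
-}
-#endif /* __HVX_ARCH__ >= 60 */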
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vrdelta(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
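-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): the vrmpy family reduces groups of
-   four byte products into each word lane, the core of HVX dot products and
-   small convolutions. A sketch accumulating n vectors of unsigned data
-   against signed coefficients (pointers assumed vector-aligned): */
-static inline HVX_Vector example_dot4(const HVX_Vector *data,
-                                      const HVX_Vector *coef, int n)
-{
-  HVX_Vector acc = Q6_Vw_vrmpy_VubVb(data[0], coef[0]);
-  for (int i = 1; i < n; ++i)
-    acc = Q6_Vw_vrmpyacc_VwVubVb(acc, data[i], coef[i]);
-  return acc;  /* each word lane holds a sum of 4*n byte products */
-}
-#endif /* __HVX_ARCH__ >= 60 */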
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vror(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vround(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vround(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vround(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vround(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
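-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): vround is the round-to-nearest
-   counterpart of the truncating packs above; treating the low byte of each
-   halfword as fraction bits (an assumption of this sketch), it converts
-   Q8.8 values to unsigned pixels in one step: */
-static inline HVX_Vector example_q8_to_pixels(HVX_Vector hi, HVX_Vector lo)
-{
-  /* round away the fraction bits and saturate into unsigned bytes */
-  return Q6_Vub_vround_VhVh_sat(hi, lo);
-}
-#endif /* __HVX_ARCH__ >= 60 */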
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
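-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition, semantics abbreviated): vrsad
-   accumulates sums of absolute differences between the unsigned bytes of a
-   vector pair and a 4-byte reference packed in Rt, the inner loop of
-   block-matching motion estimation. A sketch accumulating over n rows: */
-static inline HVX_VectorPair example_sad_rows(const HVX_VectorPair *rows,
-                                              Word32 ref4, int n)
-{
-  HVX_VectorPair acc = Q6_Wuw_vrsad_WubRubI(rows[0], ref4, 0);
-  for (int i = 1; i < n; ++i)
-    acc = Q6_Wuw_vrsadacc_WuwWubRubI(acc, rows[i], ref4, 0);
-  return acc;
-}
-#endif /* __HVX_ARCH__ >= 60 */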
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsat(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsat(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsxt(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsxt(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
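-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): vsxt widens a vector into a pair, the
-   usual prelude to working at higher precision; the halves are extracted
-   with the Q6_V_lo_W/Q6_V_hi_W helpers defined elsewhere in this header: */
-static inline HVX_Vector example_widen_sum_b(HVX_Vector x)
-{
-  HVX_VectorPair w = Q6_Wh_vsxt_Vb(x);                /* bytes -> halfwords */
-  return Q6_Vh_vadd_VhVh(Q6_V_lo_W(w), Q6_V_hi_W(w)); /* fold the halves    */
-}
-#endif /* __HVX_ARCH__ >= 60 */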
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuffe(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuff(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuffe(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuff(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuffo(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vshuff(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
-#endif /* __HEXAGON_ARCH___ >= 60 */
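-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): vshuffe/vshuffo gather the even- and
-   odd-indexed lanes of two inputs, which deinterleaves channel data; here,
-   splitting a two-channel byte stream (s0 = lower vector, s1 = upper) into
-   planar vectors: */
-static inline void example_deinterleave_b(HVX_Vector s0, HVX_Vector s1,
-                                          HVX_Vector *even, HVX_Vector *odd)
-{
-  *even = Q6_Vb_vshuffe_VbVb(s1, s0);  /* even-indexed byte lanes */
-  *odd  = Q6_Vb_vshuffo_VbVb(s1, s0);  /* odd-indexed byte lanes  */
-}
-#endif /* __HVX_ARCH__ >= 60 */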
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vshuffoe(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vshuffoe(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuffo(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsub(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vsub(Vuu32.b,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.b-=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.b-=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsub(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vuu32.h,Vvv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.h-=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.h-=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
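-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): the conditional forms subtract only in
-   lanes selected by a predicate. With a compare such as Q6_Q_vcmp_gt_VhVh
-   (defined elsewhere in this header), this steps an accumulator back toward
-   a bound only where it exceeded it: */
-static inline HVX_Vector example_cond_sub_h(HVX_Vector acc, HVX_Vector bound,
-                                            HVX_Vector step)
-{
-  HVX_VectorPred over = Q6_Q_vcmp_gt_VhVh(acc, bound);  /* acc > bound? */
-  return Q6_Vh_condnac_QVhVh(over, acc, step);          /* acc -= step  */
-}
-#endif /* __HVX_ARCH__ >= 60 */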
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsub(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vuu32.w,Vvv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.w-=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.w-=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vswap(Qt4,Vu32,Vv32)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
-#endif /* __HEXAGON_ARCH___ >= 60 */
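-
-#if __HVX_ARCH__ >= 60
-/* Usage sketch (editorial addition): vswap routes each lane of its two
-   inputs to one half of the result pair according to the predicate, so one
-   compare-and-swap yields element-wise min and max together, the basic step
-   of a sorting network (which half carries which is assumed here for
-   illustration): */
-static inline void example_minmax_h(HVX_Vector a, HVX_Vector b,
-                                    HVX_Vector *mn, HVX_Vector *mx)
-{
-  HVX_VectorPair s = Q6_W_vswap_QVV(Q6_Q_vcmp_gt_VhVh(a, b), a, b);
-  *mn = Q6_V_lo_W(s);
-  *mx = Q6_V_hi_W(s);
-}
-#endif /* __HVX_ARCH__ >= 60 */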
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vtmpy(Vuu32.b,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vtmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vunpack(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vunpack(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vunpacko(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w|=vunpacko(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vunpack(Vu32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vunpack(Vu32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vxor(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vzxt(Vu32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vzxt(Vu32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vsetq2(Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4.b=vshuffe(Qs4.h,Qt4.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4.h=vshuffe(Qs4.w,Qt4.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vadd(Vu32.b,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(vclb(Vu32.h),Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(vclb(Vu32.w),Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vadd(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vadd(Vu32.ub,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vadd(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(!Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32|=vand(!Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(!Qv4,Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Qv4,Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vlsr(Vu32.ub,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
-#endif /* __HVX_ARCH__ >= 62 */
-
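/* Editor's illustration (not part of the deleted header): one common use
 * of the vlut32 family above is a 128-entry byte table lookup built from
 * the four 32-entry groups of a single 128-byte HVX vector. `idx` and
 * `tbl` are hypothetical names. */
static inline HVX_Vector lut128(HVX_Vector idx, HVX_Vector tbl) {
  HVX_Vector r = Q6_Vb_vlut32_VbVbI(idx, tbl, 0); /* entries   0..31  */
  r = Q6_Vb_vlut32or_VbVbVbI(r, idx, tbl, 1);     /* entries  32..63  */
  r = Q6_Vb_vlut32or_VbVbVbI(r, idx, tbl, 2);     /* entries  64..95  */
  r = Q6_Vb_vlut32or_VbVbVbI(r, idx, tbl, 3);     /* entries  96..127 */
  return r;
}
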
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vmax(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vmin(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
-#endif /* __HVX_ARCH__ >= 62 */
-
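/* Editor's illustration (not part of the deleted header): the byte
 * min/max pair above composes into a per-lane clamp. */
static inline HVX_Vector clamp_b(HVX_Vector v, HVX_Vector lo, HVX_Vector hi) {
  return Q6_Vb_vmin_VbVb(Q6_Vb_vmax_VbVb(v, lo), hi);
}
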
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpa(Vuu32.uh,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vmpye(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32+=vmpyo(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
-#endif /* __HVX_ARCH__ >= 62 */
-
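/* Editor's illustration (not part of the deleted header): the usual V62
 * recipe for a full 32x32-bit per-lane product pairs the even-half
 * multiply with the odd-half accumulate above. */
static inline HVX_VectorPair mul_w_full(HVX_Vector a, HVX_Vector b) {
  HVX_VectorPair p = Q6_W_vmpye_VwVuh(a, b); /* a.w * b.uh, even halves */
  return Q6_W_vmpyoacc_WVwVh(p, a, b);       /* += a.w * b.h, odd halves */
}
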
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vround(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vround(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vsat(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsub(Vu32.b,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsub(Vu32.ub,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
-#endif /* __HVX_ARCH__ >= 62 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vabs(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vabs(Vu32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vasl(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vasr(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vavg(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vavg(Vu32.b,Vv32.b):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vavg(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=#0
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero()
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
-#endif /* __HVX_ARCH__ >= 65 */
-
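/* Editor's illustration (not part of the deleted header): Q6_W_vzero is
 * the idiomatic way to clear a vector-pair accumulator before a widening
 * multiply-accumulate loop; Q6_Ww_vmpyacc_WwVhRh is declared further down
 * in this header. `src`, `n`, and `coeff` are hypothetical. */
HVX_VectorPair acc = Q6_W_vzero();
for (int i = 0; i < n; ++i)
  acc = Q6_Ww_vmpyacc_WwVhRh(acc, src[i], coeff); /* acc.w += src.h * Rt.h */
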
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
-   C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
-   C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
-   C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_GATHER_DV
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
-   C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_GATHER_DV
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
-   C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
-   C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
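/* Editor's illustration (not part of the deleted header): a halfword
 * gather. On V65 both the destination and the table are expected to live
 * in VTCM; Rt is the table base and Mu bounds the region. `dst`, `table`,
 * `bytes`, and `offsets` are hypothetical names. */
Q6_vgather_ARMVh((HVX_Vector *)dst,        /* destination, in VTCM  */
                 (Word32)(uintptr_t)table, /* table base address    */
                 bytes - 1,                /* region limit          */
                 offsets);                 /* per-lane .h offsets   */
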
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vlut4(Vu32.uh,Rtt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Rt32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vmpye(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vnavg(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
-#endif /* __HVX_ARCH__ >= 65 */
-
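/* Editor's illustration (not part of the deleted header, and assuming
 * prefixsum yields a per-lane running count of set predicate lanes): a
 * compare predicate becomes write offsets for stream compaction. The
 * Q6_Q_vcmp_gt_VbVb intrinsic is assumed to be declared elsewhere in
 * this header. */
HVX_VectorPred q = Q6_Q_vcmp_gt_VbVb(v, thresh);
HVX_Vector write_offs = Q6_Vb_prefixsum_Q(q);
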
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.h).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.h).h+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.w).w=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.w).w+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
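/* Editor's illustration (not part of the deleted header): the scatter
 * counterpart of the gather sketch above; the values in Vw are stored to
 * VTCM at the halfword offsets in Vv. `vtcm_base`, `bytes`, `offsets`,
 * and `values` are hypothetical names. */
Q6_vscatter_RMVhV((Word32)(uintptr_t)vtcm_base, bytes - 1, offsets, values);
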
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w=vasrinto(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrotr(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsatdw(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#endif /* __HVX__ */
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/immintrin.h b/linux-x86/lib64/clang/14.0.2/include/immintrin.h
deleted file mode 100644
index e5174f8..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/immintrin.h
+++ /dev/null
@@ -1,618 +0,0 @@
-/*===---- immintrin.h - Intel intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#define __IMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architecture"
-#endif
-
-#include <x86gprintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MMX__)
-#include <mmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE2__)
-#include <emmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE3__)
-#include <pmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSSE3__)
-#include <tmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__SSE4_2__) || defined(__SSE4_1__))
-#include <smmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AES__) || defined(__PCLMUL__))
-#include <wmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLFLUSHOPT__)
-#include <clflushoptintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLWB__)
-#include <clwbintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX__)
-#include <avxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX2__)
-#include <avx2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__F16C__)
-#include <f16cintrin.h>
-#endif
-
-/* No feature check desired due to internal checks */
-#include <bmiintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__BMI2__)
-#include <bmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__LZCNT__)
-#include <lzcntintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__POPCNT__)
-#include <popcntintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FMA__)
-#include <fmaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512F__)
-#include <avx512fintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VL__)
-#include <avx512vlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BW__)
-#include <avx512bwintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BITALG__)
-#include <avx512bitalgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512CD__)
-#include <avx512cdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VPOPCNTDQ__)
-#include <avx512vpopcntdqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
-#include <avx512vpopcntdqvlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VNNI__)
-#include <avx512vnniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VNNI__))
-#include <avx512vlvnniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVXVNNI__)
-#include <avxvnniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512DQ__)
-#include <avx512dqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BITALG__))
-#include <avx512vlbitalgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BW__))
-#include <avx512vlbwintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512CD__))
-#include <avx512vlcdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512DQ__))
-#include <avx512vldqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512IFMA__)
-#include <avx512ifmaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
-#include <avx512ifmavlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VBMI__)
-#include <avx512vbmiintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
-#include <avx512vbmivlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VBMI2__)
-#include <avx512vbmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
-#include <avx512vlvbmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
-/*
- * FIXME: the _Float16 type is legal only when the hardware supports
- * float16 operations. We use __AVX512FP16__ to identify whether float16
- * is supported; when it is not, the related headers are not included.
- */
-#if defined(__AVX512FP16__)
-#include <avx512fp16intrin.h>
-#endif
-
-#if defined(__AVX512FP16__) && defined(__AVX512VL__)
-#include <avx512vlfp16intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BF16__)
-#include <avx512bf16intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BF16__))
-#include <avx512vlbf16intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PKU__)
-#include <pkuintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__VPCLMULQDQ__)
-#include <vpclmulqdqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__VAES__)
-#include <vaesintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__GFNI__)
-#include <gfniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDPID__)
-/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> RDPID </c> instruction.
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
-_rdpid_u32(void) {
-  return __builtin_ia32_rdpid();
-}
-#endif // __RDPID__
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDRND__)
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand16_step(unsigned short *__p)
-{
-  return __builtin_ia32_rdrand16_step(__p);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand32_step(unsigned int *__p)
-{
-  return __builtin_ia32_rdrand32_step(__p);
-}
-
-#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand64_step(unsigned long long *__p)
-{
-  return __builtin_ia32_rdrand64_step(__p);
-}
-#endif
-#endif /* __RDRND__ */
-
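/* Editor's illustration (not part of the deleted header): the *_step
 * functions return 1 on success and 0 when the DRNG is momentarily
 * drained, so callers retry. `rdrand32_retry` is a hypothetical helper. */
static __inline__ unsigned int rdrand32_retry(void) {
  unsigned int v;
  while (!_rdrand32_step(&v))
    ; /* spin until the hardware delivers a value */
  return v;
}
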
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FSGSBASE__)
-#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u32(void)
-{
-  return __builtin_ia32_rdfsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u64(void)
-{
-  return __builtin_ia32_rdfsbase64();
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u32(void)
-{
-  return __builtin_ia32_rdgsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u64(void)
-{
-  return __builtin_ia32_rdgsbase64();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrfsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrfsbase64(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrgsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrgsbase64(__V);
-}
-
-#endif
-#endif /* __FSGSBASE__ */
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MOVBE__)
-
-/* The structs used below force the load/store to be unaligned. This is
- * accomplished with the __packed__ attribute. The __may_alias__ attribute
- * prevents TBAA metadata from being generated based on the struct and the
- * type of the field inside it.
- */
-
-static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i16(void const * __P) {
-  struct __loadu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i16(void * __P, short __D) {
-  struct __storeu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i32(void const * __P) {
-  struct __loadu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i32(void * __P, int __D) {
-  struct __storeu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i64(void const * __P) {
-  struct __loadu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i64(void * __P, long long __D) {
-  struct __storeu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
-}
-#endif
-#endif /* __MOVBE__ */
-
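/* Editor's illustration (not part of the deleted header): with the MOVBE
 * helpers above, reading a big-endian field is a single byte-swapping,
 * unaligned load. `read_be32` is a hypothetical helper. */
static __inline__ unsigned int read_be32(const void *p) {
  return (unsigned int)_loadbe_i32(p);
}
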
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RTM__)
-#include <rtmintrin.h>
-#include <xtestintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SHA__)
-#include <shaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FXSR__)
-#include <fxsrintrin.h>
-#endif
-
-/* No feature check desired due to internal MSC_VER checks */
-#include <xsaveintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVEOPT__)
-#include <xsaveoptintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVEC__)
-#include <xsavecintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVES__)
-#include <xsavesintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SHSTK__)
-#include <cetintrin.h>
-#endif
-
-/* Some intrinsics inside adxintrin.h are available only on processors with
- * ADX, whereas others are available unconditionally. */
-#include <adxintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDSEED__)
-#include <rdseedintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__WBNOINVD__)
-#include <wbnoinvdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLDEMOTE__)
-#include <cldemoteintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__WAITPKG__)
-#include <waitpkgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MOVDIRI__) || defined(__MOVDIR64B__)
-#include <movdirintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PCONFIG__)
-#include <pconfigintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SGX__)
-#include <sgxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PTWRITE__)
-#include <ptwriteintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__INVPCID__)
-#include <invpcidintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__KL__) || defined(__WIDEKL__)
-#include <keylockerintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
-#include <amxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VP2INTERSECT__)
-#include <avx512vp2intersectintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
-#include <avx512vlvp2intersectintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__ENQCMD__)
-#include <enqcmdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SERIALIZE__)
-#include <serializeintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__TSXLDTRK__)
-#include <tsxldtrkintrin.h>
-#endif
-
-#if defined(_MSC_VER) && __has_extension(gnu_asm)
-/* Define the default attributes for these intrinsics */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
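-/* Illustrative sketch, not part of the original header: the HLE intrinsics
- * above are intended to be used in acquire/release pairs so the hardware can
- * elide the lock. A minimal test-and-test-and-set spinlock built on them
- * could look like this; the __hle_* names are hypothetical. */
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__hle_spin_lock(long volatile *_Lock) {
-  /* XACQUIRE-prefixed xchg returns the previous value; 0 means we won. */
-  while (_InterlockedExchange_HLEAcquire(_Lock, 1) != 0) {
-    while (*_Lock)
-      ; /* plain reads while waiting keep the elided transaction small */
-  }
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__hle_spin_unlock(long volatile *_Lock) {
-  /* XRELEASE-prefixed xchg ends the elided critical section. */
-  (void)_InterlockedExchange_HLERelease(_Lock, 0);
-}
-#endif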
-#ifdef __cplusplus
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
-
-#endif /* __IMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/intrin.h b/linux-x86/lib64/clang/14.0.2/include/intrin.h
deleted file mode 100644
index 02e66d0..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/intrin.h
+++ /dev/null
@@ -1,629 +0,0 @@
-/* ===-------- intrin.h ---------------------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Only include this if we're compiling for the Windows platform. */
-#ifndef _MSC_VER
-#include_next <intrin.h>
-#else
-
-#ifndef __INTRIN_H
-#define __INTRIN_H
-
-/* First include the standard intrinsics. */
-#if defined(__i386__) || defined(__x86_64__)
-#include <x86intrin.h>
-#endif
-
-#if defined(__arm__)
-#include <armintr.h>
-#endif
-
-#if defined(__aarch64__)
-#include <arm64intr.h>
-#endif
-
-/* For the definition of jmp_buf. */
-#if __STDC_HOSTED__
-#include <setjmp.h>
-#endif
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-#if __x86_64__
-#define __LPTRINT_TYPE__ __int64
-#else
-#define __LPTRINT_TYPE__ long
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined(__MMX__)
-/* And the random ones that aren't in those files. */
-__m64 _m_from_float(float);
-float _m_to_float(__m64);
-#endif
-
-/* Other assorted instruction intrinsics. */
-void __addfsbyte(unsigned long, unsigned char);
-void __addfsdword(unsigned long, unsigned long);
-void __addfsword(unsigned long, unsigned short);
-void __code_seg(const char *);
-void __cpuid(int[4], int);
-void __cpuidex(int[4], int, int);
-__int64 __emul(int, int);
-unsigned __int64 __emulu(unsigned int, unsigned int);
-unsigned int __getcallerseflags(void);
-void __halt(void);
-unsigned char __inbyte(unsigned short);
-void __inbytestring(unsigned short, unsigned char *, unsigned long);
-void __incfsbyte(unsigned long);
-void __incfsdword(unsigned long);
-void __incfsword(unsigned long);
-unsigned long __indword(unsigned short);
-void __indwordstring(unsigned short, unsigned long *, unsigned long);
-void __int2c(void);
-void __invlpg(void *);
-unsigned short __inword(unsigned short);
-void __inwordstring(unsigned short, unsigned short *, unsigned long);
-void __lidt(void *);
-unsigned __int64 __ll_lshift(unsigned __int64, int);
-__int64 __ll_rshift(__int64, int);
-void __movsb(unsigned char *, unsigned char const *, size_t);
-void __movsd(unsigned long *, unsigned long const *, size_t);
-void __movsw(unsigned short *, unsigned short const *, size_t);
-void __nop(void);
-void __nvreg_restore_fence(void);
-void __nvreg_save_fence(void);
-void __outbyte(unsigned short, unsigned char);
-void __outbytestring(unsigned short, unsigned char *, unsigned long);
-void __outdword(unsigned short, unsigned long);
-void __outdwordstring(unsigned short, unsigned long *, unsigned long);
-void __outword(unsigned short, unsigned short);
-void __outwordstring(unsigned short, unsigned short *, unsigned long);
-unsigned long __readcr0(void);
-unsigned long __readcr2(void);
-unsigned __LPTRINT_TYPE__ __readcr3(void);
-unsigned long __readcr4(void);
-unsigned long __readcr8(void);
-unsigned int __readdr(unsigned int);
-#ifdef __i386__
-unsigned char __readfsbyte(unsigned long);
-unsigned short __readfsword(unsigned long);
-unsigned long __readfsdword(unsigned long);
-unsigned __int64 __readfsqword(unsigned long);
-#endif
-unsigned __int64 __readmsr(unsigned long);
-unsigned __int64 __readpmc(unsigned long);
-unsigned long __segmentlimit(unsigned long);
-void __sidt(void *);
-void __stosb(unsigned char *, unsigned char, size_t);
-void __stosd(unsigned long *, unsigned long, size_t);
-void __stosw(unsigned short *, unsigned short, size_t);
-void __svm_clgi(void);
-void __svm_invlpga(void *, int);
-void __svm_skinit(int);
-void __svm_stgi(void);
-void __svm_vmload(size_t);
-void __svm_vmrun(size_t);
-void __svm_vmsave(size_t);
-void __ud2(void);
-unsigned __int64 __ull_rshift(unsigned __int64, int);
-void __vmx_off(void);
-void __vmx_vmptrst(unsigned __int64 *);
-void __wbinvd(void);
-void __writecr0(unsigned int);
-void __writecr3(unsigned __INTPTR_TYPE__);
-void __writecr4(unsigned int);
-void __writecr8(unsigned int);
-void __writedr(unsigned int, unsigned int);
-void __writefsbyte(unsigned long, unsigned char);
-void __writefsdword(unsigned long, unsigned long);
-void __writefsqword(unsigned long, unsigned __int64);
-void __writefsword(unsigned long, unsigned short);
-void __writemsr(unsigned long, unsigned __int64);
-void *_AddressOfReturnAddress(void);
-unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
-unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
-unsigned char _bittest(long const *, long);
-unsigned char _bittestandcomplement(long *, long);
-unsigned char _bittestandreset(long *, long);
-unsigned char _bittestandset(long *, long);
-void __cdecl _disable(void);
-void __cdecl _enable(void);
-long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
-unsigned char _interlockedbittestandreset(long volatile *, long);
-unsigned char _interlockedbittestandset(long volatile *, long);
-void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
-                                                    void *);
-void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
-                                                    void *);
-long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
-long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
-__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
-__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
-void _ReadBarrier(void);
-void _ReadWriteBarrier(void);
-unsigned int _rorx_u32(unsigned int, const unsigned int);
-int _sarx_i32(int, unsigned int);
-#if __STDC_HOSTED__
-int __cdecl _setjmp(jmp_buf);
-#endif
-unsigned int _shlx_u32(unsigned int, unsigned int);
-unsigned int _shrx_u32(unsigned int, unsigned int);
-void _Store_HLERelease(long volatile *, long);
-void _Store64_HLERelease(__int64 volatile *, __int64);
-void _StorePointer_HLERelease(void *volatile *, void *);
-void _WriteBarrier(void);
-unsigned __int32 _xbegin(void);
-void _xend(void);
-
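-/* Illustrative sketch, not part of the original header: _xbegin/_xend
- * bracket an RTM hardware transaction. _xbegin returns 0xFFFFFFFF (the value
- * rtmintrin.h calls _XBEGIN_STARTED) when the transaction begins, or an
- * abort code otherwise; __example_txn_increment is a hypothetical name. */
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_txn_increment(long volatile *_Counter) {
-  if (_xbegin() == 0xFFFFFFFFu) { /* transaction started */
-    *_Counter += 1;
-    _xend();
-    return 1;
-  }
-  return 0; /* aborted: caller should fall back to a lock */
-}
-#endif
-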
-/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
-#ifdef __x86_64__
-void __addgsbyte(unsigned long, unsigned char);
-void __addgsdword(unsigned long, unsigned long);
-void __addgsqword(unsigned long, unsigned __int64);
-void __addgsword(unsigned long, unsigned short);
-void __faststorefence(void);
-void __incgsbyte(unsigned long);
-void __incgsdword(unsigned long);
-void __incgsqword(unsigned long);
-void __incgsword(unsigned long);
-void __movsq(unsigned long long *, unsigned long long const *, size_t);
-unsigned char __readgsbyte(unsigned long);
-unsigned long __readgsdword(unsigned long);
-unsigned __int64 __readgsqword(unsigned long);
-unsigned short __readgsword(unsigned long);
-unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
-                                unsigned __int64 _HighPart,
-                                unsigned char _Shift);
-unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
-                                 unsigned __int64 _HighPart,
-                                 unsigned char _Shift);
-void __stosq(unsigned __int64 *, unsigned __int64, size_t);
-unsigned char __vmx_on(unsigned __int64 *);
-unsigned char __vmx_vmclear(unsigned __int64 *);
-unsigned char __vmx_vmlaunch(void);
-unsigned char __vmx_vmptrld(unsigned __int64 *);
-unsigned char __vmx_vmread(size_t, size_t *);
-unsigned char __vmx_vmresume(void);
-unsigned char __vmx_vmwrite(size_t, size_t);
-void __writegsbyte(unsigned long, unsigned char);
-void __writegsdword(unsigned long, unsigned long);
-void __writegsqword(unsigned long, unsigned __int64);
-void __writegsword(unsigned long, unsigned short);
-unsigned char _bittest64(__int64 const *, __int64);
-unsigned char _bittestandcomplement64(__int64 *, __int64);
-unsigned char _bittestandreset64(__int64 *, __int64);
-unsigned char _bittestandset64(__int64 *, __int64);
-long _InterlockedAnd_np(long volatile *_Value, long _Mask);
-short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
-unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
-unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
-long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
-                                    long _Comparand);
-unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
-                                                __int64 _ExchangeHigh,
-                                                __int64 _ExchangeLow,
-                                                __int64 *_ComparandResult);
-short _InterlockedCompareExchange16_np(short volatile *_Destination,
-                                       short _Exchange, short _Comparand);
-__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
-                                         __int64 _Exchange, __int64 _Comparand);
-void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
-                                            void *_Exchange, void *_Comparand);
-long _InterlockedOr_np(long volatile *_Value, long _Mask);
-short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedOr8_np(char volatile *_Value, char _Mask);
-long _InterlockedXor_np(long volatile *_Value, long _Mask);
-short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedXor8_np(char volatile *_Value, char _Mask);
-unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
-__int64 _sarx_i64(__int64, unsigned int);
-unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
-unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
-__int64 __mulh(__int64, __int64);
-unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
-__int64 _mul128(__int64, __int64, __int64*);
-unsigned __int64 _umul128(unsigned __int64,
-                          unsigned __int64,
-                          unsigned __int64*);
-
-#endif /* __x86_64__ */
-
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-
-unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
-
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
-
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
-short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
-long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
-__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedIncrement16_acq(short volatile *_Value);
-short _InterlockedIncrement16_nf(short volatile *_Value);
-short _InterlockedIncrement16_rel(short volatile *_Value);
-long _InterlockedIncrement_acq(long volatile *_Value);
-long _InterlockedIncrement_nf(long volatile *_Value);
-long _InterlockedIncrement_rel(long volatile *_Value);
-__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedDecrement16_acq(short volatile *_Value);
-short _InterlockedDecrement16_nf(short volatile *_Value);
-short _InterlockedDecrement16_rel(short volatile *_Value);
-long _InterlockedDecrement_acq(long volatile *_Value);
-long _InterlockedDecrement_nf(long volatile *_Value);
-long _InterlockedDecrement_rel(long volatile *_Value);
-__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
-short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
-long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
-long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
-long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Bit Counting and Testing
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
-                                           long _BitPos);
-unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
-                                              long _BitPos);
-unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
-                                             long _BitPos);
-unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
-                                              long _BitPos);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
-char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
-char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
-short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
-short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
-short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
-long _InterlockedOr_acq(long volatile *_Value, long _Mask);
-long _InterlockedOr_nf(long volatile *_Value, long _Mask);
-long _InterlockedOr_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
-char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
-char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
-short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
-short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
-short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
-long _InterlockedXor_acq(long volatile *_Value, long _Mask);
-long _InterlockedXor_nf(long volatile *_Value, long _Mask);
-long _InterlockedXor_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
-char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
-char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
-short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
-short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
-short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
-long _InterlockedExchange_acq(long volatile *_Target, long _Value);
-long _InterlockedExchange_nf(long volatile *_Target, long _Value);
-long _InterlockedExchange_rel(long volatile *_Target, long _Value);
-__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedCompareExchange8_acq(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_nf(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_rel(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-short _InterlockedCompareExchange16_acq(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_nf(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_rel(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-long _InterlockedCompareExchange_acq(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_nf(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_rel(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-#endif
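-/* Illustrative sketch, not part of the original header: on ARM the _acq,
- * _nf and _rel suffixes select the memory ordering of the operation,
- * corresponding roughly to C11 memory_order_acquire, memory_order_relaxed
- * ("no fence") and memory_order_release. A hypothetical lock using them: */
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ void __DEFAULT_FN_ATTRS __example_lock(long volatile *_L) {
-  /* acquire ordering on the winning CAS orders the critical section after it */
-  while (_InterlockedCompareExchange_acq(_L, 1, 0) != 0)
-    ;
-}
-static __inline__ void __DEFAULT_FN_ATTRS __example_unlock(long volatile *_L) {
-  /* release ordering makes prior writes visible before the lock clears */
-  (void)_InterlockedExchange_rel(_L, 0);
-}
-#endif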
-#if defined(__x86_64__) || defined(__aarch64__)
-unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
-                                             __int64 _ExchangeHigh,
-                                             __int64 _ExchangeLow,
-                                             __int64 *_ComparandResult);
-#endif
-#if defined(__aarch64__)
-unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,
-                                                 __int64 _ExchangeHigh,
-                                                 __int64 _ExchangeLow,
-                                                 __int64 *_ComparandResult);
-unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,
-                                                __int64 _ExchangeHigh,
-                                                __int64 _ExchangeLow,
-                                                __int64 *_ComparandResult);
-unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
-                                                 __int64 _ExchangeHigh,
-                                                 __int64 _ExchangeLow,
-                                                 __int64 *_ComparandResult);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* movs, stos
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,
-                                                  unsigned char const *__src,
-                                                  size_t __n) {
-#if defined(__x86_64__)
-  __asm__ __volatile__("rep movsb"
-                       : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#else
-  __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
-                       "rep movsb\n"
-                       "xchg {%%esi, %1|%1, esi}"
-                       : "+D"(__dst), "+r"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#endif
-}
-static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst,
-                                                  unsigned long const *__src,
-                                                  size_t __n) {
-#if defined(__x86_64__)
-  __asm__ __volatile__("rep movs{l|d}"
-                       : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#else
-  __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
-                       "rep movs{l|d}\n"
-                       "xchg {%%esi, %1|%1, esi}"
-                       : "+D"(__dst), "+r"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#endif
-}
-static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,
-                                                  unsigned short const *__src,
-                                                  size_t __n) {
-#if defined(__x86_64__)
-  __asm__ __volatile__("rep movsw"
-                       : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#else
-  __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
-                       "rep movsw\n"
-                       "xchg {%%esi, %1|%1, esi}"
-                       : "+D"(__dst), "+r"(__src), "+c"(__n)
-                       :
-                       : "memory");
-#endif
-}
-static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst,
-                                                  unsigned long __x,
-                                                  size_t __n) {
-  __asm__ __volatile__("rep stos{l|d}"
-                       : "+D"(__dst), "+c"(__n)
-                       : "a"(__x)
-                       : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst,
-                                                  unsigned short __x,
-                                                  size_t __n) {
-  __asm__ __volatile__("rep stosw"
-                       : "+D"(__dst), "+c"(__n)
-                       : "a"(__x)
-                       : "memory");
-}
-#endif
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS __movsq(
-    unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsq"
-                       : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       :
-                       : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
-                                                  unsigned __int64 __x,
-                                                  size_t __n) {
-  __asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-#endif
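-
-/* Illustrative sketch, not part of the original header: the __movs* and
- * __stos* wrappers above emit "rep movs"/"rep stos" directly, behaving like
- * memcpy/memset specialized to an element width. Hypothetical helpers: */
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_zero_words(unsigned short *_Dst, size_t _N) {
-  __stosw(_Dst, 0, _N); /* rep stosw: store _N zero words */
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_copy_words(unsigned short *_Dst, const unsigned short *_Src,
-                     size_t _N) {
-  __movsw(_Dst, _Src, _N); /* rep movsw: copy _N 16-bit words */
-}
-#endif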
-
-/*----------------------------------------------------------------------------*\
-|* Misc
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-#if defined(__i386__)
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("cpuid"                                                                \
-        : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("xchg{q} {%%rbx, %q1|%q1, rbx}\n"                                      \
-        "cpuid\n"                                                              \
-        "xchg{q} {%%rbx, %q1|%q1, rbx}"                                        \
-        : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#endif
-static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
-  __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
-                                                    int __ecx) {
-  __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
-  __asm__ volatile("hlt");
-}
-#endif
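-
-/* Illustrative sketch, not part of the original header: __cpuid with leaf 0
- * returns the maximum supported leaf in EAX and the 12-byte vendor string in
- * EBX, EDX, ECX order. __example_read_vendor is a hypothetical helper. */
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_read_vendor(char _Vendor[13]) {
-  int __info[4];
-  __cpuid(__info, 0);
-  __builtin_memcpy(_Vendor + 0, &__info[1], 4); /* EBX */
-  __builtin_memcpy(_Vendor + 4, &__info[3], 4); /* EDX */
-  __builtin_memcpy(_Vendor + 8, &__info[2], 4); /* ECX */
-  _Vendor[12] = '\0';
-}
-#endif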
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
-static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {
-  __asm__ volatile("nop");
-}
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* MS AArch64 specific
-\*----------------------------------------------------------------------------*/
-#if defined(__aarch64__)
-unsigned __int64 __getReg(int);
-long _InterlockedAdd(long volatile *Addend, long Value);
-__int64 _ReadStatusReg(int);
-void _WriteStatusReg(int, __int64);
-
-unsigned short __cdecl _byteswap_ushort(unsigned short val);
-unsigned long __cdecl _byteswap_ulong (unsigned long val);
-unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
-
-__int64 __mulh(__int64 __a, __int64 __b);
-unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Privileged intrinsics
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__readmsr(unsigned long __register) {
-  // Loads the contents of a 64-bit model specific register (MSR) specified in
-  // the ECX register into registers EDX:EAX. The EDX register is loaded with
-  // the high-order 32 bits of the MSR and the EAX register is loaded with the
-  // low-order 32 bits. If less than 64 bits are implemented in the MSR being
-  // read, the values returned to EDX:EAX in unimplemented bit locations are
-  // undefined.
-  unsigned long __edx;
-  unsigned long __eax;
-  __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
-  return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
-}
-#endif
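-
-/* Illustrative sketch, not part of the original header: rdmsr faults outside
- * ring 0, so __readmsr is only usable from kernel-mode code. A hypothetical
- * kernel helper reading the IA32_APIC_BASE MSR (index 0x1B): */
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__example_read_apic_base(void) {
-  return __readmsr(0x1B); /* raises #GP if executed at CPL > 0 */
-}
-#endif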
-
-static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {
-  unsigned __LPTRINT_TYPE__ __cr3_val;
-  __asm__ __volatile__(
-                       "mov {%%cr3, %0|%0, cr3}"
-                       : "=r"(__cr3_val)
-                       :
-                       : "memory");
-  return __cr3_val;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
-  __asm__ ("mov {%0, %%cr3|cr3, %0}" : : "r"(__cr3_val) : "memory");
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#undef __LPTRINT_TYPE__
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __INTRIN_H */
-#endif /* _MSC_VER */
diff --git a/linux-x86/lib64/clang/14.0.2/include/keylockerintrin.h b/linux-x86/lib64/clang/14.0.2/include/keylockerintrin.h
deleted file mode 100644
index ad9428e..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/keylockerintrin.h
+++ /dev/null
@@ -1,530 +0,0 @@
-/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef _KEYLOCKERINTRIN_H
-#define _KEYLOCKERINTRIN_H
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__KL__)
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
-  __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
-                 __min_vector_width__(128)))
-
-/// Load the internal wrapping key (IWKey) from __intkey, __enkey_lo and
-/// __enkey_hi. __ctl is assigned to EAX, which specifies the KeySource and
-/// whether backing up the key is permitted. The 256-bit encryption key is
-/// loaded from the two explicit operands (__enkey_lo and __enkey_hi). The
-/// 128-bit integrity key is loaded from the implicit operand XMM0, which is
-/// assigned by __intkey.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
-///
-/// \operation
-/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
-///   GP (0)
-/// FI
-/// IF "LOADIWKEY exiting" VM execution control set
-///   VMexit
-/// FI
-/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
-///   GP (0)
-/// FI
-/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
-///   GP (0)
-/// FI
-/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
-///   GP (0)
-/// FI
-/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
-///   GP (0)
-/// FI
-/// IF (__ctl[4:1] == 0) // KeySource of 0.
-///   IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0]
-///   IWKey.IntegrityKey[127:0] := __intkey[127:0]
-///   IWKey.NoBackup := __ctl[0]
-///   IWKey.KeySource := __ctl[4:1]
-///   ZF := 0
-/// ELSE // KeySource of 1. See RDSEED definition for details of randomness
-///   IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received
-///     IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0]
-///     IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]
-///     IWKey.NoBackup := __ctl[0]
-///     IWKey.KeySource := __ctl[4:1]
-///     ZF := 0
-///   ELSE // Random data was not returned from RDSEED. IWKey was not loaded
-///     ZF := 1
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
-               __m128i __enkey_lo, __m128i __enkey_hi) {
-  __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);
-}
-
-/// Wrap a 128-bit AES key from __key into a key handle, store the handle in
-/// the three 128-bit blocks from ((__m128i*)__h) to ((__m128i*)__h) + 2, and
-/// return a 32-bit value reporting the IWKey's NoBackup and KeySource fields.
-/// The explicit source operand __htype specifies handle restrictions.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
-///
-/// \operation
-/// InputKey[127:0] := __key[127:0]
-/// KeyMetadata[2:0] := __htype[2:0]
-/// KeyMetadata[23:3] := 0 // Reserved for future usage
-/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)
-/// KeyMetadata[127:28] := 0 // Reserved for future usage
-/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],
-///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
-/// dst[0] := IWKey.NoBackup
-/// dst[4:1] := IWKey.KeySource[3:0]
-/// dst[31:5] := 0
-/// MEM[__h+127:__h] := Handle[127:0]   // AAD
-/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
-/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
-/// OF := 0
-/// SF := 0
-/// ZF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
-  return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
-}
-
-/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, store the
-/// handle in the four 128-bit blocks from ((__m128i*)__h) to
-/// ((__m128i*)__h) + 3, and return a 32-bit value reporting the IWKey's
-/// NoBackup and KeySource fields.
-/// The explicit source operand __htype specifies handle restrictions.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
-///
-/// \operation
-/// InputKey[127:0] := __key_lo[127:0]
-/// InputKey[255:128] := __key_hi[255:128]
-/// KeyMetadata[2:0] := __htype[2:0]
-/// KeyMetadata[23:3] := 0 // Reserved for future usage
-/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
-/// KeyMetadata[127:28] := 0 // Reserved for future usage
-/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
-///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
-/// dst[0] := IWKey.NoBackup
-/// dst[4:1] := IWKey.KeySource[3:0]
-/// dst[31:5] := 0
-/// MEM[__h+127:__h]   := Handle[127:0] // AAD
-/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
-/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
-/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
-/// OF := 0
-/// SF := 0
-/// ZF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
-                     void *__h) {
-  return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
-                                         (__v2di)__key_hi, __h);
-}
-
-/// The AESENC128KL instruction performs 10 rounds of AES to encrypt __idata
-/// using the 128-bit key in the handle at __h, stores the result in __odata,
-/// and returns the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
-///
-/// \operation
-/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
-/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[383:256] ||
-///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
-/// IF (IllegalHandle)
-///   ZF := 1
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
-///   IF (Authentic == 0)
-///     ZF := 1
-///   ELSE
-///     MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
-  return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
-}
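-
-/* Illustrative sketch, not part of the original header: a typical Key Locker
- * flow is to (1) load the internal wrapping key once (ring 0), (2) wrap the
- * real AES key into a handle with ENCODEKEY128, then (3) encrypt with the
- * handle instead of the raw key. __example_encrypt_block is a hypothetical
- * helper and assumes the OS has already loaded the IWKey. */
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_encrypt_block(__m128i __key, __m128i __in, __m128i *__out) {
-  __m128i __handle[3]; /* ENCODEKEY128 produces a 384-bit handle */
-  (void)_mm_encodekey128_u32(0 /* no restrictions */, __key, __handle);
-  /* Nonzero return (ZF set) means the handle was illegal or unauthentic. */
-  return _mm_aesenc128kl_u8(__out, __in, __handle);
-}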
-
-/// The AESENC256KL instruction performs 14 rounds of AES to encrypt __idata
-/// using the 256-bit key in the handle at __h, stores the result in __odata,
-/// and returns the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
-///
-/// \operation
-/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
-/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[255:128] ||
-///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
-/// IF (IllegalHandle)
-///   ZF := 1
-///   MEM[__odata+127:__odata] := 0
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
-///   IF (Authentic == 0)
-///     ZF := 1
-///     MEM[__odata+127:__odata] := 0
-///   ELSE
-///     MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
-  return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
-}
-
-/// The AESDEC128KL instruction performs 10 rounds of AES to decrypt __idata
-/// using the 128-bit key in the handle at __h, stores the result in __odata,
-/// and returns the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
-///
-/// \operation
-/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
-/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
-///                  (Handle[127:0] AND (CPL > 0)) ||
-///                  Handle[383:256] ||
-///                  HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
-/// IF (IllegalHandle)
-///   ZF := 1
-///   MEM[__odata+127:__odata] := 0
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
-///   IF (Authentic == 0)
-///     ZF := 1
-///     MEM[__odata+127:__odata] := 0
-///   ELSE
-///     MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
-  return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
-}
-
-/// The AESDEC256KL instruction performs 14 rounds of AES to decrypt __idata
-/// using the 256-bit key in the handle at __h, stores the result in __odata,
-/// and returns the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
-///
-/// \operation
-/// Handle[511:0] := MEM[__h+511:__h]
-/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
-///                   (Handle[127:0] AND (CPL > 0)) ||
-///                   Handle[383:256] ||
-///                   HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
-/// IF (IllegalHandle)
-///   ZF := 1
-///   MEM[__odata+127:__odata] := 0
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
-///   IF (Authentic == 0)
-///     ZF := 1
-///     MEM[__odata+127:__odata] := 0
-///   ELSE
-///     MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
-  return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
-          || defined(__KL__) */
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__WIDEKL__)
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
-  __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
-                 __min_vector_width__(128)))
-
-/// Encrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
-/// by the handle at __h, store each resulting block to __odata[0] through
-/// __odata[7], and return the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
-///
-/// \operation
-/// Handle[383:0] := MEM[__h+383:__h]
-/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[255:128] ||
-///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
-/// IF (IllegalHandle)
-///   ZF := 1
-///   FOR i := 0 to 7
-///     __odata[i] := 0
-///   ENDFOR
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
-///   IF Authentic == 0
-///     ZF := 1
-///     FOR i := 0 to 7
-///       __odata[i] := 0
-///     ENDFOR
-///   ELSE
-///     FOR i := 0 to 7
-///       __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
-///     ENDFOR
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
-  return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
-                                           (const __v2di *)__idata, __h);
-}
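-
-/* Illustrative sketch, not part of the original header: the wide form
- * encrypts eight independent blocks per instruction with a single handle,
- * e.g. for ECB or CTR-mode keystream generation. __example_encrypt8 is a
- * hypothetical wrapper. */
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-__example_encrypt8(__m128i __out[8], const __m128i __in[8],
-                   const void *__handle) {
-  /* Nonzero return (ZF set) means the handle was rejected. */
-  return _mm_aesencwide128kl_u8(__out, __in, __handle);
-}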
-
-/// Encrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
-/// by the handle at __h, store each resulting block to __odata[0] through
-/// __odata[7], and return the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
-///
-/// \operation
-/// Handle[511:0] := MEM[__h+511:__h]
-/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[255:128] ||
-///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
-/// IF (IllegalHandle)
-///   ZF := 1
-///   FOR i := 0 to 7
-///     __odata[i] := 0
-///   ENDFOR
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
-///   IF Authentic == 0
-///     ZF := 1
-///     FOR i := 0 to 7
-///       __odata[i] := 0
-///     ENDFOR
-///   ELSE
-///     FOR i := 0 to 7
-///       __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
-///     ENDFOR
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
-  return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
-                                           (const __v2di *)__idata, __h);
-}
-
-/// Decrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
-/// by the handle at __h, store each resulting block to __odata[0] through
-/// __odata[7], and return the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
-///
-/// \operation
-/// Handle[383:0] := MEM[__h+383:__h]
-/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[255:128] ||
-///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
-/// IF (IllegalHandle)
-///   ZF := 1
-///   FOR i := 0 to 7
-///     __odata[i] := 0
-///   ENDFOR
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
-///   IF Authentic == 0
-///     ZF := 1
-///     FOR i := 0 to 7
-///       __odata[i] := 0
-///     ENDFOR
-///   ELSE
-///     FOR i := 0 to 7
-///       __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
-///     ENDFOR
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
-  return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
-                                           (const __v2di *)__idata, __h);
-}
-
-/// Decrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
-/// by the handle at __h, store each resulting block to __odata[0] through
-/// __odata[7], and return the affected ZF flag status.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
-///
-/// \operation
-/// Handle[511:0] := MEM[__h+511:__h]
-/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
-///                    (Handle[127:0] AND (CPL > 0)) ||
-///                    Handle[255:128] ||
-///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
-/// IF (IllegalHandle)
-///   ZF := 1
-///   FOR i := 0 to 7
-///     __odata[i] := 0
-///   ENDFOR
-/// ELSE
-///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
-///   IF Authentic == 0
-///     ZF := 1
-///     FOR i := 0 to 7
-///       __odata[i] := 0
-///     ENDFOR
-///   ELSE
-///     FOR i := 0 to 7
-///       __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
-///     ENDFOR
-///     ZF := 0
-///   FI
-/// FI
-/// dst := ZF
-/// OF := 0
-/// SF := 0
-/// AF := 0
-/// PF := 0
-/// CF := 0
-/// \endoperation
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
-  return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
-                                           (const __v2di *)__idata, __h);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
-          || defined(__WIDEKL__) */
-
-#endif /* _KEYLOCKERINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/limits.h b/linux-x86/lib64/clang/14.0.2/include/limits.h
deleted file mode 100644
index c653580..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/limits.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*===---- limits.h - Standard header for integer sizes --------------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __CLANG_LIMITS_H
-#define __CLANG_LIMITS_H
-
-/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
-   Avert this #include_next madness. */
-#if defined __GNUC__ && !defined _GCC_LIMITS_H_
-#define _GCC_LIMITS_H_
-#endif
-
-/* System headers include a number of constants from POSIX in <limits.h>.
-   Include it if we're hosted. */
-#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
-#include_next <limits.h>
-#endif
-
-/* Many system headers try to "help us out" by defining these.  No really, we
-   know how big each datatype is. */
-#undef  SCHAR_MIN
-#undef  SCHAR_MAX
-#undef  UCHAR_MAX
-#undef  SHRT_MIN
-#undef  SHRT_MAX
-#undef  USHRT_MAX
-#undef  INT_MIN
-#undef  INT_MAX
-#undef  UINT_MAX
-#undef  LONG_MIN
-#undef  LONG_MAX
-#undef  ULONG_MAX
-
-#undef  CHAR_BIT
-#undef  CHAR_MIN
-#undef  CHAR_MAX
-
-/* C90/99 5.2.4.2.1 */
-#define SCHAR_MAX __SCHAR_MAX__
-#define SHRT_MAX  __SHRT_MAX__
-#define INT_MAX   __INT_MAX__
-#define LONG_MAX  __LONG_MAX__
-
-#define SCHAR_MIN (-__SCHAR_MAX__-1)
-#define SHRT_MIN  (-__SHRT_MAX__ -1)
-#define INT_MIN   (-__INT_MAX__  -1)
-#define LONG_MIN  (-__LONG_MAX__ -1L)
-
-#define UCHAR_MAX (__SCHAR_MAX__*2  +1)
-#define USHRT_MAX (__SHRT_MAX__ *2  +1)
-#define UINT_MAX  (__INT_MAX__  *2U +1U)
-#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
-
-#ifndef MB_LEN_MAX
-#define MB_LEN_MAX 1
-#endif
-
-#define CHAR_BIT  __CHAR_BIT__
-
-#ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
-#define CHAR_MIN 0
-#define CHAR_MAX UCHAR_MAX
-#else
-#define CHAR_MIN SCHAR_MIN
-#define CHAR_MAX __SCHAR_MAX__
-#endif
-
-/* C99 5.2.4.2.1: Added long long.
-   C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
- */
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
-
-#undef  LLONG_MIN
-#undef  LLONG_MAX
-#undef  ULLONG_MAX
-
-#define LLONG_MAX  __LONG_LONG_MAX__
-#define LLONG_MIN  (-__LONG_LONG_MAX__-1LL)
-#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
-#endif
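-
-/* Illustrative sketch, not part of the original header: on the two's
-   complement targets clang supports, the derived macros satisfy identities
-   like these; e.g. with CHAR_BIT == 8, UCHAR_MAX = 2*127 + 1 = 255. */
-#if __STDC_VERSION__ >= 201112L
-_Static_assert(UCHAR_MAX == 2 * SCHAR_MAX + 1, "UCHAR_MAX derivation");
-_Static_assert(INT_MIN == -INT_MAX - 1, "two's complement INT_MIN");
-#endif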
-
-/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension.  It's too bad
-   that we don't have something like #pragma poison that could be used to
-   deprecate a macro - the code should just use LLONG_MAX and friends.
- */
-#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
-
-#undef   LONG_LONG_MIN
-#undef   LONG_LONG_MAX
-#undef   ULONG_LONG_MAX
-
-#define LONG_LONG_MAX  __LONG_LONG_MAX__
-#define LONG_LONG_MIN  (-__LONG_LONG_MAX__-1LL)
-#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
-#endif
-
-#endif /* __CLANG_LIMITS_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/mm_malloc.h b/linux-x86/lib64/clang/14.0.2/include/mm_malloc.h
deleted file mode 100644
index 933dbaa..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/mm_malloc.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __MM_MALLOC_H
-#define __MM_MALLOC_H
-
-#include <stdlib.h>
-
-#ifdef _WIN32
-#include <malloc.h>
-#else
-#ifndef __cplusplus
-extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
-#else
-// Some systems (e.g. those with GNU libc) declare posix_memalign with an
-// exception specifier. Via an "egregious workaround" in
-// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
-// redeclaration of glibc's declaration.
-extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
-#endif
-#endif
-
-#if !(defined(_WIN32) && defined(_mm_malloc))
-static __inline__ void *__attribute__((__always_inline__, __nodebug__,
-                                       __malloc__))
-_mm_malloc(size_t __size, size_t __align)
-{
-  if (__align == 1) {
-    return malloc(__size);
-  }
-
-  if (!(__align & (__align - 1)) && __align < sizeof(void *))
-    __align = sizeof(void *);
-
-  void *__mallocedMemory;
-#if defined(__MINGW32__)
-  __mallocedMemory = __mingw_aligned_malloc(__size, __align);
-#elif defined(_WIN32)
-  __mallocedMemory = _aligned_malloc(__size, __align);
-#else
-  if (posix_memalign(&__mallocedMemory, __align, __size))
-    return 0;
-#endif
-
-  return __mallocedMemory;
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-_mm_free(void *__p)
-{
-#if defined(__MINGW32__)
-  __mingw_aligned_free(__p);
-#elif defined(_WIN32)
-  _aligned_free(__p);
-#else
-  free(__p);
-#endif
-}
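-
-/* Illustrative sketch, not part of the original header: allocating
- * 32-byte-aligned storage suitable for 256-bit vector loads. Memory from
- * _mm_malloc must be released with _mm_free (not free), since on Windows it
- * comes from _aligned_malloc; __example_alloc_avx is a hypothetical name. */
-static __inline__ void *__attribute__((__always_inline__, __nodebug__))
-__example_alloc_avx(size_t __nbytes)
-{
-  return _mm_malloc(__nbytes, 32); /* alignment must be a power of two */
-}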
-#endif
-
-#endif /* __MM_MALLOC_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h
deleted file mode 100644
index 9c81ddb..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ /dev/null
@@ -1,775 +0,0 @@
-//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_BASE_H_
-#define _OPENCL_BASE_H_
-
-// Define extension macros
-
-#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
-// For SPIR and SPIR-V all extensions are supported.
-#if defined(__SPIR__) || defined(__SPIRV__)
-#define cl_khr_subgroup_extended_types 1
-#define cl_khr_subgroup_non_uniform_vote 1
-#define cl_khr_subgroup_ballot 1
-#define cl_khr_subgroup_non_uniform_arithmetic 1
-#define cl_khr_subgroup_shuffle 1
-#define cl_khr_subgroup_shuffle_relative 1
-#define cl_khr_subgroup_clustered_reduce 1
-#define cl_khr_extended_bit_ops 1
-#define cl_khr_integer_dot_product 1
-#define __opencl_c_integer_dot_product_input_4x8bit 1
-#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
-#define cl_ext_float_atomics 1
-#ifdef cl_khr_fp16
-#define __opencl_c_ext_fp16_global_atomic_load_store 1
-#define __opencl_c_ext_fp16_local_atomic_load_store 1
-#define __opencl_c_ext_fp16_global_atomic_add 1
-#define __opencl_c_ext_fp16_local_atomic_add 1
-#define __opencl_c_ext_fp16_global_atomic_min_max 1
-#define __opencl_c_ext_fp16_local_atomic_min_max 1
-#endif
-#ifdef cl_khr_fp64
-#define __opencl_c_ext_fp64_global_atomic_add 1
-#define __opencl_c_ext_fp64_local_atomic_add 1
-#define __opencl_c_ext_fp64_global_atomic_min_max 1
-#define __opencl_c_ext_fp64_local_atomic_min_max 1
-#endif
-#define __opencl_c_ext_fp32_global_atomic_add 1
-#define __opencl_c_ext_fp32_local_atomic_add 1
-#define __opencl_c_ext_fp32_global_atomic_min_max 1
-#define __opencl_c_ext_fp32_local_atomic_min_max 1
-
-#endif // defined(__SPIR__) || defined(__SPIRV__)
-#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
-
-// Define feature macros for OpenCL C 2.0
-#if (__OPENCL_CPP_VERSION__ == 100 || __OPENCL_C_VERSION__ == 200)
-#define __opencl_c_pipes 1
-#define __opencl_c_generic_address_space 1
-#define __opencl_c_work_group_collective_functions 1
-#define __opencl_c_atomic_order_acq_rel 1
-#define __opencl_c_atomic_order_seq_cst 1
-#define __opencl_c_atomic_scope_device 1
-#define __opencl_c_atomic_scope_all_devices 1
-#define __opencl_c_device_enqueue 1
-#define __opencl_c_read_write_images 1
-#define __opencl_c_program_scope_global_variables 1
-#define __opencl_c_images 1
-#endif
-
-// Define header-only feature macros for OpenCL C 3.0.
-#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
-// For the SPIR and SPIR-V target all features are supported.
-#if defined(__SPIR__) || defined(__SPIRV__)
-#define __opencl_c_atomic_scope_all_devices 1
-#endif // defined(__SPIR__)
-#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
-
-// built-in scalar data types:
-
-/**
- * An unsigned 8-bit integer.
- */
-typedef unsigned char uchar;
-
-/**
- * An unsigned 16-bit integer.
- */
-typedef unsigned short ushort;
-
-/**
- * An unsigned 32-bit integer.
- */
-typedef unsigned int uint;
-
-/**
- * An unsigned 64-bit integer.
- */
-typedef unsigned long ulong;
-
-/**
- * The unsigned integer type of the result of the sizeof operator. This
- * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
- * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if
- * CL_DEVICE_ADDRESS_BITS is 64-bits.
- */
-typedef __SIZE_TYPE__ size_t;
-
-/**
- * A signed integer type that is the result of subtracting two pointers.
- * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
- * defined in table 4.3 is 32-bits and is a 64-bit signed integer if
- * CL_DEVICE_ADDRESS_BITS is 64-bits.
- */
-typedef __PTRDIFF_TYPE__ ptrdiff_t;
-
-/**
- * A signed integer type with the property that any valid pointer to
- * void can be converted to this type, then converted back to pointer
- * to void, and the result will compare equal to the original pointer.
- */
-typedef __INTPTR_TYPE__ intptr_t;
-
-/**
- * An unsigned integer type with the property that any valid pointer to
- * void can be converted to this type, then converted back to pointer
- * to void, and the result will compare equal to the original pointer.
- */
-typedef __UINTPTR_TYPE__ uintptr_t;
-
-// built-in vector data types:
-typedef char char2 __attribute__((ext_vector_type(2)));
-typedef char char3 __attribute__((ext_vector_type(3)));
-typedef char char4 __attribute__((ext_vector_type(4)));
-typedef char char8 __attribute__((ext_vector_type(8)));
-typedef char char16 __attribute__((ext_vector_type(16)));
-typedef uchar uchar2 __attribute__((ext_vector_type(2)));
-typedef uchar uchar3 __attribute__((ext_vector_type(3)));
-typedef uchar uchar4 __attribute__((ext_vector_type(4)));
-typedef uchar uchar8 __attribute__((ext_vector_type(8)));
-typedef uchar uchar16 __attribute__((ext_vector_type(16)));
-typedef short short2 __attribute__((ext_vector_type(2)));
-typedef short short3 __attribute__((ext_vector_type(3)));
-typedef short short4 __attribute__((ext_vector_type(4)));
-typedef short short8 __attribute__((ext_vector_type(8)));
-typedef short short16 __attribute__((ext_vector_type(16)));
-typedef ushort ushort2 __attribute__((ext_vector_type(2)));
-typedef ushort ushort3 __attribute__((ext_vector_type(3)));
-typedef ushort ushort4 __attribute__((ext_vector_type(4)));
-typedef ushort ushort8 __attribute__((ext_vector_type(8)));
-typedef ushort ushort16 __attribute__((ext_vector_type(16)));
-typedef int int2 __attribute__((ext_vector_type(2)));
-typedef int int3 __attribute__((ext_vector_type(3)));
-typedef int int4 __attribute__((ext_vector_type(4)));
-typedef int int8 __attribute__((ext_vector_type(8)));
-typedef int int16 __attribute__((ext_vector_type(16)));
-typedef uint uint2 __attribute__((ext_vector_type(2)));
-typedef uint uint3 __attribute__((ext_vector_type(3)));
-typedef uint uint4 __attribute__((ext_vector_type(4)));
-typedef uint uint8 __attribute__((ext_vector_type(8)));
-typedef uint uint16 __attribute__((ext_vector_type(16)));
-typedef long long2 __attribute__((ext_vector_type(2)));
-typedef long long3 __attribute__((ext_vector_type(3)));
-typedef long long4 __attribute__((ext_vector_type(4)));
-typedef long long8 __attribute__((ext_vector_type(8)));
-typedef long long16 __attribute__((ext_vector_type(16)));
-typedef ulong ulong2 __attribute__((ext_vector_type(2)));
-typedef ulong ulong3 __attribute__((ext_vector_type(3)));
-typedef ulong ulong4 __attribute__((ext_vector_type(4)));
-typedef ulong ulong8 __attribute__((ext_vector_type(8)));
-typedef ulong ulong16 __attribute__((ext_vector_type(16)));
-typedef float float2 __attribute__((ext_vector_type(2)));
-typedef float float3 __attribute__((ext_vector_type(3)));
-typedef float float4 __attribute__((ext_vector_type(4)));
-typedef float float8 __attribute__((ext_vector_type(8)));
-typedef float float16 __attribute__((ext_vector_type(16)));
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-typedef half half2 __attribute__((ext_vector_type(2)));
-typedef half half3 __attribute__((ext_vector_type(3)));
-typedef half half4 __attribute__((ext_vector_type(4)));
-typedef half half8 __attribute__((ext_vector_type(8)));
-typedef half half16 __attribute__((ext_vector_type(16)));
-#endif
-#ifdef cl_khr_fp64
-#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-#endif
-typedef double double2 __attribute__((ext_vector_type(2)));
-typedef double double3 __attribute__((ext_vector_type(3)));
-typedef double double4 __attribute__((ext_vector_type(4)));
-typedef double double8 __attribute__((ext_vector_type(8)));
-typedef double double16 __attribute__((ext_vector_type(16)));
-#endif
-
-#if defined(__OPENCL_CPP_VERSION__)
-#define NULL nullptr
-#elif defined(__OPENCL_C_VERSION__)
-#define NULL ((void*)0)
-#endif
-
-/**
- * Value of maximum non-infinite single-precision floating-point
- * number.
- */
-#define MAXFLOAT 0x1.fffffep127f
-
-/**
- * A positive float constant expression. HUGE_VALF evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VALF (__builtin_huge_valf())
-
-/**
- * A positive double constant expression. HUGE_VAL evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VAL (__builtin_huge_val())
-
-/**
- * A constant expression of type float representing positive or
- * unsigned infinity.
- */
-#define INFINITY (__builtin_inff())
-
-/**
- * A constant expression of type float representing a quiet NaN.
- */
-#define NAN as_float(INT_MAX)
-
-#define FP_ILOGB0    INT_MIN
-#define FP_ILOGBNAN  INT_MAX
-
-#define FLT_DIG 6
-#define FLT_MANT_DIG 24
-#define FLT_MAX_10_EXP +38
-#define FLT_MAX_EXP +128
-#define FLT_MIN_10_EXP -37
-#define FLT_MIN_EXP -125
-#define FLT_RADIX 2
-#define FLT_MAX 0x1.fffffep127f
-#define FLT_MIN 0x1.0p-126f
-#define FLT_EPSILON 0x1.0p-23f
-
-#define M_E_F         2.71828182845904523536028747135266250f
-#define M_LOG2E_F     1.44269504088896340735992468100189214f
-#define M_LOG10E_F    0.434294481903251827651128918916605082f
-#define M_LN2_F       0.693147180559945309417232121458176568f
-#define M_LN10_F      2.30258509299404568401799145468436421f
-#define M_PI_F        3.14159265358979323846264338327950288f
-#define M_PI_2_F      1.57079632679489661923132169163975144f
-#define M_PI_4_F      0.785398163397448309615660845819875721f
-#define M_1_PI_F      0.318309886183790671537767526745028724f
-#define M_2_PI_F      0.636619772367581343075535053490057448f
-#define M_2_SQRTPI_F  1.12837916709551257389615890312154517f
-#define M_SQRT2_F     1.41421356237309504880168872420969808f
-#define M_SQRT1_2_F   0.707106781186547524400844362104849039f
-
-#define DBL_DIG 15
-#define DBL_MANT_DIG 53
-#define DBL_MAX_10_EXP +308
-#define DBL_MAX_EXP +1024
-#define DBL_MIN_10_EXP -307
-#define DBL_MIN_EXP -1021
-#define DBL_RADIX 2
-#define DBL_MAX 0x1.fffffffffffffp1023
-#define DBL_MIN 0x1.0p-1022
-#define DBL_EPSILON 0x1.0p-52
-
-#define M_E           0x1.5bf0a8b145769p+1
-#define M_LOG2E       0x1.71547652b82fep+0
-#define M_LOG10E      0x1.bcb7b1526e50ep-2
-#define M_LN2         0x1.62e42fefa39efp-1
-#define M_LN10        0x1.26bb1bbb55516p+1
-#define M_PI          0x1.921fb54442d18p+1
-#define M_PI_2        0x1.921fb54442d18p+0
-#define M_PI_4        0x1.921fb54442d18p-1
-#define M_1_PI        0x1.45f306dc9c883p-2
-#define M_2_PI        0x1.45f306dc9c883p-1
-#define M_2_SQRTPI    0x1.20dd750429b6dp+0
-#define M_SQRT2       0x1.6a09e667f3bcdp+0
-#define M_SQRT1_2     0x1.6a09e667f3bcdp-1
-
-#ifdef cl_khr_fp16
-
-#define HALF_DIG 3
-#define HALF_MANT_DIG 11
-#define HALF_MAX_10_EXP +4
-#define HALF_MAX_EXP +16
-#define HALF_MIN_10_EXP -4
-#define HALF_MIN_EXP -13
-#define HALF_RADIX 2
-#define HALF_MAX ((0x1.ffcp15h))
-#define HALF_MIN ((0x1.0p-14h))
-#define HALF_EPSILON ((0x1.0p-10h))
-
-#define M_E_H         2.71828182845904523536028747135266250h
-#define M_LOG2E_H     1.44269504088896340735992468100189214h
-#define M_LOG10E_H    0.434294481903251827651128918916605082h
-#define M_LN2_H       0.693147180559945309417232121458176568h
-#define M_LN10_H      2.30258509299404568401799145468436421h
-#define M_PI_H        3.14159265358979323846264338327950288h
-#define M_PI_2_H      1.57079632679489661923132169163975144h
-#define M_PI_4_H      0.785398163397448309615660845819875721h
-#define M_1_PI_H      0.318309886183790671537767526745028724h
-#define M_2_PI_H      0.636619772367581343075535053490057448h
-#define M_2_SQRTPI_H  1.12837916709551257389615890312154517h
-#define M_SQRT2_H     1.41421356237309504880168872420969808h
-#define M_SQRT1_2_H   0.707106781186547524400844362104849039h
-
-#endif //cl_khr_fp16
-
-#define CHAR_BIT  8
-#define SCHAR_MAX 127
-#define SCHAR_MIN (-128)
-#define UCHAR_MAX 255
-#define CHAR_MAX  SCHAR_MAX
-#define CHAR_MIN  SCHAR_MIN
-#define USHRT_MAX 65535
-#define SHRT_MAX  32767
-#define SHRT_MIN  (-32768)
-#define UINT_MAX  0xffffffff
-#define INT_MAX   2147483647
-#define INT_MIN   (-2147483647-1)
-#define ULONG_MAX 0xffffffffffffffffUL
-#define LONG_MAX  0x7fffffffffffffffL
-#define LONG_MIN  (-0x7fffffffffffffffL-1)
-
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
-typedef uint cl_mem_fence_flags;
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to local memory
- */
-#define CLK_LOCAL_MEM_FENCE    0x01
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to global memory
- */
-#define CLK_GLOBAL_MEM_FENCE   0x02
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-typedef enum memory_scope {
-  memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
-  memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
-  memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
-#if defined(__opencl_c_atomic_scope_all_devices)
-  memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-  memory_scope_all_devices = memory_scope_all_svm_devices,
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_scope_all_devices)
-/**
- * Subgroups have different requirements on forward progress, so just test
- * all the relevant macros.
- * CL 3.0 sub-groups: "they are not guaranteed to make independent forward progress"
- * KHR subgroups: "Subgroups within a workgroup are independent, make forward progress with respect to each other"
- */
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-  memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
-#endif
-} memory_scope;
-
-/**
- * Queue a memory fence to ensure correct ordering of memory
- * operations between work-items of a work-group to
- * image memory.
- */
-#define CLK_IMAGE_MEM_FENCE  0x04
-
-#ifndef ATOMIC_VAR_INIT
-#define ATOMIC_VAR_INIT(x) (x)
-#endif //ATOMIC_VAR_INIT
-#define ATOMIC_FLAG_INIT 0
-
-// enum values aligned with what clang uses in EmitAtomicExpr()
-typedef enum memory_order
-{
-  memory_order_relaxed = __ATOMIC_RELAXED,
-  memory_order_acquire = __ATOMIC_ACQUIRE,
-  memory_order_release = __ATOMIC_RELEASE,
-  memory_order_acq_rel = __ATOMIC_ACQ_REL,
-#if defined(__opencl_c_atomic_order_seq_cst)
-  memory_order_seq_cst = __ATOMIC_SEQ_CST
-#endif
-} memory_order;
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
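The memory_scope and memory_order enums above feed directly into the explicit atomic builtins. A minimal OpenCL C 2.0 sketch (the kernel name and argument are illustrative):

__kernel void tally(__global atomic_int *counter) {
  /* Relaxed ordering is enough for a pure counter that is only read
     after the kernel completes; the scope covers the whole device. */
  atomic_fetch_add_explicit(counter, 1,
                            memory_order_relaxed, memory_scope_device);
}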
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-// These values need to match the runtime equivalent
-//
-// Addressing Mode.
-//
-#define CLK_ADDRESS_NONE                0
-#define CLK_ADDRESS_CLAMP_TO_EDGE       2
-#define CLK_ADDRESS_CLAMP               4
-#define CLK_ADDRESS_REPEAT              6
-#define CLK_ADDRESS_MIRRORED_REPEAT     8
-
-//
-// Coordinate Normalization
-//
-#define CLK_NORMALIZED_COORDS_FALSE     0
-#define CLK_NORMALIZED_COORDS_TRUE      1
-
-//
-// Filtering Mode.
-//
-#define CLK_FILTER_NEAREST              0x10
-#define CLK_FILTER_LINEAR               0x20
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-//
-// Channel Datatype.
-//
-#define CLK_SNORM_INT8        0x10D0
-#define CLK_SNORM_INT16       0x10D1
-#define CLK_UNORM_INT8        0x10D2
-#define CLK_UNORM_INT16       0x10D3
-#define CLK_UNORM_SHORT_565   0x10D4
-#define CLK_UNORM_SHORT_555   0x10D5
-#define CLK_UNORM_INT_101010  0x10D6
-#define CLK_SIGNED_INT8       0x10D7
-#define CLK_SIGNED_INT16      0x10D8
-#define CLK_SIGNED_INT32      0x10D9
-#define CLK_UNSIGNED_INT8     0x10DA
-#define CLK_UNSIGNED_INT16    0x10DB
-#define CLK_UNSIGNED_INT32    0x10DC
-#define CLK_HALF_FLOAT        0x10DD
-#define CLK_FLOAT             0x10DE
-#define CLK_UNORM_INT24       0x10DF
-
-// Channel order, numbering must be aligned with cl_channel_order in cl.h
-//
-#define CLK_R         0x10B0
-#define CLK_A         0x10B1
-#define CLK_RG        0x10B2
-#define CLK_RA        0x10B3
-#define CLK_RGB       0x10B4
-#define CLK_RGBA      0x10B5
-#define CLK_BGRA      0x10B6
-#define CLK_ARGB      0x10B7
-#define CLK_INTENSITY 0x10B8
-#define CLK_LUMINANCE 0x10B9
-#define CLK_Rx                0x10BA
-#define CLK_RGx               0x10BB
-#define CLK_RGBx              0x10BC
-#define CLK_DEPTH             0x10BD
-#define CLK_DEPTH_STENCIL     0x10BE
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define CLK_sRGB              0x10BF
-#define CLK_sRGBx             0x10C0
-#define CLK_sRGBA             0x10C1
-#define CLK_sBGRA             0x10C2
-#define CLK_ABGR              0x10C3
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
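The addressing, normalization, and filtering constants above combine with bitwise OR to build a sampler. A small illustrative kernel:

__constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
                           CLK_ADDRESS_CLAMP_TO_EDGE |
                           CLK_FILTER_NEAREST;

__kernel void fetch_px(read_only image2d_t img, __global float4 *out) {
  int2 pos = (int2)(get_global_id(0), get_global_id(1));
  out[pos.y * get_image_width(img) + pos.x] = read_imagef(img, smp, pos);
}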
-
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#define CL_COMPLETE                                 0x0
-#define CL_RUNNING                                  0x1
-#define CL_SUBMITTED                                0x2
-#define CL_QUEUED                                   0x3
-
-#define CLK_SUCCESS                                 0
-#define CLK_ENQUEUE_FAILURE                         -101
-#define CLK_INVALID_QUEUE                           -102
-#define CLK_INVALID_NDRANGE                         -160
-#define CLK_INVALID_EVENT_WAIT_LIST                 -57
-#define CLK_DEVICE_QUEUE_FULL                       -161
-#define CLK_INVALID_ARG_SIZE                        -51
-#define CLK_EVENT_ALLOCATION_FAILURE                -100
-#define CLK_OUT_OF_RESOURCES                        -5
-
-#define CLK_NULL_QUEUE                              0
-#define CLK_NULL_EVENT (__builtin_astype(((__SIZE_MAX__)), clk_event_t))
-
-// execution model related definitions
-#define CLK_ENQUEUE_FLAGS_NO_WAIT                   0x0
-#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL               0x1
-#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP           0x2
-
-typedef int kernel_enqueue_flags_t;
-typedef int clk_profiling_info;
-
-// Profiling info name (see capture_event_profiling_info)
-#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
-
-#define MAX_WORK_DIM 3
-
-typedef struct {
-  unsigned int workDimension;
-  size_t globalWorkOffset[MAX_WORK_DIM];
-  size_t globalWorkSize[MAX_WORK_DIM];
-  size_t localWorkSize[MAX_WORK_DIM];
-} ndrange_t;
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
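The enqueue flags, error codes, and ndrange_t above support OpenCL 2.0 device-side enqueue. A sketch of a parent kernel launching a child block (assumes the host has created a default on-device queue):

__kernel void parent(__global int *data) {
  queue_t q = get_default_queue();
  ndrange_t nd = ndrange_1D(64);
  int err = enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, nd,
                           ^{ data[get_global_id(0)] += 1; });
  /* err is CLK_SUCCESS on success, e.g. CLK_DEVICE_QUEUE_FULL otherwise. */
  (void)err;
}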
-
-/**
- * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
- * Reinterprets a data type as another data type of the same size
- */
-#define as_char(x) __builtin_astype((x), char)
-#define as_char2(x) __builtin_astype((x), char2)
-#define as_char3(x) __builtin_astype((x), char3)
-#define as_char4(x) __builtin_astype((x), char4)
-#define as_char8(x) __builtin_astype((x), char8)
-#define as_char16(x) __builtin_astype((x), char16)
-
-#define as_uchar(x) __builtin_astype((x), uchar)
-#define as_uchar2(x) __builtin_astype((x), uchar2)
-#define as_uchar3(x) __builtin_astype((x), uchar3)
-#define as_uchar4(x) __builtin_astype((x), uchar4)
-#define as_uchar8(x) __builtin_astype((x), uchar8)
-#define as_uchar16(x) __builtin_astype((x), uchar16)
-
-#define as_short(x) __builtin_astype((x), short)
-#define as_short2(x) __builtin_astype((x), short2)
-#define as_short3(x) __builtin_astype((x), short3)
-#define as_short4(x) __builtin_astype((x), short4)
-#define as_short8(x) __builtin_astype((x), short8)
-#define as_short16(x) __builtin_astype((x), short16)
-
-#define as_ushort(x) __builtin_astype((x), ushort)
-#define as_ushort2(x) __builtin_astype((x), ushort2)
-#define as_ushort3(x) __builtin_astype((x), ushort3)
-#define as_ushort4(x) __builtin_astype((x), ushort4)
-#define as_ushort8(x) __builtin_astype((x), ushort8)
-#define as_ushort16(x) __builtin_astype((x), ushort16)
-
-#define as_int(x) __builtin_astype((x), int)
-#define as_int2(x) __builtin_astype((x), int2)
-#define as_int3(x) __builtin_astype((x), int3)
-#define as_int4(x) __builtin_astype((x), int4)
-#define as_int8(x) __builtin_astype((x), int8)
-#define as_int16(x) __builtin_astype((x), int16)
-
-#define as_uint(x) __builtin_astype((x), uint)
-#define as_uint2(x) __builtin_astype((x), uint2)
-#define as_uint3(x) __builtin_astype((x), uint3)
-#define as_uint4(x) __builtin_astype((x), uint4)
-#define as_uint8(x) __builtin_astype((x), uint8)
-#define as_uint16(x) __builtin_astype((x), uint16)
-
-#define as_long(x) __builtin_astype((x), long)
-#define as_long2(x) __builtin_astype((x), long2)
-#define as_long3(x) __builtin_astype((x), long3)
-#define as_long4(x) __builtin_astype((x), long4)
-#define as_long8(x) __builtin_astype((x), long8)
-#define as_long16(x) __builtin_astype((x), long16)
-
-#define as_ulong(x) __builtin_astype((x), ulong)
-#define as_ulong2(x) __builtin_astype((x), ulong2)
-#define as_ulong3(x) __builtin_astype((x), ulong3)
-#define as_ulong4(x) __builtin_astype((x), ulong4)
-#define as_ulong8(x) __builtin_astype((x), ulong8)
-#define as_ulong16(x) __builtin_astype((x), ulong16)
-
-#define as_float(x) __builtin_astype((x), float)
-#define as_float2(x) __builtin_astype((x), float2)
-#define as_float3(x) __builtin_astype((x), float3)
-#define as_float4(x) __builtin_astype((x), float4)
-#define as_float8(x) __builtin_astype((x), float8)
-#define as_float16(x) __builtin_astype((x), float16)
-
-#ifdef cl_khr_fp64
-#define as_double(x) __builtin_astype((x), double)
-#define as_double2(x) __builtin_astype((x), double2)
-#define as_double3(x) __builtin_astype((x), double3)
-#define as_double4(x) __builtin_astype((x), double4)
-#define as_double8(x) __builtin_astype((x), double8)
-#define as_double16(x) __builtin_astype((x), double16)
-#endif // cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#define as_half(x) __builtin_astype((x), half)
-#define as_half2(x) __builtin_astype((x), half2)
-#define as_half3(x) __builtin_astype((x), half3)
-#define as_half4(x) __builtin_astype((x), half4)
-#define as_half8(x) __builtin_astype((x), half8)
-#define as_half16(x) __builtin_astype((x), half16)
-#endif // cl_khr_fp16
-
-#define as_size_t(x) __builtin_astype((x), size_t)
-#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)
-#define as_intptr_t(x) __builtin_astype((x), intptr_t)
-#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
-
-// C++ for OpenCL - __remove_address_space
-#if defined(__OPENCL_CPP_VERSION__)
-template <typename _Tp> struct __remove_address_space { using type = _Tp; };
-template <typename _Tp> struct __remove_address_space<__generic _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__global _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__private _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__local _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__constant _Tp> {
-  using type = _Tp;
-};
-#endif
-
-// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
-
-#define __kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
-#define kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-
-int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
-#endif
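The __kernel_exec helper above bundles the work-group-size and vector-type hints, and printf is available from OpenCL C 1.2 onward. A combined illustrative kernel:

/* Hints: this kernel is tuned for a work-group size of 64 and float4 math. */
__kernel_exec(64, float4) void debug_copy(__global const float4 *in,
                                          __global float4 *out) {
  size_t i = get_global_id(0);
  out[i] = in[i];
  if (i == 0)
    printf("first element: %f\n", in[0].x);
}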
-
-#ifdef cl_intel_device_side_avc_motion_estimation
-
-#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
-#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
-
-#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
-#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
-#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
-#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
-
-#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
-#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
-#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
-#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
-#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
-#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
-#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
-
-#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
-#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
-#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
-
-#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
-#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
-#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
-#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
-#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
-#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
-#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
-
-#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
-#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
-
-#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
-#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
-#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
-
-#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
-#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
-#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
-#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
-
-#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
-#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
-#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
-#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
-#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
-
-#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
-#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
-#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
-#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
-
-#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
-#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
-#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
-
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
-
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
-
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
-
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
-
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
-
-#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
-#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
-#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
-
-#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
-#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
-
-#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-
-#endif // cl_intel_device_side_avc_motion_estimation
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#endif //_OPENCL_BASE_H_
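Tying several of the base definitions together — the vector types, the as_type reinterpretation macros, and the local-memory fence flag — a short illustrative kernel (barrier itself is declared in opencl-c.h):

__kernel void bit_pattern(__global const float4 *in, __global uint4 *out,
                          __local float4 *scratch) {
  size_t gid = get_global_id(0);
  size_t lid = get_local_id(0);

  scratch[lid] = in[gid];       /* stage the value through local memory */
  barrier(CLK_LOCAL_MEM_FENCE); /* order local-memory accesses in the group */

  out[gid] = as_uint4(scratch[lid]); /* reinterpret float bits as uint bits */
}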
diff --git a/linux-x86/lib64/clang/14.0.2/include/opencl-c.h b/linux-x86/lib64/clang/14.0.2/include/opencl-c.h
deleted file mode 100644
index 32af848..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/opencl-c.h
+++ /dev/null
@@ -1,18498 +0,0 @@
-//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_H_
-#define _OPENCL_H_
-
-#include "opencl-c-base.h"
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifndef cl_khr_depth_images
-#define cl_khr_depth_images
-#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
-#ifdef cl_khr_3d_image_writes
-#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
-#endif //cl_khr_3d_image_writes
-#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-
-#if (defined(__OPENCL_CPP_VERSION__) ||                                        \
-     (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&                              \
-    (defined(__SPIR__) || defined(__SPIRV__))
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // (defined(__OPENCL_CPP_VERSION__) ||
-       //  (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
-       // (defined(__SPIR__) || defined(__SPIRV__))
-
-#define __ovld __attribute__((overloadable))
-#define __conv __attribute__((convergent))
-
-// Optimizations
-#define __purefn __attribute__((pure))
-#define __cnfn __attribute__((const))
-
-
-// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
-
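Each declaration below follows the pattern convert_<dest>[_sat][_<rounding>], where _rte rounds to nearest even, _rtz toward zero, _rtp toward +infinity, _rtn toward -infinity, and _sat clamps to the destination range. For example, in an illustrative kernel:

__kernel void quantize(__global const float *in, __global char *out) {
  size_t i = get_global_id(0);
  /* Round to nearest even, then saturate into [-128, 127]. */
  out[i] = convert_char_sat_rte(in[i]);
}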
-char __ovld __cnfn convert_char_rte(char);
-char __ovld __cnfn convert_char_sat_rte(char);
-char __ovld __cnfn convert_char_rtz(char);
-char __ovld __cnfn convert_char_sat_rtz(char);
-char __ovld __cnfn convert_char_rtp(char);
-char __ovld __cnfn convert_char_sat_rtp(char);
-char __ovld __cnfn convert_char_rtn(char);
-char __ovld __cnfn convert_char_sat_rtn(char);
-char __ovld __cnfn convert_char(char);
-char __ovld __cnfn convert_char_sat(char);
-char __ovld __cnfn convert_char_rte(uchar);
-char __ovld __cnfn convert_char_sat_rte(uchar);
-char __ovld __cnfn convert_char_rtz(uchar);
-char __ovld __cnfn convert_char_sat_rtz(uchar);
-char __ovld __cnfn convert_char_rtp(uchar);
-char __ovld __cnfn convert_char_sat_rtp(uchar);
-char __ovld __cnfn convert_char_rtn(uchar);
-char __ovld __cnfn convert_char_sat_rtn(uchar);
-char __ovld __cnfn convert_char(uchar);
-char __ovld __cnfn convert_char_sat(uchar);
-char __ovld __cnfn convert_char_rte(short);
-char __ovld __cnfn convert_char_sat_rte(short);
-char __ovld __cnfn convert_char_rtz(short);
-char __ovld __cnfn convert_char_sat_rtz(short);
-char __ovld __cnfn convert_char_rtp(short);
-char __ovld __cnfn convert_char_sat_rtp(short);
-char __ovld __cnfn convert_char_rtn(short);
-char __ovld __cnfn convert_char_sat_rtn(short);
-char __ovld __cnfn convert_char(short);
-char __ovld __cnfn convert_char_sat(short);
-char __ovld __cnfn convert_char_rte(ushort);
-char __ovld __cnfn convert_char_sat_rte(ushort);
-char __ovld __cnfn convert_char_rtz(ushort);
-char __ovld __cnfn convert_char_sat_rtz(ushort);
-char __ovld __cnfn convert_char_rtp(ushort);
-char __ovld __cnfn convert_char_sat_rtp(ushort);
-char __ovld __cnfn convert_char_rtn(ushort);
-char __ovld __cnfn convert_char_sat_rtn(ushort);
-char __ovld __cnfn convert_char(ushort);
-char __ovld __cnfn convert_char_sat(ushort);
-char __ovld __cnfn convert_char_rte(int);
-char __ovld __cnfn convert_char_sat_rte(int);
-char __ovld __cnfn convert_char_rtz(int);
-char __ovld __cnfn convert_char_sat_rtz(int);
-char __ovld __cnfn convert_char_rtp(int);
-char __ovld __cnfn convert_char_sat_rtp(int);
-char __ovld __cnfn convert_char_rtn(int);
-char __ovld __cnfn convert_char_sat_rtn(int);
-char __ovld __cnfn convert_char(int);
-char __ovld __cnfn convert_char_sat(int);
-char __ovld __cnfn convert_char_rte(uint);
-char __ovld __cnfn convert_char_sat_rte(uint);
-char __ovld __cnfn convert_char_rtz(uint);
-char __ovld __cnfn convert_char_sat_rtz(uint);
-char __ovld __cnfn convert_char_rtp(uint);
-char __ovld __cnfn convert_char_sat_rtp(uint);
-char __ovld __cnfn convert_char_rtn(uint);
-char __ovld __cnfn convert_char_sat_rtn(uint);
-char __ovld __cnfn convert_char(uint);
-char __ovld __cnfn convert_char_sat(uint);
-char __ovld __cnfn convert_char_rte(long);
-char __ovld __cnfn convert_char_sat_rte(long);
-char __ovld __cnfn convert_char_rtz(long);
-char __ovld __cnfn convert_char_sat_rtz(long);
-char __ovld __cnfn convert_char_rtp(long);
-char __ovld __cnfn convert_char_sat_rtp(long);
-char __ovld __cnfn convert_char_rtn(long);
-char __ovld __cnfn convert_char_sat_rtn(long);
-char __ovld __cnfn convert_char(long);
-char __ovld __cnfn convert_char_sat(long);
-char __ovld __cnfn convert_char_rte(ulong);
-char __ovld __cnfn convert_char_sat_rte(ulong);
-char __ovld __cnfn convert_char_rtz(ulong);
-char __ovld __cnfn convert_char_sat_rtz(ulong);
-char __ovld __cnfn convert_char_rtp(ulong);
-char __ovld __cnfn convert_char_sat_rtp(ulong);
-char __ovld __cnfn convert_char_rtn(ulong);
-char __ovld __cnfn convert_char_sat_rtn(ulong);
-char __ovld __cnfn convert_char(ulong);
-char __ovld __cnfn convert_char_sat(ulong);
-char __ovld __cnfn convert_char_rte(float);
-char __ovld __cnfn convert_char_sat_rte(float);
-char __ovld __cnfn convert_char_rtz(float);
-char __ovld __cnfn convert_char_sat_rtz(float);
-char __ovld __cnfn convert_char_rtp(float);
-char __ovld __cnfn convert_char_sat_rtp(float);
-char __ovld __cnfn convert_char_rtn(float);
-char __ovld __cnfn convert_char_sat_rtn(float);
-char __ovld __cnfn convert_char(float);
-char __ovld __cnfn convert_char_sat(float);
-uchar __ovld __cnfn convert_uchar_rte(char);
-uchar __ovld __cnfn convert_uchar_sat_rte(char);
-uchar __ovld __cnfn convert_uchar_rtz(char);
-uchar __ovld __cnfn convert_uchar_sat_rtz(char);
-uchar __ovld __cnfn convert_uchar_rtp(char);
-uchar __ovld __cnfn convert_uchar_sat_rtp(char);
-uchar __ovld __cnfn convert_uchar_rtn(char);
-uchar __ovld __cnfn convert_uchar_sat_rtn(char);
-uchar __ovld __cnfn convert_uchar(char);
-uchar __ovld __cnfn convert_uchar_sat(char);
-uchar __ovld __cnfn convert_uchar_rte(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
-uchar __ovld __cnfn convert_uchar_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_rtn(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
-uchar __ovld __cnfn convert_uchar(uchar);
-uchar __ovld __cnfn convert_uchar_sat(uchar);
-uchar __ovld __cnfn convert_uchar_rte(short);
-uchar __ovld __cnfn convert_uchar_sat_rte(short);
-uchar __ovld __cnfn convert_uchar_rtz(short);
-uchar __ovld __cnfn convert_uchar_sat_rtz(short);
-uchar __ovld __cnfn convert_uchar_rtp(short);
-uchar __ovld __cnfn convert_uchar_sat_rtp(short);
-uchar __ovld __cnfn convert_uchar_rtn(short);
-uchar __ovld __cnfn convert_uchar_sat_rtn(short);
-uchar __ovld __cnfn convert_uchar(short);
-uchar __ovld __cnfn convert_uchar_sat(short);
-uchar __ovld __cnfn convert_uchar_rte(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
-uchar __ovld __cnfn convert_uchar_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_rtn(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
-uchar __ovld __cnfn convert_uchar(ushort);
-uchar __ovld __cnfn convert_uchar_sat(ushort);
-uchar __ovld __cnfn convert_uchar_rte(int);
-uchar __ovld __cnfn convert_uchar_sat_rte(int);
-uchar __ovld __cnfn convert_uchar_rtz(int);
-uchar __ovld __cnfn convert_uchar_sat_rtz(int);
-uchar __ovld __cnfn convert_uchar_rtp(int);
-uchar __ovld __cnfn convert_uchar_sat_rtp(int);
-uchar __ovld __cnfn convert_uchar_rtn(int);
-uchar __ovld __cnfn convert_uchar_sat_rtn(int);
-uchar __ovld __cnfn convert_uchar(int);
-uchar __ovld __cnfn convert_uchar_sat(int);
-uchar __ovld __cnfn convert_uchar_rte(uint);
-uchar __ovld __cnfn convert_uchar_sat_rte(uint);
-uchar __ovld __cnfn convert_uchar_rtz(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
-uchar __ovld __cnfn convert_uchar_rtp(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
-uchar __ovld __cnfn convert_uchar_rtn(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
-uchar __ovld __cnfn convert_uchar(uint);
-uchar __ovld __cnfn convert_uchar_sat(uint);
-uchar __ovld __cnfn convert_uchar_rte(long);
-uchar __ovld __cnfn convert_uchar_sat_rte(long);
-uchar __ovld __cnfn convert_uchar_rtz(long);
-uchar __ovld __cnfn convert_uchar_sat_rtz(long);
-uchar __ovld __cnfn convert_uchar_rtp(long);
-uchar __ovld __cnfn convert_uchar_sat_rtp(long);
-uchar __ovld __cnfn convert_uchar_rtn(long);
-uchar __ovld __cnfn convert_uchar_sat_rtn(long);
-uchar __ovld __cnfn convert_uchar(long);
-uchar __ovld __cnfn convert_uchar_sat(long);
-uchar __ovld __cnfn convert_uchar_rte(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
-uchar __ovld __cnfn convert_uchar_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_rtn(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
-uchar __ovld __cnfn convert_uchar(ulong);
-uchar __ovld __cnfn convert_uchar_sat(ulong);
-uchar __ovld __cnfn convert_uchar_rte(float);
-uchar __ovld __cnfn convert_uchar_sat_rte(float);
-uchar __ovld __cnfn convert_uchar_rtz(float);
-uchar __ovld __cnfn convert_uchar_sat_rtz(float);
-uchar __ovld __cnfn convert_uchar_rtp(float);
-uchar __ovld __cnfn convert_uchar_sat_rtp(float);
-uchar __ovld __cnfn convert_uchar_rtn(float);
-uchar __ovld __cnfn convert_uchar_sat_rtn(float);
-uchar __ovld __cnfn convert_uchar(float);
-uchar __ovld __cnfn convert_uchar_sat(float);
-
-short __ovld __cnfn convert_short_rte(char);
-short __ovld __cnfn convert_short_sat_rte(char);
-short __ovld __cnfn convert_short_rtz(char);
-short __ovld __cnfn convert_short_sat_rtz(char);
-short __ovld __cnfn convert_short_rtp(char);
-short __ovld __cnfn convert_short_sat_rtp(char);
-short __ovld __cnfn convert_short_rtn(char);
-short __ovld __cnfn convert_short_sat_rtn(char);
-short __ovld __cnfn convert_short(char);
-short __ovld __cnfn convert_short_sat(char);
-short __ovld __cnfn convert_short_rte(uchar);
-short __ovld __cnfn convert_short_sat_rte(uchar);
-short __ovld __cnfn convert_short_rtz(uchar);
-short __ovld __cnfn convert_short_sat_rtz(uchar);
-short __ovld __cnfn convert_short_rtp(uchar);
-short __ovld __cnfn convert_short_sat_rtp(uchar);
-short __ovld __cnfn convert_short_rtn(uchar);
-short __ovld __cnfn convert_short_sat_rtn(uchar);
-short __ovld __cnfn convert_short(uchar);
-short __ovld __cnfn convert_short_sat(uchar);
-short __ovld __cnfn convert_short_rte(short);
-short __ovld __cnfn convert_short_sat_rte(short);
-short __ovld __cnfn convert_short_rtz(short);
-short __ovld __cnfn convert_short_sat_rtz(short);
-short __ovld __cnfn convert_short_rtp(short);
-short __ovld __cnfn convert_short_sat_rtp(short);
-short __ovld __cnfn convert_short_rtn(short);
-short __ovld __cnfn convert_short_sat_rtn(short);
-short __ovld __cnfn convert_short(short);
-short __ovld __cnfn convert_short_sat(short);
-short __ovld __cnfn convert_short_rte(ushort);
-short __ovld __cnfn convert_short_sat_rte(ushort);
-short __ovld __cnfn convert_short_rtz(ushort);
-short __ovld __cnfn convert_short_sat_rtz(ushort);
-short __ovld __cnfn convert_short_rtp(ushort);
-short __ovld __cnfn convert_short_sat_rtp(ushort);
-short __ovld __cnfn convert_short_rtn(ushort);
-short __ovld __cnfn convert_short_sat_rtn(ushort);
-short __ovld __cnfn convert_short(ushort);
-short __ovld __cnfn convert_short_sat(ushort);
-short __ovld __cnfn convert_short_rte(int);
-short __ovld __cnfn convert_short_sat_rte(int);
-short __ovld __cnfn convert_short_rtz(int);
-short __ovld __cnfn convert_short_sat_rtz(int);
-short __ovld __cnfn convert_short_rtp(int);
-short __ovld __cnfn convert_short_sat_rtp(int);
-short __ovld __cnfn convert_short_rtn(int);
-short __ovld __cnfn convert_short_sat_rtn(int);
-short __ovld __cnfn convert_short(int);
-short __ovld __cnfn convert_short_sat(int);
-short __ovld __cnfn convert_short_rte(uint);
-short __ovld __cnfn convert_short_sat_rte(uint);
-short __ovld __cnfn convert_short_rtz(uint);
-short __ovld __cnfn convert_short_sat_rtz(uint);
-short __ovld __cnfn convert_short_rtp(uint);
-short __ovld __cnfn convert_short_sat_rtp(uint);
-short __ovld __cnfn convert_short_rtn(uint);
-short __ovld __cnfn convert_short_sat_rtn(uint);
-short __ovld __cnfn convert_short(uint);
-short __ovld __cnfn convert_short_sat(uint);
-short __ovld __cnfn convert_short_rte(long);
-short __ovld __cnfn convert_short_sat_rte(long);
-short __ovld __cnfn convert_short_rtz(long);
-short __ovld __cnfn convert_short_sat_rtz(long);
-short __ovld __cnfn convert_short_rtp(long);
-short __ovld __cnfn convert_short_sat_rtp(long);
-short __ovld __cnfn convert_short_rtn(long);
-short __ovld __cnfn convert_short_sat_rtn(long);
-short __ovld __cnfn convert_short(long);
-short __ovld __cnfn convert_short_sat(long);
-short __ovld __cnfn convert_short_rte(ulong);
-short __ovld __cnfn convert_short_sat_rte(ulong);
-short __ovld __cnfn convert_short_rtz(ulong);
-short __ovld __cnfn convert_short_sat_rtz(ulong);
-short __ovld __cnfn convert_short_rtp(ulong);
-short __ovld __cnfn convert_short_sat_rtp(ulong);
-short __ovld __cnfn convert_short_rtn(ulong);
-short __ovld __cnfn convert_short_sat_rtn(ulong);
-short __ovld __cnfn convert_short(ulong);
-short __ovld __cnfn convert_short_sat(ulong);
-short __ovld __cnfn convert_short_rte(float);
-short __ovld __cnfn convert_short_sat_rte(float);
-short __ovld __cnfn convert_short_rtz(float);
-short __ovld __cnfn convert_short_sat_rtz(float);
-short __ovld __cnfn convert_short_rtp(float);
-short __ovld __cnfn convert_short_sat_rtp(float);
-short __ovld __cnfn convert_short_rtn(float);
-short __ovld __cnfn convert_short_sat_rtn(float);
-short __ovld __cnfn convert_short(float);
-short __ovld __cnfn convert_short_sat(float);
-ushort __ovld __cnfn convert_ushort_rte(char);
-ushort __ovld __cnfn convert_ushort_sat_rte(char);
-ushort __ovld __cnfn convert_ushort_rtz(char);
-ushort __ovld __cnfn convert_ushort_sat_rtz(char);
-ushort __ovld __cnfn convert_ushort_rtp(char);
-ushort __ovld __cnfn convert_ushort_sat_rtp(char);
-ushort __ovld __cnfn convert_ushort_rtn(char);
-ushort __ovld __cnfn convert_ushort_sat_rtn(char);
-ushort __ovld __cnfn convert_ushort(char);
-ushort __ovld __cnfn convert_ushort_sat(char);
-ushort __ovld __cnfn convert_ushort_rte(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
-ushort __ovld __cnfn convert_ushort_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_rtn(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
-ushort __ovld __cnfn convert_ushort(uchar);
-ushort __ovld __cnfn convert_ushort_sat(uchar);
-ushort __ovld __cnfn convert_ushort_rte(short);
-ushort __ovld __cnfn convert_ushort_sat_rte(short);
-ushort __ovld __cnfn convert_ushort_rtz(short);
-ushort __ovld __cnfn convert_ushort_sat_rtz(short);
-ushort __ovld __cnfn convert_ushort_rtp(short);
-ushort __ovld __cnfn convert_ushort_sat_rtp(short);
-ushort __ovld __cnfn convert_ushort_rtn(short);
-ushort __ovld __cnfn convert_ushort_sat_rtn(short);
-ushort __ovld __cnfn convert_ushort(short);
-ushort __ovld __cnfn convert_ushort_sat(short);
-ushort __ovld __cnfn convert_ushort_rte(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
-ushort __ovld __cnfn convert_ushort_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_rtn(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
-ushort __ovld __cnfn convert_ushort(ushort);
-ushort __ovld __cnfn convert_ushort_sat(ushort);
-ushort __ovld __cnfn convert_ushort_rte(int);
-ushort __ovld __cnfn convert_ushort_sat_rte(int);
-ushort __ovld __cnfn convert_ushort_rtz(int);
-ushort __ovld __cnfn convert_ushort_sat_rtz(int);
-ushort __ovld __cnfn convert_ushort_rtp(int);
-ushort __ovld __cnfn convert_ushort_sat_rtp(int);
-ushort __ovld __cnfn convert_ushort_rtn(int);
-ushort __ovld __cnfn convert_ushort_sat_rtn(int);
-ushort __ovld __cnfn convert_ushort(int);
-ushort __ovld __cnfn convert_ushort_sat(int);
-ushort __ovld __cnfn convert_ushort_rte(uint);
-ushort __ovld __cnfn convert_ushort_sat_rte(uint);
-ushort __ovld __cnfn convert_ushort_rtz(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
-ushort __ovld __cnfn convert_ushort_rtp(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
-ushort __ovld __cnfn convert_ushort_rtn(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
-ushort __ovld __cnfn convert_ushort(uint);
-ushort __ovld __cnfn convert_ushort_sat(uint);
-ushort __ovld __cnfn convert_ushort_rte(long);
-ushort __ovld __cnfn convert_ushort_sat_rte(long);
-ushort __ovld __cnfn convert_ushort_rtz(long);
-ushort __ovld __cnfn convert_ushort_sat_rtz(long);
-ushort __ovld __cnfn convert_ushort_rtp(long);
-ushort __ovld __cnfn convert_ushort_sat_rtp(long);
-ushort __ovld __cnfn convert_ushort_rtn(long);
-ushort __ovld __cnfn convert_ushort_sat_rtn(long);
-ushort __ovld __cnfn convert_ushort(long);
-ushort __ovld __cnfn convert_ushort_sat(long);
-ushort __ovld __cnfn convert_ushort_rte(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
-ushort __ovld __cnfn convert_ushort_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_rtn(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
-ushort __ovld __cnfn convert_ushort(ulong);
-ushort __ovld __cnfn convert_ushort_sat(ulong);
-ushort __ovld __cnfn convert_ushort_rte(float);
-ushort __ovld __cnfn convert_ushort_sat_rte(float);
-ushort __ovld __cnfn convert_ushort_rtz(float);
-ushort __ovld __cnfn convert_ushort_sat_rtz(float);
-ushort __ovld __cnfn convert_ushort_rtp(float);
-ushort __ovld __cnfn convert_ushort_sat_rtp(float);
-ushort __ovld __cnfn convert_ushort_rtn(float);
-ushort __ovld __cnfn convert_ushort_sat_rtn(float);
-ushort __ovld __cnfn convert_ushort(float);
-ushort __ovld __cnfn convert_ushort_sat(float);
-int __ovld __cnfn convert_int_rte(char);
-int __ovld __cnfn convert_int_sat_rte(char);
-int __ovld __cnfn convert_int_rtz(char);
-int __ovld __cnfn convert_int_sat_rtz(char);
-int __ovld __cnfn convert_int_rtp(char);
-int __ovld __cnfn convert_int_sat_rtp(char);
-int __ovld __cnfn convert_int_rtn(char);
-int __ovld __cnfn convert_int_sat_rtn(char);
-int __ovld __cnfn convert_int(char);
-int __ovld __cnfn convert_int_sat(char);
-int __ovld __cnfn convert_int_rte(uchar);
-int __ovld __cnfn convert_int_sat_rte(uchar);
-int __ovld __cnfn convert_int_rtz(uchar);
-int __ovld __cnfn convert_int_sat_rtz(uchar);
-int __ovld __cnfn convert_int_rtp(uchar);
-int __ovld __cnfn convert_int_sat_rtp(uchar);
-int __ovld __cnfn convert_int_rtn(uchar);
-int __ovld __cnfn convert_int_sat_rtn(uchar);
-int __ovld __cnfn convert_int(uchar);
-int __ovld __cnfn convert_int_sat(uchar);
-int __ovld __cnfn convert_int_rte(short);
-int __ovld __cnfn convert_int_sat_rte(short);
-int __ovld __cnfn convert_int_rtz(short);
-int __ovld __cnfn convert_int_sat_rtz(short);
-int __ovld __cnfn convert_int_rtp(short);
-int __ovld __cnfn convert_int_sat_rtp(short);
-int __ovld __cnfn convert_int_rtn(short);
-int __ovld __cnfn convert_int_sat_rtn(short);
-int __ovld __cnfn convert_int(short);
-int __ovld __cnfn convert_int_sat(short);
-int __ovld __cnfn convert_int_rte(ushort);
-int __ovld __cnfn convert_int_sat_rte(ushort);
-int __ovld __cnfn convert_int_rtz(ushort);
-int __ovld __cnfn convert_int_sat_rtz(ushort);
-int __ovld __cnfn convert_int_rtp(ushort);
-int __ovld __cnfn convert_int_sat_rtp(ushort);
-int __ovld __cnfn convert_int_rtn(ushort);
-int __ovld __cnfn convert_int_sat_rtn(ushort);
-int __ovld __cnfn convert_int(ushort);
-int __ovld __cnfn convert_int_sat(ushort);
-int __ovld __cnfn convert_int_rte(int);
-int __ovld __cnfn convert_int_sat_rte(int);
-int __ovld __cnfn convert_int_rtz(int);
-int __ovld __cnfn convert_int_sat_rtz(int);
-int __ovld __cnfn convert_int_rtp(int);
-int __ovld __cnfn convert_int_sat_rtp(int);
-int __ovld __cnfn convert_int_rtn(int);
-int __ovld __cnfn convert_int_sat_rtn(int);
-int __ovld __cnfn convert_int(int);
-int __ovld __cnfn convert_int_sat(int);
-int __ovld __cnfn convert_int_rte(uint);
-int __ovld __cnfn convert_int_sat_rte(uint);
-int __ovld __cnfn convert_int_rtz(uint);
-int __ovld __cnfn convert_int_sat_rtz(uint);
-int __ovld __cnfn convert_int_rtp(uint);
-int __ovld __cnfn convert_int_sat_rtp(uint);
-int __ovld __cnfn convert_int_rtn(uint);
-int __ovld __cnfn convert_int_sat_rtn(uint);
-int __ovld __cnfn convert_int(uint);
-int __ovld __cnfn convert_int_sat(uint);
-int __ovld __cnfn convert_int_rte(long);
-int __ovld __cnfn convert_int_sat_rte(long);
-int __ovld __cnfn convert_int_rtz(long);
-int __ovld __cnfn convert_int_sat_rtz(long);
-int __ovld __cnfn convert_int_rtp(long);
-int __ovld __cnfn convert_int_sat_rtp(long);
-int __ovld __cnfn convert_int_rtn(long);
-int __ovld __cnfn convert_int_sat_rtn(long);
-int __ovld __cnfn convert_int(long);
-int __ovld __cnfn convert_int_sat(long);
-int __ovld __cnfn convert_int_rte(ulong);
-int __ovld __cnfn convert_int_sat_rte(ulong);
-int __ovld __cnfn convert_int_rtz(ulong);
-int __ovld __cnfn convert_int_sat_rtz(ulong);
-int __ovld __cnfn convert_int_rtp(ulong);
-int __ovld __cnfn convert_int_sat_rtp(ulong);
-int __ovld __cnfn convert_int_rtn(ulong);
-int __ovld __cnfn convert_int_sat_rtn(ulong);
-int __ovld __cnfn convert_int(ulong);
-int __ovld __cnfn convert_int_sat(ulong);
-int __ovld __cnfn convert_int_rte(float);
-int __ovld __cnfn convert_int_sat_rte(float);
-int __ovld __cnfn convert_int_rtz(float);
-int __ovld __cnfn convert_int_sat_rtz(float);
-int __ovld __cnfn convert_int_rtp(float);
-int __ovld __cnfn convert_int_sat_rtp(float);
-int __ovld __cnfn convert_int_rtn(float);
-int __ovld __cnfn convert_int_sat_rtn(float);
-int __ovld __cnfn convert_int(float);
-int __ovld __cnfn convert_int_sat(float);
-uint __ovld __cnfn convert_uint_rte(char);
-uint __ovld __cnfn convert_uint_sat_rte(char);
-uint __ovld __cnfn convert_uint_rtz(char);
-uint __ovld __cnfn convert_uint_sat_rtz(char);
-uint __ovld __cnfn convert_uint_rtp(char);
-uint __ovld __cnfn convert_uint_sat_rtp(char);
-uint __ovld __cnfn convert_uint_rtn(char);
-uint __ovld __cnfn convert_uint_sat_rtn(char);
-uint __ovld __cnfn convert_uint(char);
-uint __ovld __cnfn convert_uint_sat(char);
-uint __ovld __cnfn convert_uint_rte(uchar);
-uint __ovld __cnfn convert_uint_sat_rte(uchar);
-uint __ovld __cnfn convert_uint_rtz(uchar);
-uint __ovld __cnfn convert_uint_sat_rtz(uchar);
-uint __ovld __cnfn convert_uint_rtp(uchar);
-uint __ovld __cnfn convert_uint_sat_rtp(uchar);
-uint __ovld __cnfn convert_uint_rtn(uchar);
-uint __ovld __cnfn convert_uint_sat_rtn(uchar);
-uint __ovld __cnfn convert_uint(uchar);
-uint __ovld __cnfn convert_uint_sat(uchar);
-uint __ovld __cnfn convert_uint_rte(short);
-uint __ovld __cnfn convert_uint_sat_rte(short);
-uint __ovld __cnfn convert_uint_rtz(short);
-uint __ovld __cnfn convert_uint_sat_rtz(short);
-uint __ovld __cnfn convert_uint_rtp(short);
-uint __ovld __cnfn convert_uint_sat_rtp(short);
-uint __ovld __cnfn convert_uint_rtn(short);
-uint __ovld __cnfn convert_uint_sat_rtn(short);
-uint __ovld __cnfn convert_uint(short);
-uint __ovld __cnfn convert_uint_sat(short);
-uint __ovld __cnfn convert_uint_rte(ushort);
-uint __ovld __cnfn convert_uint_sat_rte(ushort);
-uint __ovld __cnfn convert_uint_rtz(ushort);
-uint __ovld __cnfn convert_uint_sat_rtz(ushort);
-uint __ovld __cnfn convert_uint_rtp(ushort);
-uint __ovld __cnfn convert_uint_sat_rtp(ushort);
-uint __ovld __cnfn convert_uint_rtn(ushort);
-uint __ovld __cnfn convert_uint_sat_rtn(ushort);
-uint __ovld __cnfn convert_uint(ushort);
-uint __ovld __cnfn convert_uint_sat(ushort);
-uint __ovld __cnfn convert_uint_rte(int);
-uint __ovld __cnfn convert_uint_sat_rte(int);
-uint __ovld __cnfn convert_uint_rtz(int);
-uint __ovld __cnfn convert_uint_sat_rtz(int);
-uint __ovld __cnfn convert_uint_rtp(int);
-uint __ovld __cnfn convert_uint_sat_rtp(int);
-uint __ovld __cnfn convert_uint_rtn(int);
-uint __ovld __cnfn convert_uint_sat_rtn(int);
-uint __ovld __cnfn convert_uint(int);
-uint __ovld __cnfn convert_uint_sat(int);
-uint __ovld __cnfn convert_uint_rte(uint);
-uint __ovld __cnfn convert_uint_sat_rte(uint);
-uint __ovld __cnfn convert_uint_rtz(uint);
-uint __ovld __cnfn convert_uint_sat_rtz(uint);
-uint __ovld __cnfn convert_uint_rtp(uint);
-uint __ovld __cnfn convert_uint_sat_rtp(uint);
-uint __ovld __cnfn convert_uint_rtn(uint);
-uint __ovld __cnfn convert_uint_sat_rtn(uint);
-uint __ovld __cnfn convert_uint(uint);
-uint __ovld __cnfn convert_uint_sat(uint);
-uint __ovld __cnfn convert_uint_rte(long);
-uint __ovld __cnfn convert_uint_sat_rte(long);
-uint __ovld __cnfn convert_uint_rtz(long);
-uint __ovld __cnfn convert_uint_sat_rtz(long);
-uint __ovld __cnfn convert_uint_rtp(long);
-uint __ovld __cnfn convert_uint_sat_rtp(long);
-uint __ovld __cnfn convert_uint_rtn(long);
-uint __ovld __cnfn convert_uint_sat_rtn(long);
-uint __ovld __cnfn convert_uint(long);
-uint __ovld __cnfn convert_uint_sat(long);
-uint __ovld __cnfn convert_uint_rte(ulong);
-uint __ovld __cnfn convert_uint_sat_rte(ulong);
-uint __ovld __cnfn convert_uint_rtz(ulong);
-uint __ovld __cnfn convert_uint_sat_rtz(ulong);
-uint __ovld __cnfn convert_uint_rtp(ulong);
-uint __ovld __cnfn convert_uint_sat_rtp(ulong);
-uint __ovld __cnfn convert_uint_rtn(ulong);
-uint __ovld __cnfn convert_uint_sat_rtn(ulong);
-uint __ovld __cnfn convert_uint(ulong);
-uint __ovld __cnfn convert_uint_sat(ulong);
-uint __ovld __cnfn convert_uint_rte(float);
-uint __ovld __cnfn convert_uint_sat_rte(float);
-uint __ovld __cnfn convert_uint_rtz(float);
-uint __ovld __cnfn convert_uint_sat_rtz(float);
-uint __ovld __cnfn convert_uint_rtp(float);
-uint __ovld __cnfn convert_uint_sat_rtp(float);
-uint __ovld __cnfn convert_uint_rtn(float);
-uint __ovld __cnfn convert_uint_sat_rtn(float);
-uint __ovld __cnfn convert_uint(float);
-uint __ovld __cnfn convert_uint_sat(float);
-long __ovld __cnfn convert_long_rte(char);
-long __ovld __cnfn convert_long_sat_rte(char);
-long __ovld __cnfn convert_long_rtz(char);
-long __ovld __cnfn convert_long_sat_rtz(char);
-long __ovld __cnfn convert_long_rtp(char);
-long __ovld __cnfn convert_long_sat_rtp(char);
-long __ovld __cnfn convert_long_rtn(char);
-long __ovld __cnfn convert_long_sat_rtn(char);
-long __ovld __cnfn convert_long(char);
-long __ovld __cnfn convert_long_sat(char);
-long __ovld __cnfn convert_long_rte(uchar);
-long __ovld __cnfn convert_long_sat_rte(uchar);
-long __ovld __cnfn convert_long_rtz(uchar);
-long __ovld __cnfn convert_long_sat_rtz(uchar);
-long __ovld __cnfn convert_long_rtp(uchar);
-long __ovld __cnfn convert_long_sat_rtp(uchar);
-long __ovld __cnfn convert_long_rtn(uchar);
-long __ovld __cnfn convert_long_sat_rtn(uchar);
-long __ovld __cnfn convert_long(uchar);
-long __ovld __cnfn convert_long_sat(uchar);
-long __ovld __cnfn convert_long_rte(short);
-long __ovld __cnfn convert_long_sat_rte(short);
-long __ovld __cnfn convert_long_rtz(short);
-long __ovld __cnfn convert_long_sat_rtz(short);
-long __ovld __cnfn convert_long_rtp(short);
-long __ovld __cnfn convert_long_sat_rtp(short);
-long __ovld __cnfn convert_long_rtn(short);
-long __ovld __cnfn convert_long_sat_rtn(short);
-long __ovld __cnfn convert_long(short);
-long __ovld __cnfn convert_long_sat(short);
-long __ovld __cnfn convert_long_rte(ushort);
-long __ovld __cnfn convert_long_sat_rte(ushort);
-long __ovld __cnfn convert_long_rtz(ushort);
-long __ovld __cnfn convert_long_sat_rtz(ushort);
-long __ovld __cnfn convert_long_rtp(ushort);
-long __ovld __cnfn convert_long_sat_rtp(ushort);
-long __ovld __cnfn convert_long_rtn(ushort);
-long __ovld __cnfn convert_long_sat_rtn(ushort);
-long __ovld __cnfn convert_long(ushort);
-long __ovld __cnfn convert_long_sat(ushort);
-long __ovld __cnfn convert_long_rte(int);
-long __ovld __cnfn convert_long_sat_rte(int);
-long __ovld __cnfn convert_long_rtz(int);
-long __ovld __cnfn convert_long_sat_rtz(int);
-long __ovld __cnfn convert_long_rtp(int);
-long __ovld __cnfn convert_long_sat_rtp(int);
-long __ovld __cnfn convert_long_rtn(int);
-long __ovld __cnfn convert_long_sat_rtn(int);
-long __ovld __cnfn convert_long(int);
-long __ovld __cnfn convert_long_sat(int);
-long __ovld __cnfn convert_long_rte(uint);
-long __ovld __cnfn convert_long_sat_rte(uint);
-long __ovld __cnfn convert_long_rtz(uint);
-long __ovld __cnfn convert_long_sat_rtz(uint);
-long __ovld __cnfn convert_long_rtp(uint);
-long __ovld __cnfn convert_long_sat_rtp(uint);
-long __ovld __cnfn convert_long_rtn(uint);
-long __ovld __cnfn convert_long_sat_rtn(uint);
-long __ovld __cnfn convert_long(uint);
-long __ovld __cnfn convert_long_sat(uint);
-long __ovld __cnfn convert_long_rte(long);
-long __ovld __cnfn convert_long_sat_rte(long);
-long __ovld __cnfn convert_long_rtz(long);
-long __ovld __cnfn convert_long_sat_rtz(long);
-long __ovld __cnfn convert_long_rtp(long);
-long __ovld __cnfn convert_long_sat_rtp(long);
-long __ovld __cnfn convert_long_rtn(long);
-long __ovld __cnfn convert_long_sat_rtn(long);
-long __ovld __cnfn convert_long(long);
-long __ovld __cnfn convert_long_sat(long);
-long __ovld __cnfn convert_long_rte(ulong);
-long __ovld __cnfn convert_long_sat_rte(ulong);
-long __ovld __cnfn convert_long_rtz(ulong);
-long __ovld __cnfn convert_long_sat_rtz(ulong);
-long __ovld __cnfn convert_long_rtp(ulong);
-long __ovld __cnfn convert_long_sat_rtp(ulong);
-long __ovld __cnfn convert_long_rtn(ulong);
-long __ovld __cnfn convert_long_sat_rtn(ulong);
-long __ovld __cnfn convert_long(ulong);
-long __ovld __cnfn convert_long_sat(ulong);
-long __ovld __cnfn convert_long_rte(float);
-long __ovld __cnfn convert_long_sat_rte(float);
-long __ovld __cnfn convert_long_rtz(float);
-long __ovld __cnfn convert_long_sat_rtz(float);
-long __ovld __cnfn convert_long_rtp(float);
-long __ovld __cnfn convert_long_sat_rtp(float);
-long __ovld __cnfn convert_long_rtn(float);
-long __ovld __cnfn convert_long_sat_rtn(float);
-long __ovld __cnfn convert_long(float);
-long __ovld __cnfn convert_long_sat(float);
-ulong __ovld __cnfn convert_ulong_rte(char);
-ulong __ovld __cnfn convert_ulong_sat_rte(char);
-ulong __ovld __cnfn convert_ulong_rtz(char);
-ulong __ovld __cnfn convert_ulong_sat_rtz(char);
-ulong __ovld __cnfn convert_ulong_rtp(char);
-ulong __ovld __cnfn convert_ulong_sat_rtp(char);
-ulong __ovld __cnfn convert_ulong_rtn(char);
-ulong __ovld __cnfn convert_ulong_sat_rtn(char);
-ulong __ovld __cnfn convert_ulong(char);
-ulong __ovld __cnfn convert_ulong_sat(char);
-ulong __ovld __cnfn convert_ulong_rte(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
-ulong __ovld __cnfn convert_ulong_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_rtn(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
-ulong __ovld __cnfn convert_ulong(uchar);
-ulong __ovld __cnfn convert_ulong_sat(uchar);
-ulong __ovld __cnfn convert_ulong_rte(short);
-ulong __ovld __cnfn convert_ulong_sat_rte(short);
-ulong __ovld __cnfn convert_ulong_rtz(short);
-ulong __ovld __cnfn convert_ulong_sat_rtz(short);
-ulong __ovld __cnfn convert_ulong_rtp(short);
-ulong __ovld __cnfn convert_ulong_sat_rtp(short);
-ulong __ovld __cnfn convert_ulong_rtn(short);
-ulong __ovld __cnfn convert_ulong_sat_rtn(short);
-ulong __ovld __cnfn convert_ulong(short);
-ulong __ovld __cnfn convert_ulong_sat(short);
-ulong __ovld __cnfn convert_ulong_rte(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
-ulong __ovld __cnfn convert_ulong_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_rtn(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
-ulong __ovld __cnfn convert_ulong(ushort);
-ulong __ovld __cnfn convert_ulong_sat(ushort);
-ulong __ovld __cnfn convert_ulong_rte(int);
-ulong __ovld __cnfn convert_ulong_sat_rte(int);
-ulong __ovld __cnfn convert_ulong_rtz(int);
-ulong __ovld __cnfn convert_ulong_sat_rtz(int);
-ulong __ovld __cnfn convert_ulong_rtp(int);
-ulong __ovld __cnfn convert_ulong_sat_rtp(int);
-ulong __ovld __cnfn convert_ulong_rtn(int);
-ulong __ovld __cnfn convert_ulong_sat_rtn(int);
-ulong __ovld __cnfn convert_ulong(int);
-ulong __ovld __cnfn convert_ulong_sat(int);
-ulong __ovld __cnfn convert_ulong_rte(uint);
-ulong __ovld __cnfn convert_ulong_sat_rte(uint);
-ulong __ovld __cnfn convert_ulong_rtz(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
-ulong __ovld __cnfn convert_ulong_rtp(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
-ulong __ovld __cnfn convert_ulong_rtn(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
-ulong __ovld __cnfn convert_ulong(uint);
-ulong __ovld __cnfn convert_ulong_sat(uint);
-ulong __ovld __cnfn convert_ulong_rte(long);
-ulong __ovld __cnfn convert_ulong_sat_rte(long);
-ulong __ovld __cnfn convert_ulong_rtz(long);
-ulong __ovld __cnfn convert_ulong_sat_rtz(long);
-ulong __ovld __cnfn convert_ulong_rtp(long);
-ulong __ovld __cnfn convert_ulong_sat_rtp(long);
-ulong __ovld __cnfn convert_ulong_rtn(long);
-ulong __ovld __cnfn convert_ulong_sat_rtn(long);
-ulong __ovld __cnfn convert_ulong(long);
-ulong __ovld __cnfn convert_ulong_sat(long);
-ulong __ovld __cnfn convert_ulong_rte(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
-ulong __ovld __cnfn convert_ulong_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_rtn(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
-ulong __ovld __cnfn convert_ulong(ulong);
-ulong __ovld __cnfn convert_ulong_sat(ulong);
-ulong __ovld __cnfn convert_ulong_rte(float);
-ulong __ovld __cnfn convert_ulong_sat_rte(float);
-ulong __ovld __cnfn convert_ulong_rtz(float);
-ulong __ovld __cnfn convert_ulong_sat_rtz(float);
-ulong __ovld __cnfn convert_ulong_rtp(float);
-ulong __ovld __cnfn convert_ulong_sat_rtp(float);
-ulong __ovld __cnfn convert_ulong_rtn(float);
-ulong __ovld __cnfn convert_ulong_sat_rtn(float);
-ulong __ovld __cnfn convert_ulong(float);
-ulong __ovld __cnfn convert_ulong_sat(float);
-float __ovld __cnfn convert_float_rte(char);
-float __ovld __cnfn convert_float_rtz(char);
-float __ovld __cnfn convert_float_rtp(char);
-float __ovld __cnfn convert_float_rtn(char);
-float __ovld __cnfn convert_float(char);
-float __ovld __cnfn convert_float_rte(uchar);
-float __ovld __cnfn convert_float_rtz(uchar);
-float __ovld __cnfn convert_float_rtp(uchar);
-float __ovld __cnfn convert_float_rtn(uchar);
-float __ovld __cnfn convert_float(uchar);
-float __ovld __cnfn convert_float_rte(short);
-float __ovld __cnfn convert_float_rtz(short);
-float __ovld __cnfn convert_float_rtp(short);
-float __ovld __cnfn convert_float_rtn(short);
-float __ovld __cnfn convert_float(short);
-float __ovld __cnfn convert_float_rte(ushort);
-float __ovld __cnfn convert_float_rtz(ushort);
-float __ovld __cnfn convert_float_rtp(ushort);
-float __ovld __cnfn convert_float_rtn(ushort);
-float __ovld __cnfn convert_float(ushort);
-float __ovld __cnfn convert_float_rte(int);
-float __ovld __cnfn convert_float_rtz(int);
-float __ovld __cnfn convert_float_rtp(int);
-float __ovld __cnfn convert_float_rtn(int);
-float __ovld __cnfn convert_float(int);
-float __ovld __cnfn convert_float_rte(uint);
-float __ovld __cnfn convert_float_rtz(uint);
-float __ovld __cnfn convert_float_rtp(uint);
-float __ovld __cnfn convert_float_rtn(uint);
-float __ovld __cnfn convert_float(uint);
-float __ovld __cnfn convert_float_rte(long);
-float __ovld __cnfn convert_float_rtz(long);
-float __ovld __cnfn convert_float_rtp(long);
-float __ovld __cnfn convert_float_rtn(long);
-float __ovld __cnfn convert_float(long);
-float __ovld __cnfn convert_float_rte(ulong);
-float __ovld __cnfn convert_float_rtz(ulong);
-float __ovld __cnfn convert_float_rtp(ulong);
-float __ovld __cnfn convert_float_rtn(ulong);
-float __ovld __cnfn convert_float(ulong);
-float __ovld __cnfn convert_float_rte(float);
-float __ovld __cnfn convert_float_rtz(float);
-float __ovld __cnfn convert_float_rtp(float);
-float __ovld __cnfn convert_float_rtn(float);
-float __ovld __cnfn convert_float(float);
-char2 __ovld __cnfn convert_char2_rte(char2);
-char2 __ovld __cnfn convert_char2_sat_rte(char2);
-char2 __ovld __cnfn convert_char2_rtz(char2);
-char2 __ovld __cnfn convert_char2_sat_rtz(char2);
-char2 __ovld __cnfn convert_char2_rtp(char2);
-char2 __ovld __cnfn convert_char2_sat_rtp(char2);
-char2 __ovld __cnfn convert_char2_rtn(char2);
-char2 __ovld __cnfn convert_char2_sat_rtn(char2);
-char2 __ovld __cnfn convert_char2(char2);
-char2 __ovld __cnfn convert_char2_sat(char2);
-char2 __ovld __cnfn convert_char2_rte(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
-char2 __ovld __cnfn convert_char2_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_rtn(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
-char2 __ovld __cnfn convert_char2(uchar2);
-char2 __ovld __cnfn convert_char2_sat(uchar2);
-char2 __ovld __cnfn convert_char2_rte(short2);
-char2 __ovld __cnfn convert_char2_sat_rte(short2);
-char2 __ovld __cnfn convert_char2_rtz(short2);
-char2 __ovld __cnfn convert_char2_sat_rtz(short2);
-char2 __ovld __cnfn convert_char2_rtp(short2);
-char2 __ovld __cnfn convert_char2_sat_rtp(short2);
-char2 __ovld __cnfn convert_char2_rtn(short2);
-char2 __ovld __cnfn convert_char2_sat_rtn(short2);
-char2 __ovld __cnfn convert_char2(short2);
-char2 __ovld __cnfn convert_char2_sat(short2);
-char2 __ovld __cnfn convert_char2_rte(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
-char2 __ovld __cnfn convert_char2_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_rtn(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
-char2 __ovld __cnfn convert_char2(ushort2);
-char2 __ovld __cnfn convert_char2_sat(ushort2);
-char2 __ovld __cnfn convert_char2_rte(int2);
-char2 __ovld __cnfn convert_char2_sat_rte(int2);
-char2 __ovld __cnfn convert_char2_rtz(int2);
-char2 __ovld __cnfn convert_char2_sat_rtz(int2);
-char2 __ovld __cnfn convert_char2_rtp(int2);
-char2 __ovld __cnfn convert_char2_sat_rtp(int2);
-char2 __ovld __cnfn convert_char2_rtn(int2);
-char2 __ovld __cnfn convert_char2_sat_rtn(int2);
-char2 __ovld __cnfn convert_char2(int2);
-char2 __ovld __cnfn convert_char2_sat(int2);
-char2 __ovld __cnfn convert_char2_rte(uint2);
-char2 __ovld __cnfn convert_char2_sat_rte(uint2);
-char2 __ovld __cnfn convert_char2_rtz(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
-char2 __ovld __cnfn convert_char2_rtp(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
-char2 __ovld __cnfn convert_char2_rtn(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
-char2 __ovld __cnfn convert_char2(uint2);
-char2 __ovld __cnfn convert_char2_sat(uint2);
-char2 __ovld __cnfn convert_char2_rte(long2);
-char2 __ovld __cnfn convert_char2_sat_rte(long2);
-char2 __ovld __cnfn convert_char2_rtz(long2);
-char2 __ovld __cnfn convert_char2_sat_rtz(long2);
-char2 __ovld __cnfn convert_char2_rtp(long2);
-char2 __ovld __cnfn convert_char2_sat_rtp(long2);
-char2 __ovld __cnfn convert_char2_rtn(long2);
-char2 __ovld __cnfn convert_char2_sat_rtn(long2);
-char2 __ovld __cnfn convert_char2(long2);
-char2 __ovld __cnfn convert_char2_sat(long2);
-char2 __ovld __cnfn convert_char2_rte(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
-char2 __ovld __cnfn convert_char2_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_rtn(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
-char2 __ovld __cnfn convert_char2(ulong2);
-char2 __ovld __cnfn convert_char2_sat(ulong2);
-char2 __ovld __cnfn convert_char2_rte(float2);
-char2 __ovld __cnfn convert_char2_sat_rte(float2);
-char2 __ovld __cnfn convert_char2_rtz(float2);
-char2 __ovld __cnfn convert_char2_sat_rtz(float2);
-char2 __ovld __cnfn convert_char2_rtp(float2);
-char2 __ovld __cnfn convert_char2_sat_rtp(float2);
-char2 __ovld __cnfn convert_char2_rtn(float2);
-char2 __ovld __cnfn convert_char2_sat_rtn(float2);
-char2 __ovld __cnfn convert_char2(float2);
-char2 __ovld __cnfn convert_char2_sat(float2);
-uchar2 __ovld __cnfn convert_uchar2_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat(char2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat(short2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat(int2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat(long2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat(float2);
-short2 __ovld __cnfn convert_short2_rte(char2);
-short2 __ovld __cnfn convert_short2_sat_rte(char2);
-short2 __ovld __cnfn convert_short2_rtz(char2);
-short2 __ovld __cnfn convert_short2_sat_rtz(char2);
-short2 __ovld __cnfn convert_short2_rtp(char2);
-short2 __ovld __cnfn convert_short2_sat_rtp(char2);
-short2 __ovld __cnfn convert_short2_rtn(char2);
-short2 __ovld __cnfn convert_short2_sat_rtn(char2);
-short2 __ovld __cnfn convert_short2(char2);
-short2 __ovld __cnfn convert_short2_sat(char2);
-short2 __ovld __cnfn convert_short2_rte(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
-short2 __ovld __cnfn convert_short2_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_rtn(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
-short2 __ovld __cnfn convert_short2(uchar2);
-short2 __ovld __cnfn convert_short2_sat(uchar2);
-short2 __ovld __cnfn convert_short2_rte(short2);
-short2 __ovld __cnfn convert_short2_sat_rte(short2);
-short2 __ovld __cnfn convert_short2_rtz(short2);
-short2 __ovld __cnfn convert_short2_sat_rtz(short2);
-short2 __ovld __cnfn convert_short2_rtp(short2);
-short2 __ovld __cnfn convert_short2_sat_rtp(short2);
-short2 __ovld __cnfn convert_short2_rtn(short2);
-short2 __ovld __cnfn convert_short2_sat_rtn(short2);
-short2 __ovld __cnfn convert_short2(short2);
-short2 __ovld __cnfn convert_short2_sat(short2);
-short2 __ovld __cnfn convert_short2_rte(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
-short2 __ovld __cnfn convert_short2_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_rtn(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
-short2 __ovld __cnfn convert_short2(ushort2);
-short2 __ovld __cnfn convert_short2_sat(ushort2);
-short2 __ovld __cnfn convert_short2_rte(int2);
-short2 __ovld __cnfn convert_short2_sat_rte(int2);
-short2 __ovld __cnfn convert_short2_rtz(int2);
-short2 __ovld __cnfn convert_short2_sat_rtz(int2);
-short2 __ovld __cnfn convert_short2_rtp(int2);
-short2 __ovld __cnfn convert_short2_sat_rtp(int2);
-short2 __ovld __cnfn convert_short2_rtn(int2);
-short2 __ovld __cnfn convert_short2_sat_rtn(int2);
-short2 __ovld __cnfn convert_short2(int2);
-short2 __ovld __cnfn convert_short2_sat(int2);
-short2 __ovld __cnfn convert_short2_rte(uint2);
-short2 __ovld __cnfn convert_short2_sat_rte(uint2);
-short2 __ovld __cnfn convert_short2_rtz(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
-short2 __ovld __cnfn convert_short2_rtp(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
-short2 __ovld __cnfn convert_short2_rtn(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
-short2 __ovld __cnfn convert_short2(uint2);
-short2 __ovld __cnfn convert_short2_sat(uint2);
-short2 __ovld __cnfn convert_short2_rte(long2);
-short2 __ovld __cnfn convert_short2_sat_rte(long2);
-short2 __ovld __cnfn convert_short2_rtz(long2);
-short2 __ovld __cnfn convert_short2_sat_rtz(long2);
-short2 __ovld __cnfn convert_short2_rtp(long2);
-short2 __ovld __cnfn convert_short2_sat_rtp(long2);
-short2 __ovld __cnfn convert_short2_rtn(long2);
-short2 __ovld __cnfn convert_short2_sat_rtn(long2);
-short2 __ovld __cnfn convert_short2(long2);
-short2 __ovld __cnfn convert_short2_sat(long2);
-short2 __ovld __cnfn convert_short2_rte(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
-short2 __ovld __cnfn convert_short2_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_rtn(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
-short2 __ovld __cnfn convert_short2(ulong2);
-short2 __ovld __cnfn convert_short2_sat(ulong2);
-short2 __ovld __cnfn convert_short2_rte(float2);
-short2 __ovld __cnfn convert_short2_sat_rte(float2);
-short2 __ovld __cnfn convert_short2_rtz(float2);
-short2 __ovld __cnfn convert_short2_sat_rtz(float2);
-short2 __ovld __cnfn convert_short2_rtp(float2);
-short2 __ovld __cnfn convert_short2_sat_rtp(float2);
-short2 __ovld __cnfn convert_short2_rtn(float2);
-short2 __ovld __cnfn convert_short2_sat_rtn(float2);
-short2 __ovld __cnfn convert_short2(float2);
-short2 __ovld __cnfn convert_short2_sat(float2);
-ushort2 __ovld __cnfn convert_ushort2_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat(char2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat(short2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat(int2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat(long2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat(float2);
-int2 __ovld __cnfn convert_int2_rte(char2);
-int2 __ovld __cnfn convert_int2_sat_rte(char2);
-int2 __ovld __cnfn convert_int2_rtz(char2);
-int2 __ovld __cnfn convert_int2_sat_rtz(char2);
-int2 __ovld __cnfn convert_int2_rtp(char2);
-int2 __ovld __cnfn convert_int2_sat_rtp(char2);
-int2 __ovld __cnfn convert_int2_rtn(char2);
-int2 __ovld __cnfn convert_int2_sat_rtn(char2);
-int2 __ovld __cnfn convert_int2(char2);
-int2 __ovld __cnfn convert_int2_sat(char2);
-int2 __ovld __cnfn convert_int2_rte(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
-int2 __ovld __cnfn convert_int2_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_rtn(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
-int2 __ovld __cnfn convert_int2(uchar2);
-int2 __ovld __cnfn convert_int2_sat(uchar2);
-int2 __ovld __cnfn convert_int2_rte(short2);
-int2 __ovld __cnfn convert_int2_sat_rte(short2);
-int2 __ovld __cnfn convert_int2_rtz(short2);
-int2 __ovld __cnfn convert_int2_sat_rtz(short2);
-int2 __ovld __cnfn convert_int2_rtp(short2);
-int2 __ovld __cnfn convert_int2_sat_rtp(short2);
-int2 __ovld __cnfn convert_int2_rtn(short2);
-int2 __ovld __cnfn convert_int2_sat_rtn(short2);
-int2 __ovld __cnfn convert_int2(short2);
-int2 __ovld __cnfn convert_int2_sat(short2);
-int2 __ovld __cnfn convert_int2_rte(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
-int2 __ovld __cnfn convert_int2_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_rtn(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
-int2 __ovld __cnfn convert_int2(ushort2);
-int2 __ovld __cnfn convert_int2_sat(ushort2);
-int2 __ovld __cnfn convert_int2_rte(int2);
-int2 __ovld __cnfn convert_int2_sat_rte(int2);
-int2 __ovld __cnfn convert_int2_rtz(int2);
-int2 __ovld __cnfn convert_int2_sat_rtz(int2);
-int2 __ovld __cnfn convert_int2_rtp(int2);
-int2 __ovld __cnfn convert_int2_sat_rtp(int2);
-int2 __ovld __cnfn convert_int2_rtn(int2);
-int2 __ovld __cnfn convert_int2_sat_rtn(int2);
-int2 __ovld __cnfn convert_int2(int2);
-int2 __ovld __cnfn convert_int2_sat(int2);
-int2 __ovld __cnfn convert_int2_rte(uint2);
-int2 __ovld __cnfn convert_int2_sat_rte(uint2);
-int2 __ovld __cnfn convert_int2_rtz(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
-int2 __ovld __cnfn convert_int2_rtp(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
-int2 __ovld __cnfn convert_int2_rtn(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
-int2 __ovld __cnfn convert_int2(uint2);
-int2 __ovld __cnfn convert_int2_sat(uint2);
-int2 __ovld __cnfn convert_int2_rte(long2);
-int2 __ovld __cnfn convert_int2_sat_rte(long2);
-int2 __ovld __cnfn convert_int2_rtz(long2);
-int2 __ovld __cnfn convert_int2_sat_rtz(long2);
-int2 __ovld __cnfn convert_int2_rtp(long2);
-int2 __ovld __cnfn convert_int2_sat_rtp(long2);
-int2 __ovld __cnfn convert_int2_rtn(long2);
-int2 __ovld __cnfn convert_int2_sat_rtn(long2);
-int2 __ovld __cnfn convert_int2(long2);
-int2 __ovld __cnfn convert_int2_sat(long2);
-int2 __ovld __cnfn convert_int2_rte(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
-int2 __ovld __cnfn convert_int2_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_rtn(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
-int2 __ovld __cnfn convert_int2(ulong2);
-int2 __ovld __cnfn convert_int2_sat(ulong2);
-int2 __ovld __cnfn convert_int2_rte(float2);
-int2 __ovld __cnfn convert_int2_sat_rte(float2);
-int2 __ovld __cnfn convert_int2_rtz(float2);
-int2 __ovld __cnfn convert_int2_sat_rtz(float2);
-int2 __ovld __cnfn convert_int2_rtp(float2);
-int2 __ovld __cnfn convert_int2_sat_rtp(float2);
-int2 __ovld __cnfn convert_int2_rtn(float2);
-int2 __ovld __cnfn convert_int2_sat_rtn(float2);
-int2 __ovld __cnfn convert_int2(float2);
-int2 __ovld __cnfn convert_int2_sat(float2);
-uint2 __ovld __cnfn convert_uint2_rte(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
-uint2 __ovld __cnfn convert_uint2_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_rtn(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
-uint2 __ovld __cnfn convert_uint2(char2);
-uint2 __ovld __cnfn convert_uint2_sat(char2);
-uint2 __ovld __cnfn convert_uint2_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat(uchar2);
-uint2 __ovld __cnfn convert_uint2_rte(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
-uint2 __ovld __cnfn convert_uint2_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_rtn(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
-uint2 __ovld __cnfn convert_uint2(short2);
-uint2 __ovld __cnfn convert_uint2_sat(short2);
-uint2 __ovld __cnfn convert_uint2_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat(ushort2);
-uint2 __ovld __cnfn convert_uint2_rte(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
-uint2 __ovld __cnfn convert_uint2_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_rtn(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
-uint2 __ovld __cnfn convert_uint2(int2);
-uint2 __ovld __cnfn convert_uint2_sat(int2);
-uint2 __ovld __cnfn convert_uint2_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2(uint2);
-uint2 __ovld __cnfn convert_uint2_sat(uint2);
-uint2 __ovld __cnfn convert_uint2_rte(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
-uint2 __ovld __cnfn convert_uint2_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_rtn(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
-uint2 __ovld __cnfn convert_uint2(long2);
-uint2 __ovld __cnfn convert_uint2_sat(long2);
-uint2 __ovld __cnfn convert_uint2_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat(ulong2);
-uint2 __ovld __cnfn convert_uint2_rte(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
-uint2 __ovld __cnfn convert_uint2_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_rtn(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
-uint2 __ovld __cnfn convert_uint2(float2);
-uint2 __ovld __cnfn convert_uint2_sat(float2);
-long2 __ovld __cnfn convert_long2_rte(char2);
-long2 __ovld __cnfn convert_long2_sat_rte(char2);
-long2 __ovld __cnfn convert_long2_rtz(char2);
-long2 __ovld __cnfn convert_long2_sat_rtz(char2);
-long2 __ovld __cnfn convert_long2_rtp(char2);
-long2 __ovld __cnfn convert_long2_sat_rtp(char2);
-long2 __ovld __cnfn convert_long2_rtn(char2);
-long2 __ovld __cnfn convert_long2_sat_rtn(char2);
-long2 __ovld __cnfn convert_long2(char2);
-long2 __ovld __cnfn convert_long2_sat(char2);
-long2 __ovld __cnfn convert_long2_rte(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
-long2 __ovld __cnfn convert_long2_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_rtn(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
-long2 __ovld __cnfn convert_long2(uchar2);
-long2 __ovld __cnfn convert_long2_sat(uchar2);
-long2 __ovld __cnfn convert_long2_rte(short2);
-long2 __ovld __cnfn convert_long2_sat_rte(short2);
-long2 __ovld __cnfn convert_long2_rtz(short2);
-long2 __ovld __cnfn convert_long2_sat_rtz(short2);
-long2 __ovld __cnfn convert_long2_rtp(short2);
-long2 __ovld __cnfn convert_long2_sat_rtp(short2);
-long2 __ovld __cnfn convert_long2_rtn(short2);
-long2 __ovld __cnfn convert_long2_sat_rtn(short2);
-long2 __ovld __cnfn convert_long2(short2);
-long2 __ovld __cnfn convert_long2_sat(short2);
-long2 __ovld __cnfn convert_long2_rte(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
-long2 __ovld __cnfn convert_long2_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_rtn(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
-long2 __ovld __cnfn convert_long2(ushort2);
-long2 __ovld __cnfn convert_long2_sat(ushort2);
-long2 __ovld __cnfn convert_long2_rte(int2);
-long2 __ovld __cnfn convert_long2_sat_rte(int2);
-long2 __ovld __cnfn convert_long2_rtz(int2);
-long2 __ovld __cnfn convert_long2_sat_rtz(int2);
-long2 __ovld __cnfn convert_long2_rtp(int2);
-long2 __ovld __cnfn convert_long2_sat_rtp(int2);
-long2 __ovld __cnfn convert_long2_rtn(int2);
-long2 __ovld __cnfn convert_long2_sat_rtn(int2);
-long2 __ovld __cnfn convert_long2(int2);
-long2 __ovld __cnfn convert_long2_sat(int2);
-long2 __ovld __cnfn convert_long2_rte(uint2);
-long2 __ovld __cnfn convert_long2_sat_rte(uint2);
-long2 __ovld __cnfn convert_long2_rtz(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
-long2 __ovld __cnfn convert_long2_rtp(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
-long2 __ovld __cnfn convert_long2_rtn(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
-long2 __ovld __cnfn convert_long2(uint2);
-long2 __ovld __cnfn convert_long2_sat(uint2);
-long2 __ovld __cnfn convert_long2_rte(long2);
-long2 __ovld __cnfn convert_long2_sat_rte(long2);
-long2 __ovld __cnfn convert_long2_rtz(long2);
-long2 __ovld __cnfn convert_long2_sat_rtz(long2);
-long2 __ovld __cnfn convert_long2_rtp(long2);
-long2 __ovld __cnfn convert_long2_sat_rtp(long2);
-long2 __ovld __cnfn convert_long2_rtn(long2);
-long2 __ovld __cnfn convert_long2_sat_rtn(long2);
-long2 __ovld __cnfn convert_long2(long2);
-long2 __ovld __cnfn convert_long2_sat(long2);
-long2 __ovld __cnfn convert_long2_rte(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
-long2 __ovld __cnfn convert_long2_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_rtn(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
-long2 __ovld __cnfn convert_long2(ulong2);
-long2 __ovld __cnfn convert_long2_sat(ulong2);
-long2 __ovld __cnfn convert_long2_rte(float2);
-long2 __ovld __cnfn convert_long2_sat_rte(float2);
-long2 __ovld __cnfn convert_long2_rtz(float2);
-long2 __ovld __cnfn convert_long2_sat_rtz(float2);
-long2 __ovld __cnfn convert_long2_rtp(float2);
-long2 __ovld __cnfn convert_long2_sat_rtp(float2);
-long2 __ovld __cnfn convert_long2_rtn(float2);
-long2 __ovld __cnfn convert_long2_sat_rtn(float2);
-long2 __ovld __cnfn convert_long2(float2);
-long2 __ovld __cnfn convert_long2_sat(float2);
-ulong2 __ovld __cnfn convert_ulong2_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat(char2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat(short2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat(int2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat(long2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat(float2);
-float2 __ovld __cnfn convert_float2_rte(char2);
-float2 __ovld __cnfn convert_float2_rtz(char2);
-float2 __ovld __cnfn convert_float2_rtp(char2);
-float2 __ovld __cnfn convert_float2_rtn(char2);
-float2 __ovld __cnfn convert_float2(char2);
-float2 __ovld __cnfn convert_float2_rte(uchar2);
-float2 __ovld __cnfn convert_float2_rtz(uchar2);
-float2 __ovld __cnfn convert_float2_rtp(uchar2);
-float2 __ovld __cnfn convert_float2_rtn(uchar2);
-float2 __ovld __cnfn convert_float2(uchar2);
-float2 __ovld __cnfn convert_float2_rte(short2);
-float2 __ovld __cnfn convert_float2_rtz(short2);
-float2 __ovld __cnfn convert_float2_rtp(short2);
-float2 __ovld __cnfn convert_float2_rtn(short2);
-float2 __ovld __cnfn convert_float2(short2);
-float2 __ovld __cnfn convert_float2_rte(ushort2);
-float2 __ovld __cnfn convert_float2_rtz(ushort2);
-float2 __ovld __cnfn convert_float2_rtp(ushort2);
-float2 __ovld __cnfn convert_float2_rtn(ushort2);
-float2 __ovld __cnfn convert_float2(ushort2);
-float2 __ovld __cnfn convert_float2_rte(int2);
-float2 __ovld __cnfn convert_float2_rtz(int2);
-float2 __ovld __cnfn convert_float2_rtp(int2);
-float2 __ovld __cnfn convert_float2_rtn(int2);
-float2 __ovld __cnfn convert_float2(int2);
-float2 __ovld __cnfn convert_float2_rte(uint2);
-float2 __ovld __cnfn convert_float2_rtz(uint2);
-float2 __ovld __cnfn convert_float2_rtp(uint2);
-float2 __ovld __cnfn convert_float2_rtn(uint2);
-float2 __ovld __cnfn convert_float2(uint2);
-float2 __ovld __cnfn convert_float2_rte(long2);
-float2 __ovld __cnfn convert_float2_rtz(long2);
-float2 __ovld __cnfn convert_float2_rtp(long2);
-float2 __ovld __cnfn convert_float2_rtn(long2);
-float2 __ovld __cnfn convert_float2(long2);
-float2 __ovld __cnfn convert_float2_rte(ulong2);
-float2 __ovld __cnfn convert_float2_rtz(ulong2);
-float2 __ovld __cnfn convert_float2_rtp(ulong2);
-float2 __ovld __cnfn convert_float2_rtn(ulong2);
-float2 __ovld __cnfn convert_float2(ulong2);
-float2 __ovld __cnfn convert_float2_rte(float2);
-float2 __ovld __cnfn convert_float2_rtz(float2);
-float2 __ovld __cnfn convert_float2_rtp(float2);
-float2 __ovld __cnfn convert_float2_rtn(float2);
-float2 __ovld __cnfn convert_float2(float2);
-char3 __ovld __cnfn convert_char3_rte(char3);
-char3 __ovld __cnfn convert_char3_sat_rte(char3);
-char3 __ovld __cnfn convert_char3_rtz(char3);
-char3 __ovld __cnfn convert_char3_sat_rtz(char3);
-char3 __ovld __cnfn convert_char3_rtp(char3);
-char3 __ovld __cnfn convert_char3_sat_rtp(char3);
-char3 __ovld __cnfn convert_char3_rtn(char3);
-char3 __ovld __cnfn convert_char3_sat_rtn(char3);
-char3 __ovld __cnfn convert_char3(char3);
-char3 __ovld __cnfn convert_char3_sat(char3);
-char3 __ovld __cnfn convert_char3_rte(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
-char3 __ovld __cnfn convert_char3_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_rtn(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
-char3 __ovld __cnfn convert_char3(uchar3);
-char3 __ovld __cnfn convert_char3_sat(uchar3);
-char3 __ovld __cnfn convert_char3_rte(short3);
-char3 __ovld __cnfn convert_char3_sat_rte(short3);
-char3 __ovld __cnfn convert_char3_rtz(short3);
-char3 __ovld __cnfn convert_char3_sat_rtz(short3);
-char3 __ovld __cnfn convert_char3_rtp(short3);
-char3 __ovld __cnfn convert_char3_sat_rtp(short3);
-char3 __ovld __cnfn convert_char3_rtn(short3);
-char3 __ovld __cnfn convert_char3_sat_rtn(short3);
-char3 __ovld __cnfn convert_char3(short3);
-char3 __ovld __cnfn convert_char3_sat(short3);
-char3 __ovld __cnfn convert_char3_rte(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
-char3 __ovld __cnfn convert_char3_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_rtn(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
-char3 __ovld __cnfn convert_char3(ushort3);
-char3 __ovld __cnfn convert_char3_sat(ushort3);
-char3 __ovld __cnfn convert_char3_rte(int3);
-char3 __ovld __cnfn convert_char3_sat_rte(int3);
-char3 __ovld __cnfn convert_char3_rtz(int3);
-char3 __ovld __cnfn convert_char3_sat_rtz(int3);
-char3 __ovld __cnfn convert_char3_rtp(int3);
-char3 __ovld __cnfn convert_char3_sat_rtp(int3);
-char3 __ovld __cnfn convert_char3_rtn(int3);
-char3 __ovld __cnfn convert_char3_sat_rtn(int3);
-char3 __ovld __cnfn convert_char3(int3);
-char3 __ovld __cnfn convert_char3_sat(int3);
-char3 __ovld __cnfn convert_char3_rte(uint3);
-char3 __ovld __cnfn convert_char3_sat_rte(uint3);
-char3 __ovld __cnfn convert_char3_rtz(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
-char3 __ovld __cnfn convert_char3_rtp(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
-char3 __ovld __cnfn convert_char3_rtn(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
-char3 __ovld __cnfn convert_char3(uint3);
-char3 __ovld __cnfn convert_char3_sat(uint3);
-char3 __ovld __cnfn convert_char3_rte(long3);
-char3 __ovld __cnfn convert_char3_sat_rte(long3);
-char3 __ovld __cnfn convert_char3_rtz(long3);
-char3 __ovld __cnfn convert_char3_sat_rtz(long3);
-char3 __ovld __cnfn convert_char3_rtp(long3);
-char3 __ovld __cnfn convert_char3_sat_rtp(long3);
-char3 __ovld __cnfn convert_char3_rtn(long3);
-char3 __ovld __cnfn convert_char3_sat_rtn(long3);
-char3 __ovld __cnfn convert_char3(long3);
-char3 __ovld __cnfn convert_char3_sat(long3);
-char3 __ovld __cnfn convert_char3_rte(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
-char3 __ovld __cnfn convert_char3_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_rtn(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
-char3 __ovld __cnfn convert_char3(ulong3);
-char3 __ovld __cnfn convert_char3_sat(ulong3);
-char3 __ovld __cnfn convert_char3_rte(float3);
-char3 __ovld __cnfn convert_char3_sat_rte(float3);
-char3 __ovld __cnfn convert_char3_rtz(float3);
-char3 __ovld __cnfn convert_char3_sat_rtz(float3);
-char3 __ovld __cnfn convert_char3_rtp(float3);
-char3 __ovld __cnfn convert_char3_sat_rtp(float3);
-char3 __ovld __cnfn convert_char3_rtn(float3);
-char3 __ovld __cnfn convert_char3_sat_rtn(float3);
-char3 __ovld __cnfn convert_char3(float3);
-char3 __ovld __cnfn convert_char3_sat(float3);
-uchar3 __ovld __cnfn convert_uchar3_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat(char3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat(short3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat(int3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat(long3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat(float3);
-short3 __ovld __cnfn convert_short3_rte(char3);
-short3 __ovld __cnfn convert_short3_sat_rte(char3);
-short3 __ovld __cnfn convert_short3_rtz(char3);
-short3 __ovld __cnfn convert_short3_sat_rtz(char3);
-short3 __ovld __cnfn convert_short3_rtp(char3);
-short3 __ovld __cnfn convert_short3_sat_rtp(char3);
-short3 __ovld __cnfn convert_short3_rtn(char3);
-short3 __ovld __cnfn convert_short3_sat_rtn(char3);
-short3 __ovld __cnfn convert_short3(char3);
-short3 __ovld __cnfn convert_short3_sat(char3);
-short3 __ovld __cnfn convert_short3_rte(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
-short3 __ovld __cnfn convert_short3_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_rtn(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
-short3 __ovld __cnfn convert_short3(uchar3);
-short3 __ovld __cnfn convert_short3_sat(uchar3);
-short3 __ovld __cnfn convert_short3_rte(short3);
-short3 __ovld __cnfn convert_short3_sat_rte(short3);
-short3 __ovld __cnfn convert_short3_rtz(short3);
-short3 __ovld __cnfn convert_short3_sat_rtz(short3);
-short3 __ovld __cnfn convert_short3_rtp(short3);
-short3 __ovld __cnfn convert_short3_sat_rtp(short3);
-short3 __ovld __cnfn convert_short3_rtn(short3);
-short3 __ovld __cnfn convert_short3_sat_rtn(short3);
-short3 __ovld __cnfn convert_short3(short3);
-short3 __ovld __cnfn convert_short3_sat(short3);
-short3 __ovld __cnfn convert_short3_rte(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
-short3 __ovld __cnfn convert_short3_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_rtn(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
-short3 __ovld __cnfn convert_short3(ushort3);
-short3 __ovld __cnfn convert_short3_sat(ushort3);
-short3 __ovld __cnfn convert_short3_rte(int3);
-short3 __ovld __cnfn convert_short3_sat_rte(int3);
-short3 __ovld __cnfn convert_short3_rtz(int3);
-short3 __ovld __cnfn convert_short3_sat_rtz(int3);
-short3 __ovld __cnfn convert_short3_rtp(int3);
-short3 __ovld __cnfn convert_short3_sat_rtp(int3);
-short3 __ovld __cnfn convert_short3_rtn(int3);
-short3 __ovld __cnfn convert_short3_sat_rtn(int3);
-short3 __ovld __cnfn convert_short3(int3);
-short3 __ovld __cnfn convert_short3_sat(int3);
-short3 __ovld __cnfn convert_short3_rte(uint3);
-short3 __ovld __cnfn convert_short3_sat_rte(uint3);
-short3 __ovld __cnfn convert_short3_rtz(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
-short3 __ovld __cnfn convert_short3_rtp(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
-short3 __ovld __cnfn convert_short3_rtn(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
-short3 __ovld __cnfn convert_short3(uint3);
-short3 __ovld __cnfn convert_short3_sat(uint3);
-short3 __ovld __cnfn convert_short3_rte(long3);
-short3 __ovld __cnfn convert_short3_sat_rte(long3);
-short3 __ovld __cnfn convert_short3_rtz(long3);
-short3 __ovld __cnfn convert_short3_sat_rtz(long3);
-short3 __ovld __cnfn convert_short3_rtp(long3);
-short3 __ovld __cnfn convert_short3_sat_rtp(long3);
-short3 __ovld __cnfn convert_short3_rtn(long3);
-short3 __ovld __cnfn convert_short3_sat_rtn(long3);
-short3 __ovld __cnfn convert_short3(long3);
-short3 __ovld __cnfn convert_short3_sat(long3);
-short3 __ovld __cnfn convert_short3_rte(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
-short3 __ovld __cnfn convert_short3_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_rtn(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
-short3 __ovld __cnfn convert_short3(ulong3);
-short3 __ovld __cnfn convert_short3_sat(ulong3);
-short3 __ovld __cnfn convert_short3_rte(float3);
-short3 __ovld __cnfn convert_short3_sat_rte(float3);
-short3 __ovld __cnfn convert_short3_rtz(float3);
-short3 __ovld __cnfn convert_short3_sat_rtz(float3);
-short3 __ovld __cnfn convert_short3_rtp(float3);
-short3 __ovld __cnfn convert_short3_sat_rtp(float3);
-short3 __ovld __cnfn convert_short3_rtn(float3);
-short3 __ovld __cnfn convert_short3_sat_rtn(float3);
-short3 __ovld __cnfn convert_short3(float3);
-short3 __ovld __cnfn convert_short3_sat(float3);
-ushort3 __ovld __cnfn convert_ushort3_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat(char3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat(short3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat(int3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat(long3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat(float3);
-int3 __ovld __cnfn convert_int3_rte(char3);
-int3 __ovld __cnfn convert_int3_sat_rte(char3);
-int3 __ovld __cnfn convert_int3_rtz(char3);
-int3 __ovld __cnfn convert_int3_sat_rtz(char3);
-int3 __ovld __cnfn convert_int3_rtp(char3);
-int3 __ovld __cnfn convert_int3_sat_rtp(char3);
-int3 __ovld __cnfn convert_int3_rtn(char3);
-int3 __ovld __cnfn convert_int3_sat_rtn(char3);
-int3 __ovld __cnfn convert_int3(char3);
-int3 __ovld __cnfn convert_int3_sat(char3);
-int3 __ovld __cnfn convert_int3_rte(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
-int3 __ovld __cnfn convert_int3_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_rtn(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
-int3 __ovld __cnfn convert_int3(uchar3);
-int3 __ovld __cnfn convert_int3_sat(uchar3);
-int3 __ovld __cnfn convert_int3_rte(short3);
-int3 __ovld __cnfn convert_int3_sat_rte(short3);
-int3 __ovld __cnfn convert_int3_rtz(short3);
-int3 __ovld __cnfn convert_int3_sat_rtz(short3);
-int3 __ovld __cnfn convert_int3_rtp(short3);
-int3 __ovld __cnfn convert_int3_sat_rtp(short3);
-int3 __ovld __cnfn convert_int3_rtn(short3);
-int3 __ovld __cnfn convert_int3_sat_rtn(short3);
-int3 __ovld __cnfn convert_int3(short3);
-int3 __ovld __cnfn convert_int3_sat(short3);
-int3 __ovld __cnfn convert_int3_rte(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
-int3 __ovld __cnfn convert_int3_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_rtn(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
-int3 __ovld __cnfn convert_int3(ushort3);
-int3 __ovld __cnfn convert_int3_sat(ushort3);
-int3 __ovld __cnfn convert_int3_rte(int3);
-int3 __ovld __cnfn convert_int3_sat_rte(int3);
-int3 __ovld __cnfn convert_int3_rtz(int3);
-int3 __ovld __cnfn convert_int3_sat_rtz(int3);
-int3 __ovld __cnfn convert_int3_rtp(int3);
-int3 __ovld __cnfn convert_int3_sat_rtp(int3);
-int3 __ovld __cnfn convert_int3_rtn(int3);
-int3 __ovld __cnfn convert_int3_sat_rtn(int3);
-int3 __ovld __cnfn convert_int3(int3);
-int3 __ovld __cnfn convert_int3_sat(int3);
-int3 __ovld __cnfn convert_int3_rte(uint3);
-int3 __ovld __cnfn convert_int3_sat_rte(uint3);
-int3 __ovld __cnfn convert_int3_rtz(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
-int3 __ovld __cnfn convert_int3_rtp(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
-int3 __ovld __cnfn convert_int3_rtn(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
-int3 __ovld __cnfn convert_int3(uint3);
-int3 __ovld __cnfn convert_int3_sat(uint3);
-int3 __ovld __cnfn convert_int3_rte(long3);
-int3 __ovld __cnfn convert_int3_sat_rte(long3);
-int3 __ovld __cnfn convert_int3_rtz(long3);
-int3 __ovld __cnfn convert_int3_sat_rtz(long3);
-int3 __ovld __cnfn convert_int3_rtp(long3);
-int3 __ovld __cnfn convert_int3_sat_rtp(long3);
-int3 __ovld __cnfn convert_int3_rtn(long3);
-int3 __ovld __cnfn convert_int3_sat_rtn(long3);
-int3 __ovld __cnfn convert_int3(long3);
-int3 __ovld __cnfn convert_int3_sat(long3);
-int3 __ovld __cnfn convert_int3_rte(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
-int3 __ovld __cnfn convert_int3_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_rtn(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
-int3 __ovld __cnfn convert_int3(ulong3);
-int3 __ovld __cnfn convert_int3_sat(ulong3);
-int3 __ovld __cnfn convert_int3_rte(float3);
-int3 __ovld __cnfn convert_int3_sat_rte(float3);
-int3 __ovld __cnfn convert_int3_rtz(float3);
-int3 __ovld __cnfn convert_int3_sat_rtz(float3);
-int3 __ovld __cnfn convert_int3_rtp(float3);
-int3 __ovld __cnfn convert_int3_sat_rtp(float3);
-int3 __ovld __cnfn convert_int3_rtn(float3);
-int3 __ovld __cnfn convert_int3_sat_rtn(float3);
-int3 __ovld __cnfn convert_int3(float3);
-int3 __ovld __cnfn convert_int3_sat(float3);
-uint3 __ovld __cnfn convert_uint3_rte(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
-uint3 __ovld __cnfn convert_uint3_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_rtn(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
-uint3 __ovld __cnfn convert_uint3(char3);
-uint3 __ovld __cnfn convert_uint3_sat(char3);
-uint3 __ovld __cnfn convert_uint3_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat(uchar3);
-uint3 __ovld __cnfn convert_uint3_rte(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
-uint3 __ovld __cnfn convert_uint3_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_rtn(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
-uint3 __ovld __cnfn convert_uint3(short3);
-uint3 __ovld __cnfn convert_uint3_sat(short3);
-uint3 __ovld __cnfn convert_uint3_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat(ushort3);
-uint3 __ovld __cnfn convert_uint3_rte(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
-uint3 __ovld __cnfn convert_uint3_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_rtn(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
-uint3 __ovld __cnfn convert_uint3(int3);
-uint3 __ovld __cnfn convert_uint3_sat(int3);
-uint3 __ovld __cnfn convert_uint3_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3(uint3);
-uint3 __ovld __cnfn convert_uint3_sat(uint3);
-uint3 __ovld __cnfn convert_uint3_rte(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
-uint3 __ovld __cnfn convert_uint3_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_rtn(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
-uint3 __ovld __cnfn convert_uint3(long3);
-uint3 __ovld __cnfn convert_uint3_sat(long3);
-uint3 __ovld __cnfn convert_uint3_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat(ulong3);
-uint3 __ovld __cnfn convert_uint3_rte(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
-uint3 __ovld __cnfn convert_uint3_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_rtn(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
-uint3 __ovld __cnfn convert_uint3(float3);
-uint3 __ovld __cnfn convert_uint3_sat(float3);
-long3 __ovld __cnfn convert_long3_rte(char3);
-long3 __ovld __cnfn convert_long3_sat_rte(char3);
-long3 __ovld __cnfn convert_long3_rtz(char3);
-long3 __ovld __cnfn convert_long3_sat_rtz(char3);
-long3 __ovld __cnfn convert_long3_rtp(char3);
-long3 __ovld __cnfn convert_long3_sat_rtp(char3);
-long3 __ovld __cnfn convert_long3_rtn(char3);
-long3 __ovld __cnfn convert_long3_sat_rtn(char3);
-long3 __ovld __cnfn convert_long3(char3);
-long3 __ovld __cnfn convert_long3_sat(char3);
-long3 __ovld __cnfn convert_long3_rte(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
-long3 __ovld __cnfn convert_long3_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_rtn(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
-long3 __ovld __cnfn convert_long3(uchar3);
-long3 __ovld __cnfn convert_long3_sat(uchar3);
-long3 __ovld __cnfn convert_long3_rte(short3);
-long3 __ovld __cnfn convert_long3_sat_rte(short3);
-long3 __ovld __cnfn convert_long3_rtz(short3);
-long3 __ovld __cnfn convert_long3_sat_rtz(short3);
-long3 __ovld __cnfn convert_long3_rtp(short3);
-long3 __ovld __cnfn convert_long3_sat_rtp(short3);
-long3 __ovld __cnfn convert_long3_rtn(short3);
-long3 __ovld __cnfn convert_long3_sat_rtn(short3);
-long3 __ovld __cnfn convert_long3(short3);
-long3 __ovld __cnfn convert_long3_sat(short3);
-long3 __ovld __cnfn convert_long3_rte(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
-long3 __ovld __cnfn convert_long3_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_rtn(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
-long3 __ovld __cnfn convert_long3(ushort3);
-long3 __ovld __cnfn convert_long3_sat(ushort3);
-long3 __ovld __cnfn convert_long3_rte(int3);
-long3 __ovld __cnfn convert_long3_sat_rte(int3);
-long3 __ovld __cnfn convert_long3_rtz(int3);
-long3 __ovld __cnfn convert_long3_sat_rtz(int3);
-long3 __ovld __cnfn convert_long3_rtp(int3);
-long3 __ovld __cnfn convert_long3_sat_rtp(int3);
-long3 __ovld __cnfn convert_long3_rtn(int3);
-long3 __ovld __cnfn convert_long3_sat_rtn(int3);
-long3 __ovld __cnfn convert_long3(int3);
-long3 __ovld __cnfn convert_long3_sat(int3);
-long3 __ovld __cnfn convert_long3_rte(uint3);
-long3 __ovld __cnfn convert_long3_sat_rte(uint3);
-long3 __ovld __cnfn convert_long3_rtz(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
-long3 __ovld __cnfn convert_long3_rtp(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
-long3 __ovld __cnfn convert_long3_rtn(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
-long3 __ovld __cnfn convert_long3(uint3);
-long3 __ovld __cnfn convert_long3_sat(uint3);
-long3 __ovld __cnfn convert_long3_rte(long3);
-long3 __ovld __cnfn convert_long3_sat_rte(long3);
-long3 __ovld __cnfn convert_long3_rtz(long3);
-long3 __ovld __cnfn convert_long3_sat_rtz(long3);
-long3 __ovld __cnfn convert_long3_rtp(long3);
-long3 __ovld __cnfn convert_long3_sat_rtp(long3);
-long3 __ovld __cnfn convert_long3_rtn(long3);
-long3 __ovld __cnfn convert_long3_sat_rtn(long3);
-long3 __ovld __cnfn convert_long3(long3);
-long3 __ovld __cnfn convert_long3_sat(long3);
-long3 __ovld __cnfn convert_long3_rte(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
-long3 __ovld __cnfn convert_long3_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_rtn(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
-long3 __ovld __cnfn convert_long3(ulong3);
-long3 __ovld __cnfn convert_long3_sat(ulong3);
-long3 __ovld __cnfn convert_long3_rte(float3);
-long3 __ovld __cnfn convert_long3_sat_rte(float3);
-long3 __ovld __cnfn convert_long3_rtz(float3);
-long3 __ovld __cnfn convert_long3_sat_rtz(float3);
-long3 __ovld __cnfn convert_long3_rtp(float3);
-long3 __ovld __cnfn convert_long3_sat_rtp(float3);
-long3 __ovld __cnfn convert_long3_rtn(float3);
-long3 __ovld __cnfn convert_long3_sat_rtn(float3);
-long3 __ovld __cnfn convert_long3(float3);
-long3 __ovld __cnfn convert_long3_sat(float3);
-ulong3 __ovld __cnfn convert_ulong3_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat(char3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat(short3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat(int3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat(long3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat(float3);
-float3 __ovld __cnfn convert_float3_rte(char3);
-float3 __ovld __cnfn convert_float3_rtz(char3);
-float3 __ovld __cnfn convert_float3_rtp(char3);
-float3 __ovld __cnfn convert_float3_rtn(char3);
-float3 __ovld __cnfn convert_float3(char3);
-float3 __ovld __cnfn convert_float3_rte(uchar3);
-float3 __ovld __cnfn convert_float3_rtz(uchar3);
-float3 __ovld __cnfn convert_float3_rtp(uchar3);
-float3 __ovld __cnfn convert_float3_rtn(uchar3);
-float3 __ovld __cnfn convert_float3(uchar3);
-float3 __ovld __cnfn convert_float3_rte(short3);
-float3 __ovld __cnfn convert_float3_rtz(short3);
-float3 __ovld __cnfn convert_float3_rtp(short3);
-float3 __ovld __cnfn convert_float3_rtn(short3);
-float3 __ovld __cnfn convert_float3(short3);
-float3 __ovld __cnfn convert_float3_rte(ushort3);
-float3 __ovld __cnfn convert_float3_rtz(ushort3);
-float3 __ovld __cnfn convert_float3_rtp(ushort3);
-float3 __ovld __cnfn convert_float3_rtn(ushort3);
-float3 __ovld __cnfn convert_float3(ushort3);
-float3 __ovld __cnfn convert_float3_rte(int3);
-float3 __ovld __cnfn convert_float3_rtz(int3);
-float3 __ovld __cnfn convert_float3_rtp(int3);
-float3 __ovld __cnfn convert_float3_rtn(int3);
-float3 __ovld __cnfn convert_float3(int3);
-float3 __ovld __cnfn convert_float3_rte(uint3);
-float3 __ovld __cnfn convert_float3_rtz(uint3);
-float3 __ovld __cnfn convert_float3_rtp(uint3);
-float3 __ovld __cnfn convert_float3_rtn(uint3);
-float3 __ovld __cnfn convert_float3(uint3);
-float3 __ovld __cnfn convert_float3_rte(long3);
-float3 __ovld __cnfn convert_float3_rtz(long3);
-float3 __ovld __cnfn convert_float3_rtp(long3);
-float3 __ovld __cnfn convert_float3_rtn(long3);
-float3 __ovld __cnfn convert_float3(long3);
-float3 __ovld __cnfn convert_float3_rte(ulong3);
-float3 __ovld __cnfn convert_float3_rtz(ulong3);
-float3 __ovld __cnfn convert_float3_rtp(ulong3);
-float3 __ovld __cnfn convert_float3_rtn(ulong3);
-float3 __ovld __cnfn convert_float3(ulong3);
-float3 __ovld __cnfn convert_float3_rte(float3);
-float3 __ovld __cnfn convert_float3_rtz(float3);
-float3 __ovld __cnfn convert_float3_rtp(float3);
-float3 __ovld __cnfn convert_float3_rtn(float3);
-float3 __ovld __cnfn convert_float3(float3);
-char4 __ovld __cnfn convert_char4_rte(char4);
-char4 __ovld __cnfn convert_char4_sat_rte(char4);
-char4 __ovld __cnfn convert_char4_rtz(char4);
-char4 __ovld __cnfn convert_char4_sat_rtz(char4);
-char4 __ovld __cnfn convert_char4_rtp(char4);
-char4 __ovld __cnfn convert_char4_sat_rtp(char4);
-char4 __ovld __cnfn convert_char4_rtn(char4);
-char4 __ovld __cnfn convert_char4_sat_rtn(char4);
-char4 __ovld __cnfn convert_char4(char4);
-char4 __ovld __cnfn convert_char4_sat(char4);
-char4 __ovld __cnfn convert_char4_rte(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
-char4 __ovld __cnfn convert_char4_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_rtn(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
-char4 __ovld __cnfn convert_char4(uchar4);
-char4 __ovld __cnfn convert_char4_sat(uchar4);
-char4 __ovld __cnfn convert_char4_rte(short4);
-char4 __ovld __cnfn convert_char4_sat_rte(short4);
-char4 __ovld __cnfn convert_char4_rtz(short4);
-char4 __ovld __cnfn convert_char4_sat_rtz(short4);
-char4 __ovld __cnfn convert_char4_rtp(short4);
-char4 __ovld __cnfn convert_char4_sat_rtp(short4);
-char4 __ovld __cnfn convert_char4_rtn(short4);
-char4 __ovld __cnfn convert_char4_sat_rtn(short4);
-char4 __ovld __cnfn convert_char4(short4);
-char4 __ovld __cnfn convert_char4_sat(short4);
-char4 __ovld __cnfn convert_char4_rte(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
-char4 __ovld __cnfn convert_char4_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_rtn(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
-char4 __ovld __cnfn convert_char4(ushort4);
-char4 __ovld __cnfn convert_char4_sat(ushort4);
-char4 __ovld __cnfn convert_char4_rte(int4);
-char4 __ovld __cnfn convert_char4_sat_rte(int4);
-char4 __ovld __cnfn convert_char4_rtz(int4);
-char4 __ovld __cnfn convert_char4_sat_rtz(int4);
-char4 __ovld __cnfn convert_char4_rtp(int4);
-char4 __ovld __cnfn convert_char4_sat_rtp(int4);
-char4 __ovld __cnfn convert_char4_rtn(int4);
-char4 __ovld __cnfn convert_char4_sat_rtn(int4);
-char4 __ovld __cnfn convert_char4(int4);
-char4 __ovld __cnfn convert_char4_sat(int4);
-char4 __ovld __cnfn convert_char4_rte(uint4);
-char4 __ovld __cnfn convert_char4_sat_rte(uint4);
-char4 __ovld __cnfn convert_char4_rtz(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
-char4 __ovld __cnfn convert_char4_rtp(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
-char4 __ovld __cnfn convert_char4_rtn(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
-char4 __ovld __cnfn convert_char4(uint4);
-char4 __ovld __cnfn convert_char4_sat(uint4);
-char4 __ovld __cnfn convert_char4_rte(long4);
-char4 __ovld __cnfn convert_char4_sat_rte(long4);
-char4 __ovld __cnfn convert_char4_rtz(long4);
-char4 __ovld __cnfn convert_char4_sat_rtz(long4);
-char4 __ovld __cnfn convert_char4_rtp(long4);
-char4 __ovld __cnfn convert_char4_sat_rtp(long4);
-char4 __ovld __cnfn convert_char4_rtn(long4);
-char4 __ovld __cnfn convert_char4_sat_rtn(long4);
-char4 __ovld __cnfn convert_char4(long4);
-char4 __ovld __cnfn convert_char4_sat(long4);
-char4 __ovld __cnfn convert_char4_rte(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
-char4 __ovld __cnfn convert_char4_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_rtn(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
-char4 __ovld __cnfn convert_char4(ulong4);
-char4 __ovld __cnfn convert_char4_sat(ulong4);
-char4 __ovld __cnfn convert_char4_rte(float4);
-char4 __ovld __cnfn convert_char4_sat_rte(float4);
-char4 __ovld __cnfn convert_char4_rtz(float4);
-char4 __ovld __cnfn convert_char4_sat_rtz(float4);
-char4 __ovld __cnfn convert_char4_rtp(float4);
-char4 __ovld __cnfn convert_char4_sat_rtp(float4);
-char4 __ovld __cnfn convert_char4_rtn(float4);
-char4 __ovld __cnfn convert_char4_sat_rtn(float4);
-char4 __ovld __cnfn convert_char4(float4);
-char4 __ovld __cnfn convert_char4_sat(float4);
-uchar4 __ovld __cnfn convert_uchar4_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat(char4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat(short4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat(int4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat(long4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat(float4);
-short4 __ovld __cnfn convert_short4_rte(char4);
-short4 __ovld __cnfn convert_short4_sat_rte(char4);
-short4 __ovld __cnfn convert_short4_rtz(char4);
-short4 __ovld __cnfn convert_short4_sat_rtz(char4);
-short4 __ovld __cnfn convert_short4_rtp(char4);
-short4 __ovld __cnfn convert_short4_sat_rtp(char4);
-short4 __ovld __cnfn convert_short4_rtn(char4);
-short4 __ovld __cnfn convert_short4_sat_rtn(char4);
-short4 __ovld __cnfn convert_short4(char4);
-short4 __ovld __cnfn convert_short4_sat(char4);
-short4 __ovld __cnfn convert_short4_rte(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
-short4 __ovld __cnfn convert_short4_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_rtn(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
-short4 __ovld __cnfn convert_short4(uchar4);
-short4 __ovld __cnfn convert_short4_sat(uchar4);
-short4 __ovld __cnfn convert_short4_rte(short4);
-short4 __ovld __cnfn convert_short4_sat_rte(short4);
-short4 __ovld __cnfn convert_short4_rtz(short4);
-short4 __ovld __cnfn convert_short4_sat_rtz(short4);
-short4 __ovld __cnfn convert_short4_rtp(short4);
-short4 __ovld __cnfn convert_short4_sat_rtp(short4);
-short4 __ovld __cnfn convert_short4_rtn(short4);
-short4 __ovld __cnfn convert_short4_sat_rtn(short4);
-short4 __ovld __cnfn convert_short4(short4);
-short4 __ovld __cnfn convert_short4_sat(short4);
-short4 __ovld __cnfn convert_short4_rte(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
-short4 __ovld __cnfn convert_short4_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_rtn(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
-short4 __ovld __cnfn convert_short4(ushort4);
-short4 __ovld __cnfn convert_short4_sat(ushort4);
-short4 __ovld __cnfn convert_short4_rte(int4);
-short4 __ovld __cnfn convert_short4_sat_rte(int4);
-short4 __ovld __cnfn convert_short4_rtz(int4);
-short4 __ovld __cnfn convert_short4_sat_rtz(int4);
-short4 __ovld __cnfn convert_short4_rtp(int4);
-short4 __ovld __cnfn convert_short4_sat_rtp(int4);
-short4 __ovld __cnfn convert_short4_rtn(int4);
-short4 __ovld __cnfn convert_short4_sat_rtn(int4);
-short4 __ovld __cnfn convert_short4(int4);
-short4 __ovld __cnfn convert_short4_sat(int4);
-short4 __ovld __cnfn convert_short4_rte(uint4);
-short4 __ovld __cnfn convert_short4_sat_rte(uint4);
-short4 __ovld __cnfn convert_short4_rtz(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
-short4 __ovld __cnfn convert_short4_rtp(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
-short4 __ovld __cnfn convert_short4_rtn(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
-short4 __ovld __cnfn convert_short4(uint4);
-short4 __ovld __cnfn convert_short4_sat(uint4);
-short4 __ovld __cnfn convert_short4_rte(long4);
-short4 __ovld __cnfn convert_short4_sat_rte(long4);
-short4 __ovld __cnfn convert_short4_rtz(long4);
-short4 __ovld __cnfn convert_short4_sat_rtz(long4);
-short4 __ovld __cnfn convert_short4_rtp(long4);
-short4 __ovld __cnfn convert_short4_sat_rtp(long4);
-short4 __ovld __cnfn convert_short4_rtn(long4);
-short4 __ovld __cnfn convert_short4_sat_rtn(long4);
-short4 __ovld __cnfn convert_short4(long4);
-short4 __ovld __cnfn convert_short4_sat(long4);
-short4 __ovld __cnfn convert_short4_rte(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
-short4 __ovld __cnfn convert_short4_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_rtn(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
-short4 __ovld __cnfn convert_short4(ulong4);
-short4 __ovld __cnfn convert_short4_sat(ulong4);
-short4 __ovld __cnfn convert_short4_rte(float4);
-short4 __ovld __cnfn convert_short4_sat_rte(float4);
-short4 __ovld __cnfn convert_short4_rtz(float4);
-short4 __ovld __cnfn convert_short4_sat_rtz(float4);
-short4 __ovld __cnfn convert_short4_rtp(float4);
-short4 __ovld __cnfn convert_short4_sat_rtp(float4);
-short4 __ovld __cnfn convert_short4_rtn(float4);
-short4 __ovld __cnfn convert_short4_sat_rtn(float4);
-short4 __ovld __cnfn convert_short4(float4);
-short4 __ovld __cnfn convert_short4_sat(float4);
-ushort4 __ovld __cnfn convert_ushort4_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat(char4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat(short4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat(int4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat(long4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat(float4);
-int4 __ovld __cnfn convert_int4_rte(char4);
-int4 __ovld __cnfn convert_int4_sat_rte(char4);
-int4 __ovld __cnfn convert_int4_rtz(char4);
-int4 __ovld __cnfn convert_int4_sat_rtz(char4);
-int4 __ovld __cnfn convert_int4_rtp(char4);
-int4 __ovld __cnfn convert_int4_sat_rtp(char4);
-int4 __ovld __cnfn convert_int4_rtn(char4);
-int4 __ovld __cnfn convert_int4_sat_rtn(char4);
-int4 __ovld __cnfn convert_int4(char4);
-int4 __ovld __cnfn convert_int4_sat(char4);
-int4 __ovld __cnfn convert_int4_rte(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
-int4 __ovld __cnfn convert_int4_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_rtn(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
-int4 __ovld __cnfn convert_int4(uchar4);
-int4 __ovld __cnfn convert_int4_sat(uchar4);
-int4 __ovld __cnfn convert_int4_rte(short4);
-int4 __ovld __cnfn convert_int4_sat_rte(short4);
-int4 __ovld __cnfn convert_int4_rtz(short4);
-int4 __ovld __cnfn convert_int4_sat_rtz(short4);
-int4 __ovld __cnfn convert_int4_rtp(short4);
-int4 __ovld __cnfn convert_int4_sat_rtp(short4);
-int4 __ovld __cnfn convert_int4_rtn(short4);
-int4 __ovld __cnfn convert_int4_sat_rtn(short4);
-int4 __ovld __cnfn convert_int4(short4);
-int4 __ovld __cnfn convert_int4_sat(short4);
-int4 __ovld __cnfn convert_int4_rte(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
-int4 __ovld __cnfn convert_int4_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_rtn(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
-int4 __ovld __cnfn convert_int4(ushort4);
-int4 __ovld __cnfn convert_int4_sat(ushort4);
-int4 __ovld __cnfn convert_int4_rte(int4);
-int4 __ovld __cnfn convert_int4_sat_rte(int4);
-int4 __ovld __cnfn convert_int4_rtz(int4);
-int4 __ovld __cnfn convert_int4_sat_rtz(int4);
-int4 __ovld __cnfn convert_int4_rtp(int4);
-int4 __ovld __cnfn convert_int4_sat_rtp(int4);
-int4 __ovld __cnfn convert_int4_rtn(int4);
-int4 __ovld __cnfn convert_int4_sat_rtn(int4);
-int4 __ovld __cnfn convert_int4(int4);
-int4 __ovld __cnfn convert_int4_sat(int4);
-int4 __ovld __cnfn convert_int4_rte(uint4);
-int4 __ovld __cnfn convert_int4_sat_rte(uint4);
-int4 __ovld __cnfn convert_int4_rtz(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
-int4 __ovld __cnfn convert_int4_rtp(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
-int4 __ovld __cnfn convert_int4_rtn(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
-int4 __ovld __cnfn convert_int4(uint4);
-int4 __ovld __cnfn convert_int4_sat(uint4);
-int4 __ovld __cnfn convert_int4_rte(long4);
-int4 __ovld __cnfn convert_int4_sat_rte(long4);
-int4 __ovld __cnfn convert_int4_rtz(long4);
-int4 __ovld __cnfn convert_int4_sat_rtz(long4);
-int4 __ovld __cnfn convert_int4_rtp(long4);
-int4 __ovld __cnfn convert_int4_sat_rtp(long4);
-int4 __ovld __cnfn convert_int4_rtn(long4);
-int4 __ovld __cnfn convert_int4_sat_rtn(long4);
-int4 __ovld __cnfn convert_int4(long4);
-int4 __ovld __cnfn convert_int4_sat(long4);
-int4 __ovld __cnfn convert_int4_rte(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
-int4 __ovld __cnfn convert_int4_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_rtn(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
-int4 __ovld __cnfn convert_int4(ulong4);
-int4 __ovld __cnfn convert_int4_sat(ulong4);
-int4 __ovld __cnfn convert_int4_rte(float4);
-int4 __ovld __cnfn convert_int4_sat_rte(float4);
-int4 __ovld __cnfn convert_int4_rtz(float4);
-int4 __ovld __cnfn convert_int4_sat_rtz(float4);
-int4 __ovld __cnfn convert_int4_rtp(float4);
-int4 __ovld __cnfn convert_int4_sat_rtp(float4);
-int4 __ovld __cnfn convert_int4_rtn(float4);
-int4 __ovld __cnfn convert_int4_sat_rtn(float4);
-int4 __ovld __cnfn convert_int4(float4);
-int4 __ovld __cnfn convert_int4_sat(float4);
-uint4 __ovld __cnfn convert_uint4_rte(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
-uint4 __ovld __cnfn convert_uint4_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_rtn(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
-uint4 __ovld __cnfn convert_uint4(char4);
-uint4 __ovld __cnfn convert_uint4_sat(char4);
-uint4 __ovld __cnfn convert_uint4_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat(uchar4);
-uint4 __ovld __cnfn convert_uint4_rte(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
-uint4 __ovld __cnfn convert_uint4_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_rtn(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
-uint4 __ovld __cnfn convert_uint4(short4);
-uint4 __ovld __cnfn convert_uint4_sat(short4);
-uint4 __ovld __cnfn convert_uint4_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat(ushort4);
-uint4 __ovld __cnfn convert_uint4_rte(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
-uint4 __ovld __cnfn convert_uint4_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_rtn(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
-uint4 __ovld __cnfn convert_uint4(int4);
-uint4 __ovld __cnfn convert_uint4_sat(int4);
-uint4 __ovld __cnfn convert_uint4_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4(uint4);
-uint4 __ovld __cnfn convert_uint4_sat(uint4);
-uint4 __ovld __cnfn convert_uint4_rte(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
-uint4 __ovld __cnfn convert_uint4_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_rtn(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
-uint4 __ovld __cnfn convert_uint4(long4);
-uint4 __ovld __cnfn convert_uint4_sat(long4);
-uint4 __ovld __cnfn convert_uint4_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat(ulong4);
-uint4 __ovld __cnfn convert_uint4_rte(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
-uint4 __ovld __cnfn convert_uint4_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_rtn(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
-uint4 __ovld __cnfn convert_uint4(float4);
-uint4 __ovld __cnfn convert_uint4_sat(float4);
-long4 __ovld __cnfn convert_long4_rte(char4);
-long4 __ovld __cnfn convert_long4_sat_rte(char4);
-long4 __ovld __cnfn convert_long4_rtz(char4);
-long4 __ovld __cnfn convert_long4_sat_rtz(char4);
-long4 __ovld __cnfn convert_long4_rtp(char4);
-long4 __ovld __cnfn convert_long4_sat_rtp(char4);
-long4 __ovld __cnfn convert_long4_rtn(char4);
-long4 __ovld __cnfn convert_long4_sat_rtn(char4);
-long4 __ovld __cnfn convert_long4(char4);
-long4 __ovld __cnfn convert_long4_sat(char4);
-long4 __ovld __cnfn convert_long4_rte(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
-long4 __ovld __cnfn convert_long4_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_rtn(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
-long4 __ovld __cnfn convert_long4(uchar4);
-long4 __ovld __cnfn convert_long4_sat(uchar4);
-long4 __ovld __cnfn convert_long4_rte(short4);
-long4 __ovld __cnfn convert_long4_sat_rte(short4);
-long4 __ovld __cnfn convert_long4_rtz(short4);
-long4 __ovld __cnfn convert_long4_sat_rtz(short4);
-long4 __ovld __cnfn convert_long4_rtp(short4);
-long4 __ovld __cnfn convert_long4_sat_rtp(short4);
-long4 __ovld __cnfn convert_long4_rtn(short4);
-long4 __ovld __cnfn convert_long4_sat_rtn(short4);
-long4 __ovld __cnfn convert_long4(short4);
-long4 __ovld __cnfn convert_long4_sat(short4);
-long4 __ovld __cnfn convert_long4_rte(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
-long4 __ovld __cnfn convert_long4_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_rtn(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
-long4 __ovld __cnfn convert_long4(ushort4);
-long4 __ovld __cnfn convert_long4_sat(ushort4);
-long4 __ovld __cnfn convert_long4_rte(int4);
-long4 __ovld __cnfn convert_long4_sat_rte(int4);
-long4 __ovld __cnfn convert_long4_rtz(int4);
-long4 __ovld __cnfn convert_long4_sat_rtz(int4);
-long4 __ovld __cnfn convert_long4_rtp(int4);
-long4 __ovld __cnfn convert_long4_sat_rtp(int4);
-long4 __ovld __cnfn convert_long4_rtn(int4);
-long4 __ovld __cnfn convert_long4_sat_rtn(int4);
-long4 __ovld __cnfn convert_long4(int4);
-long4 __ovld __cnfn convert_long4_sat(int4);
-long4 __ovld __cnfn convert_long4_rte(uint4);
-long4 __ovld __cnfn convert_long4_sat_rte(uint4);
-long4 __ovld __cnfn convert_long4_rtz(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
-long4 __ovld __cnfn convert_long4_rtp(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
-long4 __ovld __cnfn convert_long4_rtn(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
-long4 __ovld __cnfn convert_long4(uint4);
-long4 __ovld __cnfn convert_long4_sat(uint4);
-long4 __ovld __cnfn convert_long4_rte(long4);
-long4 __ovld __cnfn convert_long4_sat_rte(long4);
-long4 __ovld __cnfn convert_long4_rtz(long4);
-long4 __ovld __cnfn convert_long4_sat_rtz(long4);
-long4 __ovld __cnfn convert_long4_rtp(long4);
-long4 __ovld __cnfn convert_long4_sat_rtp(long4);
-long4 __ovld __cnfn convert_long4_rtn(long4);
-long4 __ovld __cnfn convert_long4_sat_rtn(long4);
-long4 __ovld __cnfn convert_long4(long4);
-long4 __ovld __cnfn convert_long4_sat(long4);
-long4 __ovld __cnfn convert_long4_rte(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
-long4 __ovld __cnfn convert_long4_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_rtn(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
-long4 __ovld __cnfn convert_long4(ulong4);
-long4 __ovld __cnfn convert_long4_sat(ulong4);
-long4 __ovld __cnfn convert_long4_rte(float4);
-long4 __ovld __cnfn convert_long4_sat_rte(float4);
-long4 __ovld __cnfn convert_long4_rtz(float4);
-long4 __ovld __cnfn convert_long4_sat_rtz(float4);
-long4 __ovld __cnfn convert_long4_rtp(float4);
-long4 __ovld __cnfn convert_long4_sat_rtp(float4);
-long4 __ovld __cnfn convert_long4_rtn(float4);
-long4 __ovld __cnfn convert_long4_sat_rtn(float4);
-long4 __ovld __cnfn convert_long4(float4);
-long4 __ovld __cnfn convert_long4_sat(float4);
-ulong4 __ovld __cnfn convert_ulong4_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat(char4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat(short4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat(int4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat(long4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat(float4);
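Note on the saturating unsigned overloads above: per OpenCL C s6.2.3, the _sat variants clamp out-of-range values to the destination type's limits, so with an unsigned destination such as ulong4 a negative source element saturates to 0 instead of wrapping. A minimal kernel sketch, with illustrative names that are not part of this header:

__kernel void sat_demo(__global ulong4 *out) {
    long4 v = (long4)(-1, 0, 1, 4);
    // convert_ulong4_sat is declared above; the _sat modifier clamps the
    // negative lane to 0 rather than reinterpreting its bit pattern.
    out[0] = convert_ulong4_sat(v);   // yields (0, 0, 1, 4)
}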
-float4 __ovld __cnfn convert_float4_rte(char4);
-float4 __ovld __cnfn convert_float4_rtz(char4);
-float4 __ovld __cnfn convert_float4_rtp(char4);
-float4 __ovld __cnfn convert_float4_rtn(char4);
-float4 __ovld __cnfn convert_float4(char4);
-float4 __ovld __cnfn convert_float4_rte(uchar4);
-float4 __ovld __cnfn convert_float4_rtz(uchar4);
-float4 __ovld __cnfn convert_float4_rtp(uchar4);
-float4 __ovld __cnfn convert_float4_rtn(uchar4);
-float4 __ovld __cnfn convert_float4(uchar4);
-float4 __ovld __cnfn convert_float4_rte(short4);
-float4 __ovld __cnfn convert_float4_rtz(short4);
-float4 __ovld __cnfn convert_float4_rtp(short4);
-float4 __ovld __cnfn convert_float4_rtn(short4);
-float4 __ovld __cnfn convert_float4(short4);
-float4 __ovld __cnfn convert_float4_rte(ushort4);
-float4 __ovld __cnfn convert_float4_rtz(ushort4);
-float4 __ovld __cnfn convert_float4_rtp(ushort4);
-float4 __ovld __cnfn convert_float4_rtn(ushort4);
-float4 __ovld __cnfn convert_float4(ushort4);
-float4 __ovld __cnfn convert_float4_rte(int4);
-float4 __ovld __cnfn convert_float4_rtz(int4);
-float4 __ovld __cnfn convert_float4_rtp(int4);
-float4 __ovld __cnfn convert_float4_rtn(int4);
-float4 __ovld __cnfn convert_float4(int4);
-float4 __ovld __cnfn convert_float4_rte(uint4);
-float4 __ovld __cnfn convert_float4_rtz(uint4);
-float4 __ovld __cnfn convert_float4_rtp(uint4);
-float4 __ovld __cnfn convert_float4_rtn(uint4);
-float4 __ovld __cnfn convert_float4(uint4);
-float4 __ovld __cnfn convert_float4_rte(long4);
-float4 __ovld __cnfn convert_float4_rtz(long4);
-float4 __ovld __cnfn convert_float4_rtp(long4);
-float4 __ovld __cnfn convert_float4_rtn(long4);
-float4 __ovld __cnfn convert_float4(long4);
-float4 __ovld __cnfn convert_float4_rte(ulong4);
-float4 __ovld __cnfn convert_float4_rtz(ulong4);
-float4 __ovld __cnfn convert_float4_rtp(ulong4);
-float4 __ovld __cnfn convert_float4_rtn(ulong4);
-float4 __ovld __cnfn convert_float4(ulong4);
-float4 __ovld __cnfn convert_float4_rte(float4);
-float4 __ovld __cnfn convert_float4_rtz(float4);
-float4 __ovld __cnfn convert_float4_rtp(float4);
-float4 __ovld __cnfn convert_float4_rtn(float4);
-float4 __ovld __cnfn convert_float4(float4);
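That closes out the 4-element vector conversions. Every declaration in this block follows the OpenCL C s6.2.3 naming scheme convert_<destType><n>[_sat][_<roundingMode>], where _rte, _rtz, _rtp, and _rtn select round-to-nearest-even, toward zero, toward +infinity, and toward -infinity. As a rough illustration of how kernel code calls these overloads (the kernel and buffer names here are hypothetical, not part of this header):

__kernel void pack_pixels(__global const float4 *src, __global uchar4 *dst) {
    size_t i = get_global_id(0);
    // Scale normalized colors to byte range, round to nearest even, and
    // clamp to [0, 255] via the _sat modifier instead of wrapping.
    dst[i] = convert_uchar4_sat_rte(src[i] * 255.0f);
}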
-char8 __ovld __cnfn convert_char8_rte(char8);
-char8 __ovld __cnfn convert_char8_sat_rte(char8);
-char8 __ovld __cnfn convert_char8_rtz(char8);
-char8 __ovld __cnfn convert_char8_sat_rtz(char8);
-char8 __ovld __cnfn convert_char8_rtp(char8);
-char8 __ovld __cnfn convert_char8_sat_rtp(char8);
-char8 __ovld __cnfn convert_char8_rtn(char8);
-char8 __ovld __cnfn convert_char8_sat_rtn(char8);
-char8 __ovld __cnfn convert_char8(char8);
-char8 __ovld __cnfn convert_char8_sat(char8);
-char8 __ovld __cnfn convert_char8_rte(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
-char8 __ovld __cnfn convert_char8_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_rtn(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
-char8 __ovld __cnfn convert_char8(uchar8);
-char8 __ovld __cnfn convert_char8_sat(uchar8);
-char8 __ovld __cnfn convert_char8_rte(short8);
-char8 __ovld __cnfn convert_char8_sat_rte(short8);
-char8 __ovld __cnfn convert_char8_rtz(short8);
-char8 __ovld __cnfn convert_char8_sat_rtz(short8);
-char8 __ovld __cnfn convert_char8_rtp(short8);
-char8 __ovld __cnfn convert_char8_sat_rtp(short8);
-char8 __ovld __cnfn convert_char8_rtn(short8);
-char8 __ovld __cnfn convert_char8_sat_rtn(short8);
-char8 __ovld __cnfn convert_char8(short8);
-char8 __ovld __cnfn convert_char8_sat(short8);
-char8 __ovld __cnfn convert_char8_rte(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
-char8 __ovld __cnfn convert_char8_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_rtn(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
-char8 __ovld __cnfn convert_char8(ushort8);
-char8 __ovld __cnfn convert_char8_sat(ushort8);
-char8 __ovld __cnfn convert_char8_rte(int8);
-char8 __ovld __cnfn convert_char8_sat_rte(int8);
-char8 __ovld __cnfn convert_char8_rtz(int8);
-char8 __ovld __cnfn convert_char8_sat_rtz(int8);
-char8 __ovld __cnfn convert_char8_rtp(int8);
-char8 __ovld __cnfn convert_char8_sat_rtp(int8);
-char8 __ovld __cnfn convert_char8_rtn(int8);
-char8 __ovld __cnfn convert_char8_sat_rtn(int8);
-char8 __ovld __cnfn convert_char8(int8);
-char8 __ovld __cnfn convert_char8_sat(int8);
-char8 __ovld __cnfn convert_char8_rte(uint8);
-char8 __ovld __cnfn convert_char8_sat_rte(uint8);
-char8 __ovld __cnfn convert_char8_rtz(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
-char8 __ovld __cnfn convert_char8_rtp(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
-char8 __ovld __cnfn convert_char8_rtn(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
-char8 __ovld __cnfn convert_char8(uint8);
-char8 __ovld __cnfn convert_char8_sat(uint8);
-char8 __ovld __cnfn convert_char8_rte(long8);
-char8 __ovld __cnfn convert_char8_sat_rte(long8);
-char8 __ovld __cnfn convert_char8_rtz(long8);
-char8 __ovld __cnfn convert_char8_sat_rtz(long8);
-char8 __ovld __cnfn convert_char8_rtp(long8);
-char8 __ovld __cnfn convert_char8_sat_rtp(long8);
-char8 __ovld __cnfn convert_char8_rtn(long8);
-char8 __ovld __cnfn convert_char8_sat_rtn(long8);
-char8 __ovld __cnfn convert_char8(long8);
-char8 __ovld __cnfn convert_char8_sat(long8);
-char8 __ovld __cnfn convert_char8_rte(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
-char8 __ovld __cnfn convert_char8_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_rtn(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
-char8 __ovld __cnfn convert_char8(ulong8);
-char8 __ovld __cnfn convert_char8_sat(ulong8);
-char8 __ovld __cnfn convert_char8_rte(float8);
-char8 __ovld __cnfn convert_char8_sat_rte(float8);
-char8 __ovld __cnfn convert_char8_rtz(float8);
-char8 __ovld __cnfn convert_char8_sat_rtz(float8);
-char8 __ovld __cnfn convert_char8_rtp(float8);
-char8 __ovld __cnfn convert_char8_sat_rtp(float8);
-char8 __ovld __cnfn convert_char8_rtn(float8);
-char8 __ovld __cnfn convert_char8_sat_rtn(float8);
-char8 __ovld __cnfn convert_char8(float8);
-char8 __ovld __cnfn convert_char8_sat(float8);
-uchar8 __ovld __cnfn convert_uchar8_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat(char8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat(short8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat(int8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat(long8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat(float8);
-short8 __ovld __cnfn convert_short8_rte(char8);
-short8 __ovld __cnfn convert_short8_sat_rte(char8);
-short8 __ovld __cnfn convert_short8_rtz(char8);
-short8 __ovld __cnfn convert_short8_sat_rtz(char8);
-short8 __ovld __cnfn convert_short8_rtp(char8);
-short8 __ovld __cnfn convert_short8_sat_rtp(char8);
-short8 __ovld __cnfn convert_short8_rtn(char8);
-short8 __ovld __cnfn convert_short8_sat_rtn(char8);
-short8 __ovld __cnfn convert_short8(char8);
-short8 __ovld __cnfn convert_short8_sat(char8);
-short8 __ovld __cnfn convert_short8_rte(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
-short8 __ovld __cnfn convert_short8_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_rtn(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
-short8 __ovld __cnfn convert_short8(uchar8);
-short8 __ovld __cnfn convert_short8_sat(uchar8);
-short8 __ovld __cnfn convert_short8_rte(short8);
-short8 __ovld __cnfn convert_short8_sat_rte(short8);
-short8 __ovld __cnfn convert_short8_rtz(short8);
-short8 __ovld __cnfn convert_short8_sat_rtz(short8);
-short8 __ovld __cnfn convert_short8_rtp(short8);
-short8 __ovld __cnfn convert_short8_sat_rtp(short8);
-short8 __ovld __cnfn convert_short8_rtn(short8);
-short8 __ovld __cnfn convert_short8_sat_rtn(short8);
-short8 __ovld __cnfn convert_short8(short8);
-short8 __ovld __cnfn convert_short8_sat(short8);
-short8 __ovld __cnfn convert_short8_rte(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
-short8 __ovld __cnfn convert_short8_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_rtn(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
-short8 __ovld __cnfn convert_short8(ushort8);
-short8 __ovld __cnfn convert_short8_sat(ushort8);
-short8 __ovld __cnfn convert_short8_rte(int8);
-short8 __ovld __cnfn convert_short8_sat_rte(int8);
-short8 __ovld __cnfn convert_short8_rtz(int8);
-short8 __ovld __cnfn convert_short8_sat_rtz(int8);
-short8 __ovld __cnfn convert_short8_rtp(int8);
-short8 __ovld __cnfn convert_short8_sat_rtp(int8);
-short8 __ovld __cnfn convert_short8_rtn(int8);
-short8 __ovld __cnfn convert_short8_sat_rtn(int8);
-short8 __ovld __cnfn convert_short8(int8);
-short8 __ovld __cnfn convert_short8_sat(int8);
-short8 __ovld __cnfn convert_short8_rte(uint8);
-short8 __ovld __cnfn convert_short8_sat_rte(uint8);
-short8 __ovld __cnfn convert_short8_rtz(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
-short8 __ovld __cnfn convert_short8_rtp(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
-short8 __ovld __cnfn convert_short8_rtn(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
-short8 __ovld __cnfn convert_short8(uint8);
-short8 __ovld __cnfn convert_short8_sat(uint8);
-short8 __ovld __cnfn convert_short8_rte(long8);
-short8 __ovld __cnfn convert_short8_sat_rte(long8);
-short8 __ovld __cnfn convert_short8_rtz(long8);
-short8 __ovld __cnfn convert_short8_sat_rtz(long8);
-short8 __ovld __cnfn convert_short8_rtp(long8);
-short8 __ovld __cnfn convert_short8_sat_rtp(long8);
-short8 __ovld __cnfn convert_short8_rtn(long8);
-short8 __ovld __cnfn convert_short8_sat_rtn(long8);
-short8 __ovld __cnfn convert_short8(long8);
-short8 __ovld __cnfn convert_short8_sat(long8);
-short8 __ovld __cnfn convert_short8_rte(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
-short8 __ovld __cnfn convert_short8_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_rtn(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
-short8 __ovld __cnfn convert_short8(ulong8);
-short8 __ovld __cnfn convert_short8_sat(ulong8);
-short8 __ovld __cnfn convert_short8_rte(float8);
-short8 __ovld __cnfn convert_short8_sat_rte(float8);
-short8 __ovld __cnfn convert_short8_rtz(float8);
-short8 __ovld __cnfn convert_short8_sat_rtz(float8);
-short8 __ovld __cnfn convert_short8_rtp(float8);
-short8 __ovld __cnfn convert_short8_sat_rtp(float8);
-short8 __ovld __cnfn convert_short8_rtn(float8);
-short8 __ovld __cnfn convert_short8_sat_rtn(float8);
-short8 __ovld __cnfn convert_short8(float8);
-short8 __ovld __cnfn convert_short8_sat(float8);
-ushort8 __ovld __cnfn convert_ushort8_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat(char8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat(short8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat(int8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat(long8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat(float8);
-int8 __ovld __cnfn convert_int8_rte(char8);
-int8 __ovld __cnfn convert_int8_sat_rte(char8);
-int8 __ovld __cnfn convert_int8_rtz(char8);
-int8 __ovld __cnfn convert_int8_sat_rtz(char8);
-int8 __ovld __cnfn convert_int8_rtp(char8);
-int8 __ovld __cnfn convert_int8_sat_rtp(char8);
-int8 __ovld __cnfn convert_int8_rtn(char8);
-int8 __ovld __cnfn convert_int8_sat_rtn(char8);
-int8 __ovld __cnfn convert_int8(char8);
-int8 __ovld __cnfn convert_int8_sat(char8);
-int8 __ovld __cnfn convert_int8_rte(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
-int8 __ovld __cnfn convert_int8_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_rtn(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
-int8 __ovld __cnfn convert_int8(uchar8);
-int8 __ovld __cnfn convert_int8_sat(uchar8);
-int8 __ovld __cnfn convert_int8_rte(short8);
-int8 __ovld __cnfn convert_int8_sat_rte(short8);
-int8 __ovld __cnfn convert_int8_rtz(short8);
-int8 __ovld __cnfn convert_int8_sat_rtz(short8);
-int8 __ovld __cnfn convert_int8_rtp(short8);
-int8 __ovld __cnfn convert_int8_sat_rtp(short8);
-int8 __ovld __cnfn convert_int8_rtn(short8);
-int8 __ovld __cnfn convert_int8_sat_rtn(short8);
-int8 __ovld __cnfn convert_int8(short8);
-int8 __ovld __cnfn convert_int8_sat(short8);
-int8 __ovld __cnfn convert_int8_rte(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
-int8 __ovld __cnfn convert_int8_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_rtn(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
-int8 __ovld __cnfn convert_int8(ushort8);
-int8 __ovld __cnfn convert_int8_sat(ushort8);
-int8 __ovld __cnfn convert_int8_rte(int8);
-int8 __ovld __cnfn convert_int8_sat_rte(int8);
-int8 __ovld __cnfn convert_int8_rtz(int8);
-int8 __ovld __cnfn convert_int8_sat_rtz(int8);
-int8 __ovld __cnfn convert_int8_rtp(int8);
-int8 __ovld __cnfn convert_int8_sat_rtp(int8);
-int8 __ovld __cnfn convert_int8_rtn(int8);
-int8 __ovld __cnfn convert_int8_sat_rtn(int8);
-int8 __ovld __cnfn convert_int8(int8);
-int8 __ovld __cnfn convert_int8_sat(int8);
-int8 __ovld __cnfn convert_int8_rte(uint8);
-int8 __ovld __cnfn convert_int8_sat_rte(uint8);
-int8 __ovld __cnfn convert_int8_rtz(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
-int8 __ovld __cnfn convert_int8_rtp(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
-int8 __ovld __cnfn convert_int8_rtn(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
-int8 __ovld __cnfn convert_int8(uint8);
-int8 __ovld __cnfn convert_int8_sat(uint8);
-int8 __ovld __cnfn convert_int8_rte(long8);
-int8 __ovld __cnfn convert_int8_sat_rte(long8);
-int8 __ovld __cnfn convert_int8_rtz(long8);
-int8 __ovld __cnfn convert_int8_sat_rtz(long8);
-int8 __ovld __cnfn convert_int8_rtp(long8);
-int8 __ovld __cnfn convert_int8_sat_rtp(long8);
-int8 __ovld __cnfn convert_int8_rtn(long8);
-int8 __ovld __cnfn convert_int8_sat_rtn(long8);
-int8 __ovld __cnfn convert_int8(long8);
-int8 __ovld __cnfn convert_int8_sat(long8);
-int8 __ovld __cnfn convert_int8_rte(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
-int8 __ovld __cnfn convert_int8_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_rtn(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
-int8 __ovld __cnfn convert_int8(ulong8);
-int8 __ovld __cnfn convert_int8_sat(ulong8);
-int8 __ovld __cnfn convert_int8_rte(float8);
-int8 __ovld __cnfn convert_int8_sat_rte(float8);
-int8 __ovld __cnfn convert_int8_rtz(float8);
-int8 __ovld __cnfn convert_int8_sat_rtz(float8);
-int8 __ovld __cnfn convert_int8_rtp(float8);
-int8 __ovld __cnfn convert_int8_sat_rtp(float8);
-int8 __ovld __cnfn convert_int8_rtn(float8);
-int8 __ovld __cnfn convert_int8_sat_rtn(float8);
-int8 __ovld __cnfn convert_int8(float8);
-int8 __ovld __cnfn convert_int8_sat(float8);
-uint8 __ovld __cnfn convert_uint8_rte(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
-uint8 __ovld __cnfn convert_uint8_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_rtn(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
-uint8 __ovld __cnfn convert_uint8(char8);
-uint8 __ovld __cnfn convert_uint8_sat(char8);
-uint8 __ovld __cnfn convert_uint8_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat(uchar8);
-uint8 __ovld __cnfn convert_uint8_rte(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
-uint8 __ovld __cnfn convert_uint8_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_rtn(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
-uint8 __ovld __cnfn convert_uint8(short8);
-uint8 __ovld __cnfn convert_uint8_sat(short8);
-uint8 __ovld __cnfn convert_uint8_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat(ushort8);
-uint8 __ovld __cnfn convert_uint8_rte(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
-uint8 __ovld __cnfn convert_uint8_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_rtn(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
-uint8 __ovld __cnfn convert_uint8(int8);
-uint8 __ovld __cnfn convert_uint8_sat(int8);
-uint8 __ovld __cnfn convert_uint8_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8(uint8);
-uint8 __ovld __cnfn convert_uint8_sat(uint8);
-uint8 __ovld __cnfn convert_uint8_rte(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
-uint8 __ovld __cnfn convert_uint8_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_rtn(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
-uint8 __ovld __cnfn convert_uint8(long8);
-uint8 __ovld __cnfn convert_uint8_sat(long8);
-uint8 __ovld __cnfn convert_uint8_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat(ulong8);
-uint8 __ovld __cnfn convert_uint8_rte(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
-uint8 __ovld __cnfn convert_uint8_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_rtn(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
-uint8 __ovld __cnfn convert_uint8(float8);
-uint8 __ovld __cnfn convert_uint8_sat(float8);
-long8 __ovld __cnfn convert_long8_rte(char8);
-long8 __ovld __cnfn convert_long8_sat_rte(char8);
-long8 __ovld __cnfn convert_long8_rtz(char8);
-long8 __ovld __cnfn convert_long8_sat_rtz(char8);
-long8 __ovld __cnfn convert_long8_rtp(char8);
-long8 __ovld __cnfn convert_long8_sat_rtp(char8);
-long8 __ovld __cnfn convert_long8_rtn(char8);
-long8 __ovld __cnfn convert_long8_sat_rtn(char8);
-long8 __ovld __cnfn convert_long8(char8);
-long8 __ovld __cnfn convert_long8_sat(char8);
-long8 __ovld __cnfn convert_long8_rte(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
-long8 __ovld __cnfn convert_long8_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_rtn(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
-long8 __ovld __cnfn convert_long8(uchar8);
-long8 __ovld __cnfn convert_long8_sat(uchar8);
-long8 __ovld __cnfn convert_long8_rte(short8);
-long8 __ovld __cnfn convert_long8_sat_rte(short8);
-long8 __ovld __cnfn convert_long8_rtz(short8);
-long8 __ovld __cnfn convert_long8_sat_rtz(short8);
-long8 __ovld __cnfn convert_long8_rtp(short8);
-long8 __ovld __cnfn convert_long8_sat_rtp(short8);
-long8 __ovld __cnfn convert_long8_rtn(short8);
-long8 __ovld __cnfn convert_long8_sat_rtn(short8);
-long8 __ovld __cnfn convert_long8(short8);
-long8 __ovld __cnfn convert_long8_sat(short8);
-long8 __ovld __cnfn convert_long8_rte(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
-long8 __ovld __cnfn convert_long8_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_rtn(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
-long8 __ovld __cnfn convert_long8(ushort8);
-long8 __ovld __cnfn convert_long8_sat(ushort8);
-long8 __ovld __cnfn convert_long8_rte(int8);
-long8 __ovld __cnfn convert_long8_sat_rte(int8);
-long8 __ovld __cnfn convert_long8_rtz(int8);
-long8 __ovld __cnfn convert_long8_sat_rtz(int8);
-long8 __ovld __cnfn convert_long8_rtp(int8);
-long8 __ovld __cnfn convert_long8_sat_rtp(int8);
-long8 __ovld __cnfn convert_long8_rtn(int8);
-long8 __ovld __cnfn convert_long8_sat_rtn(int8);
-long8 __ovld __cnfn convert_long8(int8);
-long8 __ovld __cnfn convert_long8_sat(int8);
-long8 __ovld __cnfn convert_long8_rte(uint8);
-long8 __ovld __cnfn convert_long8_sat_rte(uint8);
-long8 __ovld __cnfn convert_long8_rtz(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
-long8 __ovld __cnfn convert_long8_rtp(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
-long8 __ovld __cnfn convert_long8_rtn(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
-long8 __ovld __cnfn convert_long8(uint8);
-long8 __ovld __cnfn convert_long8_sat(uint8);
-long8 __ovld __cnfn convert_long8_rte(long8);
-long8 __ovld __cnfn convert_long8_sat_rte(long8);
-long8 __ovld __cnfn convert_long8_rtz(long8);
-long8 __ovld __cnfn convert_long8_sat_rtz(long8);
-long8 __ovld __cnfn convert_long8_rtp(long8);
-long8 __ovld __cnfn convert_long8_sat_rtp(long8);
-long8 __ovld __cnfn convert_long8_rtn(long8);
-long8 __ovld __cnfn convert_long8_sat_rtn(long8);
-long8 __ovld __cnfn convert_long8(long8);
-long8 __ovld __cnfn convert_long8_sat(long8);
-long8 __ovld __cnfn convert_long8_rte(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
-long8 __ovld __cnfn convert_long8_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_rtn(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
-long8 __ovld __cnfn convert_long8(ulong8);
-long8 __ovld __cnfn convert_long8_sat(ulong8);
-long8 __ovld __cnfn convert_long8_rte(float8);
-long8 __ovld __cnfn convert_long8_sat_rte(float8);
-long8 __ovld __cnfn convert_long8_rtz(float8);
-long8 __ovld __cnfn convert_long8_sat_rtz(float8);
-long8 __ovld __cnfn convert_long8_rtp(float8);
-long8 __ovld __cnfn convert_long8_sat_rtp(float8);
-long8 __ovld __cnfn convert_long8_rtn(float8);
-long8 __ovld __cnfn convert_long8_sat_rtn(float8);
-long8 __ovld __cnfn convert_long8(float8);
-long8 __ovld __cnfn convert_long8_sat(float8);
-ulong8 __ovld __cnfn convert_ulong8_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat(char8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat(short8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat(int8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat(long8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat(float8);
-float8 __ovld __cnfn convert_float8_rte(char8);
-float8 __ovld __cnfn convert_float8_rtz(char8);
-float8 __ovld __cnfn convert_float8_rtp(char8);
-float8 __ovld __cnfn convert_float8_rtn(char8);
-float8 __ovld __cnfn convert_float8(char8);
-float8 __ovld __cnfn convert_float8_rte(uchar8);
-float8 __ovld __cnfn convert_float8_rtz(uchar8);
-float8 __ovld __cnfn convert_float8_rtp(uchar8);
-float8 __ovld __cnfn convert_float8_rtn(uchar8);
-float8 __ovld __cnfn convert_float8(uchar8);
-float8 __ovld __cnfn convert_float8_rte(short8);
-float8 __ovld __cnfn convert_float8_rtz(short8);
-float8 __ovld __cnfn convert_float8_rtp(short8);
-float8 __ovld __cnfn convert_float8_rtn(short8);
-float8 __ovld __cnfn convert_float8(short8);
-float8 __ovld __cnfn convert_float8_rte(ushort8);
-float8 __ovld __cnfn convert_float8_rtz(ushort8);
-float8 __ovld __cnfn convert_float8_rtp(ushort8);
-float8 __ovld __cnfn convert_float8_rtn(ushort8);
-float8 __ovld __cnfn convert_float8(ushort8);
-float8 __ovld __cnfn convert_float8_rte(int8);
-float8 __ovld __cnfn convert_float8_rtz(int8);
-float8 __ovld __cnfn convert_float8_rtp(int8);
-float8 __ovld __cnfn convert_float8_rtn(int8);
-float8 __ovld __cnfn convert_float8(int8);
-float8 __ovld __cnfn convert_float8_rte(uint8);
-float8 __ovld __cnfn convert_float8_rtz(uint8);
-float8 __ovld __cnfn convert_float8_rtp(uint8);
-float8 __ovld __cnfn convert_float8_rtn(uint8);
-float8 __ovld __cnfn convert_float8(uint8);
-float8 __ovld __cnfn convert_float8_rte(long8);
-float8 __ovld __cnfn convert_float8_rtz(long8);
-float8 __ovld __cnfn convert_float8_rtp(long8);
-float8 __ovld __cnfn convert_float8_rtn(long8);
-float8 __ovld __cnfn convert_float8(long8);
-float8 __ovld __cnfn convert_float8_rte(ulong8);
-float8 __ovld __cnfn convert_float8_rtz(ulong8);
-float8 __ovld __cnfn convert_float8_rtp(ulong8);
-float8 __ovld __cnfn convert_float8_rtn(ulong8);
-float8 __ovld __cnfn convert_float8(ulong8);
-float8 __ovld __cnfn convert_float8_rte(float8);
-float8 __ovld __cnfn convert_float8_rtz(float8);
-float8 __ovld __cnfn convert_float8_rtp(float8);
-float8 __ovld __cnfn convert_float8_rtn(float8);
-float8 __ovld __cnfn convert_float8(float8);
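As with the 4-element block, the convert_float8 set above has no _sat overloads: the OpenCL C spec defines the saturation modifier only for integer destination types, so float destinations get only the plain and rounding-mode forms. A hedged sketch contrasting the two cases, again with illustrative names:

__kernel void widen_and_clamp(__global const int8 *in,
                              __global float8 *out,
                              __global char8 *clamped) {
    size_t i = get_global_id(0);
    out[i] = convert_float8(in[i]);        // no _sat variant exists for float8
    clamped[i] = convert_char8_sat(in[i]); // integer destination: clamps to [-128, 127]
}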
-char16 __ovld __cnfn convert_char16_rte(char16);
-char16 __ovld __cnfn convert_char16_sat_rte(char16);
-char16 __ovld __cnfn convert_char16_rtz(char16);
-char16 __ovld __cnfn convert_char16_sat_rtz(char16);
-char16 __ovld __cnfn convert_char16_rtp(char16);
-char16 __ovld __cnfn convert_char16_sat_rtp(char16);
-char16 __ovld __cnfn convert_char16_rtn(char16);
-char16 __ovld __cnfn convert_char16_sat_rtn(char16);
-char16 __ovld __cnfn convert_char16(char16);
-char16 __ovld __cnfn convert_char16_sat(char16);
-char16 __ovld __cnfn convert_char16_rte(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
-char16 __ovld __cnfn convert_char16_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_rtn(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
-char16 __ovld __cnfn convert_char16(uchar16);
-char16 __ovld __cnfn convert_char16_sat(uchar16);
-char16 __ovld __cnfn convert_char16_rte(short16);
-char16 __ovld __cnfn convert_char16_sat_rte(short16);
-char16 __ovld __cnfn convert_char16_rtz(short16);
-char16 __ovld __cnfn convert_char16_sat_rtz(short16);
-char16 __ovld __cnfn convert_char16_rtp(short16);
-char16 __ovld __cnfn convert_char16_sat_rtp(short16);
-char16 __ovld __cnfn convert_char16_rtn(short16);
-char16 __ovld __cnfn convert_char16_sat_rtn(short16);
-char16 __ovld __cnfn convert_char16(short16);
-char16 __ovld __cnfn convert_char16_sat(short16);
-char16 __ovld __cnfn convert_char16_rte(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
-char16 __ovld __cnfn convert_char16_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_rtn(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
-char16 __ovld __cnfn convert_char16(ushort16);
-char16 __ovld __cnfn convert_char16_sat(ushort16);
-char16 __ovld __cnfn convert_char16_rte(int16);
-char16 __ovld __cnfn convert_char16_sat_rte(int16);
-char16 __ovld __cnfn convert_char16_rtz(int16);
-char16 __ovld __cnfn convert_char16_sat_rtz(int16);
-char16 __ovld __cnfn convert_char16_rtp(int16);
-char16 __ovld __cnfn convert_char16_sat_rtp(int16);
-char16 __ovld __cnfn convert_char16_rtn(int16);
-char16 __ovld __cnfn convert_char16_sat_rtn(int16);
-char16 __ovld __cnfn convert_char16(int16);
-char16 __ovld __cnfn convert_char16_sat(int16);
-char16 __ovld __cnfn convert_char16_rte(uint16);
-char16 __ovld __cnfn convert_char16_sat_rte(uint16);
-char16 __ovld __cnfn convert_char16_rtz(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
-char16 __ovld __cnfn convert_char16_rtp(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
-char16 __ovld __cnfn convert_char16_rtn(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
-char16 __ovld __cnfn convert_char16(uint16);
-char16 __ovld __cnfn convert_char16_sat(uint16);
-char16 __ovld __cnfn convert_char16_rte(long16);
-char16 __ovld __cnfn convert_char16_sat_rte(long16);
-char16 __ovld __cnfn convert_char16_rtz(long16);
-char16 __ovld __cnfn convert_char16_sat_rtz(long16);
-char16 __ovld __cnfn convert_char16_rtp(long16);
-char16 __ovld __cnfn convert_char16_sat_rtp(long16);
-char16 __ovld __cnfn convert_char16_rtn(long16);
-char16 __ovld __cnfn convert_char16_sat_rtn(long16);
-char16 __ovld __cnfn convert_char16(long16);
-char16 __ovld __cnfn convert_char16_sat(long16);
-char16 __ovld __cnfn convert_char16_rte(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
-char16 __ovld __cnfn convert_char16_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_rtn(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
-char16 __ovld __cnfn convert_char16(ulong16);
-char16 __ovld __cnfn convert_char16_sat(ulong16);
-char16 __ovld __cnfn convert_char16_rte(float16);
-char16 __ovld __cnfn convert_char16_sat_rte(float16);
-char16 __ovld __cnfn convert_char16_rtz(float16);
-char16 __ovld __cnfn convert_char16_sat_rtz(float16);
-char16 __ovld __cnfn convert_char16_rtp(float16);
-char16 __ovld __cnfn convert_char16_sat_rtp(float16);
-char16 __ovld __cnfn convert_char16_rtn(float16);
-char16 __ovld __cnfn convert_char16_sat_rtn(float16);
-char16 __ovld __cnfn convert_char16(float16);
-char16 __ovld __cnfn convert_char16_sat(float16);
-uchar16 __ovld __cnfn convert_uchar16_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat(char16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat(short16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat(int16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat(long16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat(float16);
-short16 __ovld __cnfn convert_short16_rte(char16);
-short16 __ovld __cnfn convert_short16_sat_rte(char16);
-short16 __ovld __cnfn convert_short16_rtz(char16);
-short16 __ovld __cnfn convert_short16_sat_rtz(char16);
-short16 __ovld __cnfn convert_short16_rtp(char16);
-short16 __ovld __cnfn convert_short16_sat_rtp(char16);
-short16 __ovld __cnfn convert_short16_rtn(char16);
-short16 __ovld __cnfn convert_short16_sat_rtn(char16);
-short16 __ovld __cnfn convert_short16(char16);
-short16 __ovld __cnfn convert_short16_sat(char16);
-short16 __ovld __cnfn convert_short16_rte(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
-short16 __ovld __cnfn convert_short16_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_rtn(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
-short16 __ovld __cnfn convert_short16(uchar16);
-short16 __ovld __cnfn convert_short16_sat(uchar16);
-short16 __ovld __cnfn convert_short16_rte(short16);
-short16 __ovld __cnfn convert_short16_sat_rte(short16);
-short16 __ovld __cnfn convert_short16_rtz(short16);
-short16 __ovld __cnfn convert_short16_sat_rtz(short16);
-short16 __ovld __cnfn convert_short16_rtp(short16);
-short16 __ovld __cnfn convert_short16_sat_rtp(short16);
-short16 __ovld __cnfn convert_short16_rtn(short16);
-short16 __ovld __cnfn convert_short16_sat_rtn(short16);
-short16 __ovld __cnfn convert_short16(short16);
-short16 __ovld __cnfn convert_short16_sat(short16);
-short16 __ovld __cnfn convert_short16_rte(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
-short16 __ovld __cnfn convert_short16_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_rtn(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
-short16 __ovld __cnfn convert_short16(ushort16);
-short16 __ovld __cnfn convert_short16_sat(ushort16);
-short16 __ovld __cnfn convert_short16_rte(int16);
-short16 __ovld __cnfn convert_short16_sat_rte(int16);
-short16 __ovld __cnfn convert_short16_rtz(int16);
-short16 __ovld __cnfn convert_short16_sat_rtz(int16);
-short16 __ovld __cnfn convert_short16_rtp(int16);
-short16 __ovld __cnfn convert_short16_sat_rtp(int16);
-short16 __ovld __cnfn convert_short16_rtn(int16);
-short16 __ovld __cnfn convert_short16_sat_rtn(int16);
-short16 __ovld __cnfn convert_short16(int16);
-short16 __ovld __cnfn convert_short16_sat(int16);
-short16 __ovld __cnfn convert_short16_rte(uint16);
-short16 __ovld __cnfn convert_short16_sat_rte(uint16);
-short16 __ovld __cnfn convert_short16_rtz(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
-short16 __ovld __cnfn convert_short16_rtp(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
-short16 __ovld __cnfn convert_short16_rtn(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
-short16 __ovld __cnfn convert_short16(uint16);
-short16 __ovld __cnfn convert_short16_sat(uint16);
-short16 __ovld __cnfn convert_short16_rte(long16);
-short16 __ovld __cnfn convert_short16_sat_rte(long16);
-short16 __ovld __cnfn convert_short16_rtz(long16);
-short16 __ovld __cnfn convert_short16_sat_rtz(long16);
-short16 __ovld __cnfn convert_short16_rtp(long16);
-short16 __ovld __cnfn convert_short16_sat_rtp(long16);
-short16 __ovld __cnfn convert_short16_rtn(long16);
-short16 __ovld __cnfn convert_short16_sat_rtn(long16);
-short16 __ovld __cnfn convert_short16(long16);
-short16 __ovld __cnfn convert_short16_sat(long16);
-short16 __ovld __cnfn convert_short16_rte(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
-short16 __ovld __cnfn convert_short16_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_rtn(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
-short16 __ovld __cnfn convert_short16(ulong16);
-short16 __ovld __cnfn convert_short16_sat(ulong16);
-short16 __ovld __cnfn convert_short16_rte(float16);
-short16 __ovld __cnfn convert_short16_sat_rte(float16);
-short16 __ovld __cnfn convert_short16_rtz(float16);
-short16 __ovld __cnfn convert_short16_sat_rtz(float16);
-short16 __ovld __cnfn convert_short16_rtp(float16);
-short16 __ovld __cnfn convert_short16_sat_rtp(float16);
-short16 __ovld __cnfn convert_short16_rtn(float16);
-short16 __ovld __cnfn convert_short16_sat_rtn(float16);
-short16 __ovld __cnfn convert_short16(float16);
-short16 __ovld __cnfn convert_short16_sat(float16);
-ushort16 __ovld __cnfn convert_ushort16_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat(char16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat(short16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat(int16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat(long16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat(float16);
-int16 __ovld __cnfn convert_int16_rte(char16);
-int16 __ovld __cnfn convert_int16_sat_rte(char16);
-int16 __ovld __cnfn convert_int16_rtz(char16);
-int16 __ovld __cnfn convert_int16_sat_rtz(char16);
-int16 __ovld __cnfn convert_int16_rtp(char16);
-int16 __ovld __cnfn convert_int16_sat_rtp(char16);
-int16 __ovld __cnfn convert_int16_rtn(char16);
-int16 __ovld __cnfn convert_int16_sat_rtn(char16);
-int16 __ovld __cnfn convert_int16(char16);
-int16 __ovld __cnfn convert_int16_sat(char16);
-int16 __ovld __cnfn convert_int16_rte(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
-int16 __ovld __cnfn convert_int16_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_rtn(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
-int16 __ovld __cnfn convert_int16(uchar16);
-int16 __ovld __cnfn convert_int16_sat(uchar16);
-int16 __ovld __cnfn convert_int16_rte(short16);
-int16 __ovld __cnfn convert_int16_sat_rte(short16);
-int16 __ovld __cnfn convert_int16_rtz(short16);
-int16 __ovld __cnfn convert_int16_sat_rtz(short16);
-int16 __ovld __cnfn convert_int16_rtp(short16);
-int16 __ovld __cnfn convert_int16_sat_rtp(short16);
-int16 __ovld __cnfn convert_int16_rtn(short16);
-int16 __ovld __cnfn convert_int16_sat_rtn(short16);
-int16 __ovld __cnfn convert_int16(short16);
-int16 __ovld __cnfn convert_int16_sat(short16);
-int16 __ovld __cnfn convert_int16_rte(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
-int16 __ovld __cnfn convert_int16_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_rtn(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
-int16 __ovld __cnfn convert_int16(ushort16);
-int16 __ovld __cnfn convert_int16_sat(ushort16);
-int16 __ovld __cnfn convert_int16_rte(int16);
-int16 __ovld __cnfn convert_int16_sat_rte(int16);
-int16 __ovld __cnfn convert_int16_rtz(int16);
-int16 __ovld __cnfn convert_int16_sat_rtz(int16);
-int16 __ovld __cnfn convert_int16_rtp(int16);
-int16 __ovld __cnfn convert_int16_sat_rtp(int16);
-int16 __ovld __cnfn convert_int16_rtn(int16);
-int16 __ovld __cnfn convert_int16_sat_rtn(int16);
-int16 __ovld __cnfn convert_int16(int16);
-int16 __ovld __cnfn convert_int16_sat(int16);
-int16 __ovld __cnfn convert_int16_rte(uint16);
-int16 __ovld __cnfn convert_int16_sat_rte(uint16);
-int16 __ovld __cnfn convert_int16_rtz(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
-int16 __ovld __cnfn convert_int16_rtp(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
-int16 __ovld __cnfn convert_int16_rtn(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
-int16 __ovld __cnfn convert_int16(uint16);
-int16 __ovld __cnfn convert_int16_sat(uint16);
-int16 __ovld __cnfn convert_int16_rte(long16);
-int16 __ovld __cnfn convert_int16_sat_rte(long16);
-int16 __ovld __cnfn convert_int16_rtz(long16);
-int16 __ovld __cnfn convert_int16_sat_rtz(long16);
-int16 __ovld __cnfn convert_int16_rtp(long16);
-int16 __ovld __cnfn convert_int16_sat_rtp(long16);
-int16 __ovld __cnfn convert_int16_rtn(long16);
-int16 __ovld __cnfn convert_int16_sat_rtn(long16);
-int16 __ovld __cnfn convert_int16(long16);
-int16 __ovld __cnfn convert_int16_sat(long16);
-int16 __ovld __cnfn convert_int16_rte(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
-int16 __ovld __cnfn convert_int16_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_rtn(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
-int16 __ovld __cnfn convert_int16(ulong16);
-int16 __ovld __cnfn convert_int16_sat(ulong16);
-int16 __ovld __cnfn convert_int16_rte(float16);
-int16 __ovld __cnfn convert_int16_sat_rte(float16);
-int16 __ovld __cnfn convert_int16_rtz(float16);
-int16 __ovld __cnfn convert_int16_sat_rtz(float16);
-int16 __ovld __cnfn convert_int16_rtp(float16);
-int16 __ovld __cnfn convert_int16_sat_rtp(float16);
-int16 __ovld __cnfn convert_int16_rtn(float16);
-int16 __ovld __cnfn convert_int16_sat_rtn(float16);
-int16 __ovld __cnfn convert_int16(float16);
-int16 __ovld __cnfn convert_int16_sat(float16);
-uint16 __ovld __cnfn convert_uint16_rte(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
-uint16 __ovld __cnfn convert_uint16_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_rtn(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
-uint16 __ovld __cnfn convert_uint16(char16);
-uint16 __ovld __cnfn convert_uint16_sat(char16);
-uint16 __ovld __cnfn convert_uint16_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat(uchar16);
-uint16 __ovld __cnfn convert_uint16_rte(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
-uint16 __ovld __cnfn convert_uint16_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_rtn(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
-uint16 __ovld __cnfn convert_uint16(short16);
-uint16 __ovld __cnfn convert_uint16_sat(short16);
-uint16 __ovld __cnfn convert_uint16_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat(ushort16);
-uint16 __ovld __cnfn convert_uint16_rte(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
-uint16 __ovld __cnfn convert_uint16_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_rtn(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
-uint16 __ovld __cnfn convert_uint16(int16);
-uint16 __ovld __cnfn convert_uint16_sat(int16);
-uint16 __ovld __cnfn convert_uint16_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16(uint16);
-uint16 __ovld __cnfn convert_uint16_sat(uint16);
-uint16 __ovld __cnfn convert_uint16_rte(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
-uint16 __ovld __cnfn convert_uint16_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_rtn(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
-uint16 __ovld __cnfn convert_uint16(long16);
-uint16 __ovld __cnfn convert_uint16_sat(long16);
-uint16 __ovld __cnfn convert_uint16_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat(ulong16);
-uint16 __ovld __cnfn convert_uint16_rte(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
-uint16 __ovld __cnfn convert_uint16_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_rtn(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
-uint16 __ovld __cnfn convert_uint16(float16);
-uint16 __ovld __cnfn convert_uint16_sat(float16);
-long16 __ovld __cnfn convert_long16_rte(char16);
-long16 __ovld __cnfn convert_long16_sat_rte(char16);
-long16 __ovld __cnfn convert_long16_rtz(char16);
-long16 __ovld __cnfn convert_long16_sat_rtz(char16);
-long16 __ovld __cnfn convert_long16_rtp(char16);
-long16 __ovld __cnfn convert_long16_sat_rtp(char16);
-long16 __ovld __cnfn convert_long16_rtn(char16);
-long16 __ovld __cnfn convert_long16_sat_rtn(char16);
-long16 __ovld __cnfn convert_long16(char16);
-long16 __ovld __cnfn convert_long16_sat(char16);
-long16 __ovld __cnfn convert_long16_rte(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
-long16 __ovld __cnfn convert_long16_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_rtn(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
-long16 __ovld __cnfn convert_long16(uchar16);
-long16 __ovld __cnfn convert_long16_sat(uchar16);
-long16 __ovld __cnfn convert_long16_rte(short16);
-long16 __ovld __cnfn convert_long16_sat_rte(short16);
-long16 __ovld __cnfn convert_long16_rtz(short16);
-long16 __ovld __cnfn convert_long16_sat_rtz(short16);
-long16 __ovld __cnfn convert_long16_rtp(short16);
-long16 __ovld __cnfn convert_long16_sat_rtp(short16);
-long16 __ovld __cnfn convert_long16_rtn(short16);
-long16 __ovld __cnfn convert_long16_sat_rtn(short16);
-long16 __ovld __cnfn convert_long16(short16);
-long16 __ovld __cnfn convert_long16_sat(short16);
-long16 __ovld __cnfn convert_long16_rte(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
-long16 __ovld __cnfn convert_long16_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_rtn(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
-long16 __ovld __cnfn convert_long16(ushort16);
-long16 __ovld __cnfn convert_long16_sat(ushort16);
-long16 __ovld __cnfn convert_long16_rte(int16);
-long16 __ovld __cnfn convert_long16_sat_rte(int16);
-long16 __ovld __cnfn convert_long16_rtz(int16);
-long16 __ovld __cnfn convert_long16_sat_rtz(int16);
-long16 __ovld __cnfn convert_long16_rtp(int16);
-long16 __ovld __cnfn convert_long16_sat_rtp(int16);
-long16 __ovld __cnfn convert_long16_rtn(int16);
-long16 __ovld __cnfn convert_long16_sat_rtn(int16);
-long16 __ovld __cnfn convert_long16(int16);
-long16 __ovld __cnfn convert_long16_sat(int16);
-long16 __ovld __cnfn convert_long16_rte(uint16);
-long16 __ovld __cnfn convert_long16_sat_rte(uint16);
-long16 __ovld __cnfn convert_long16_rtz(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
-long16 __ovld __cnfn convert_long16_rtp(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
-long16 __ovld __cnfn convert_long16_rtn(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
-long16 __ovld __cnfn convert_long16(uint16);
-long16 __ovld __cnfn convert_long16_sat(uint16);
-long16 __ovld __cnfn convert_long16_rte(long16);
-long16 __ovld __cnfn convert_long16_sat_rte(long16);
-long16 __ovld __cnfn convert_long16_rtz(long16);
-long16 __ovld __cnfn convert_long16_sat_rtz(long16);
-long16 __ovld __cnfn convert_long16_rtp(long16);
-long16 __ovld __cnfn convert_long16_sat_rtp(long16);
-long16 __ovld __cnfn convert_long16_rtn(long16);
-long16 __ovld __cnfn convert_long16_sat_rtn(long16);
-long16 __ovld __cnfn convert_long16(long16);
-long16 __ovld __cnfn convert_long16_sat(long16);
-long16 __ovld __cnfn convert_long16_rte(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
-long16 __ovld __cnfn convert_long16_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_rtn(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
-long16 __ovld __cnfn convert_long16(ulong16);
-long16 __ovld __cnfn convert_long16_sat(ulong16);
-long16 __ovld __cnfn convert_long16_rte(float16);
-long16 __ovld __cnfn convert_long16_sat_rte(float16);
-long16 __ovld __cnfn convert_long16_rtz(float16);
-long16 __ovld __cnfn convert_long16_sat_rtz(float16);
-long16 __ovld __cnfn convert_long16_rtp(float16);
-long16 __ovld __cnfn convert_long16_sat_rtp(float16);
-long16 __ovld __cnfn convert_long16_rtn(float16);
-long16 __ovld __cnfn convert_long16_sat_rtn(float16);
-long16 __ovld __cnfn convert_long16(float16);
-long16 __ovld __cnfn convert_long16_sat(float16);
-ulong16 __ovld __cnfn convert_ulong16_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat(char16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat(short16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat(int16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat(long16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat(float16);
-float16 __ovld __cnfn convert_float16_rte(char16);
-float16 __ovld __cnfn convert_float16_rtz(char16);
-float16 __ovld __cnfn convert_float16_rtp(char16);
-float16 __ovld __cnfn convert_float16_rtn(char16);
-float16 __ovld __cnfn convert_float16(char16);
-float16 __ovld __cnfn convert_float16_rte(uchar16);
-float16 __ovld __cnfn convert_float16_rtz(uchar16);
-float16 __ovld __cnfn convert_float16_rtp(uchar16);
-float16 __ovld __cnfn convert_float16_rtn(uchar16);
-float16 __ovld __cnfn convert_float16(uchar16);
-float16 __ovld __cnfn convert_float16_rte(short16);
-float16 __ovld __cnfn convert_float16_rtz(short16);
-float16 __ovld __cnfn convert_float16_rtp(short16);
-float16 __ovld __cnfn convert_float16_rtn(short16);
-float16 __ovld __cnfn convert_float16(short16);
-float16 __ovld __cnfn convert_float16_rte(ushort16);
-float16 __ovld __cnfn convert_float16_rtz(ushort16);
-float16 __ovld __cnfn convert_float16_rtp(ushort16);
-float16 __ovld __cnfn convert_float16_rtn(ushort16);
-float16 __ovld __cnfn convert_float16(ushort16);
-float16 __ovld __cnfn convert_float16_rte(int16);
-float16 __ovld __cnfn convert_float16_rtz(int16);
-float16 __ovld __cnfn convert_float16_rtp(int16);
-float16 __ovld __cnfn convert_float16_rtn(int16);
-float16 __ovld __cnfn convert_float16(int16);
-float16 __ovld __cnfn convert_float16_rte(uint16);
-float16 __ovld __cnfn convert_float16_rtz(uint16);
-float16 __ovld __cnfn convert_float16_rtp(uint16);
-float16 __ovld __cnfn convert_float16_rtn(uint16);
-float16 __ovld __cnfn convert_float16(uint16);
-float16 __ovld __cnfn convert_float16_rte(long16);
-float16 __ovld __cnfn convert_float16_rtz(long16);
-float16 __ovld __cnfn convert_float16_rtp(long16);
-float16 __ovld __cnfn convert_float16_rtn(long16);
-float16 __ovld __cnfn convert_float16(long16);
-float16 __ovld __cnfn convert_float16_rte(ulong16);
-float16 __ovld __cnfn convert_float16_rtz(ulong16);
-float16 __ovld __cnfn convert_float16_rtp(ulong16);
-float16 __ovld __cnfn convert_float16_rtn(ulong16);
-float16 __ovld __cnfn convert_float16(ulong16);
-float16 __ovld __cnfn convert_float16_rte(float16);
-float16 __ovld __cnfn convert_float16_rtz(float16);
-float16 __ovld __cnfn convert_float16_rtp(float16);
-float16 __ovld __cnfn convert_float16_rtn(float16);
-float16 __ovld __cnfn convert_float16(float16);
-
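For orientation (this note and the sketch below are editorial, not part of the deleted header): every overload above follows the OpenCL explicit-conversion naming scheme convert_<destTypeN>[_sat][_rte|_rtz|_rtp|_rtn], where the optional _sat modifier saturates out-of-range values and the optional suffix selects the rounding mode (to nearest even, toward zero, toward positive infinity, toward negative infinity). A minimal usage sketch in OpenCL C; the kernel and buffer names are hypothetical:

// Saturating float16 -> uchar16 conversion with round-to-nearest-even;
// out-of-range lanes clamp to [0, 255] instead of wrapping.
__kernel void pack_pixels(__global const float16 *src,
                          __global uchar16 *dst) {
    size_t gid = get_global_id(0);
    dst[gid] = convert_uchar16_sat_rte(src[gid]);
}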
-// Conversions with double data type parameters or return value.
-
-#ifdef cl_khr_fp64
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
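The declarations that follow are only meaningful on devices supporting the cl_khr_fp64 extension, which the header enables just above before declaring them. A hedged sketch (kernel and buffer names are illustrative, not from this header) of how kernel code might guard its own use of these double-typed overloads:

#ifdef cl_khr_fp64
// Widen float4 lanes to double4; compiled only where fp64 is available.
__kernel void widen(__global const float4 *in, __global double4 *out) {
    size_t gid = get_global_id(0);
    out[gid] = convert_double4(in[gid]);
}
#endif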
-char __ovld __cnfn convert_char(double);
-char __ovld __cnfn convert_char_rte(double);
-char __ovld __cnfn convert_char_rtn(double);
-char __ovld __cnfn convert_char_rtp(double);
-char __ovld __cnfn convert_char_rtz(double);
-char __ovld __cnfn convert_char_sat(double);
-char __ovld __cnfn convert_char_sat_rte(double);
-char __ovld __cnfn convert_char_sat_rtn(double);
-char __ovld __cnfn convert_char_sat_rtp(double);
-char __ovld __cnfn convert_char_sat_rtz(double);
-char2 __ovld __cnfn convert_char2(double2);
-char2 __ovld __cnfn convert_char2_rte(double2);
-char2 __ovld __cnfn convert_char2_rtn(double2);
-char2 __ovld __cnfn convert_char2_rtp(double2);
-char2 __ovld __cnfn convert_char2_rtz(double2);
-char2 __ovld __cnfn convert_char2_sat(double2);
-char2 __ovld __cnfn convert_char2_sat_rte(double2);
-char2 __ovld __cnfn convert_char2_sat_rtn(double2);
-char2 __ovld __cnfn convert_char2_sat_rtp(double2);
-char2 __ovld __cnfn convert_char2_sat_rtz(double2);
-char3 __ovld __cnfn convert_char3(double3);
-char3 __ovld __cnfn convert_char3_rte(double3);
-char3 __ovld __cnfn convert_char3_rtn(double3);
-char3 __ovld __cnfn convert_char3_rtp(double3);
-char3 __ovld __cnfn convert_char3_rtz(double3);
-char3 __ovld __cnfn convert_char3_sat(double3);
-char3 __ovld __cnfn convert_char3_sat_rte(double3);
-char3 __ovld __cnfn convert_char3_sat_rtn(double3);
-char3 __ovld __cnfn convert_char3_sat_rtp(double3);
-char3 __ovld __cnfn convert_char3_sat_rtz(double3);
-char4 __ovld __cnfn convert_char4(double4);
-char4 __ovld __cnfn convert_char4_rte(double4);
-char4 __ovld __cnfn convert_char4_rtn(double4);
-char4 __ovld __cnfn convert_char4_rtp(double4);
-char4 __ovld __cnfn convert_char4_rtz(double4);
-char4 __ovld __cnfn convert_char4_sat(double4);
-char4 __ovld __cnfn convert_char4_sat_rte(double4);
-char4 __ovld __cnfn convert_char4_sat_rtn(double4);
-char4 __ovld __cnfn convert_char4_sat_rtp(double4);
-char4 __ovld __cnfn convert_char4_sat_rtz(double4);
-char8 __ovld __cnfn convert_char8(double8);
-char8 __ovld __cnfn convert_char8_rte(double8);
-char8 __ovld __cnfn convert_char8_rtn(double8);
-char8 __ovld __cnfn convert_char8_rtp(double8);
-char8 __ovld __cnfn convert_char8_rtz(double8);
-char8 __ovld __cnfn convert_char8_sat(double8);
-char8 __ovld __cnfn convert_char8_sat_rte(double8);
-char8 __ovld __cnfn convert_char8_sat_rtn(double8);
-char8 __ovld __cnfn convert_char8_sat_rtp(double8);
-char8 __ovld __cnfn convert_char8_sat_rtz(double8);
-char16 __ovld __cnfn convert_char16(double16);
-char16 __ovld __cnfn convert_char16_rte(double16);
-char16 __ovld __cnfn convert_char16_rtn(double16);
-char16 __ovld __cnfn convert_char16_rtp(double16);
-char16 __ovld __cnfn convert_char16_rtz(double16);
-char16 __ovld __cnfn convert_char16_sat(double16);
-char16 __ovld __cnfn convert_char16_sat_rte(double16);
-char16 __ovld __cnfn convert_char16_sat_rtn(double16);
-char16 __ovld __cnfn convert_char16_sat_rtp(double16);
-char16 __ovld __cnfn convert_char16_sat_rtz(double16);
-
-uchar __ovld __cnfn convert_uchar(double);
-uchar __ovld __cnfn convert_uchar_rte(double);
-uchar __ovld __cnfn convert_uchar_rtn(double);
-uchar __ovld __cnfn convert_uchar_rtp(double);
-uchar __ovld __cnfn convert_uchar_rtz(double);
-uchar __ovld __cnfn convert_uchar_sat(double);
-uchar __ovld __cnfn convert_uchar_sat_rte(double);
-uchar __ovld __cnfn convert_uchar_sat_rtn(double);
-uchar __ovld __cnfn convert_uchar_sat_rtp(double);
-uchar __ovld __cnfn convert_uchar_sat_rtz(double);
-uchar2 __ovld __cnfn convert_uchar2(double2);
-uchar2 __ovld __cnfn convert_uchar2_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
-uchar3 __ovld __cnfn convert_uchar3(double3);
-uchar3 __ovld __cnfn convert_uchar3_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
-uchar4 __ovld __cnfn convert_uchar4(double4);
-uchar4 __ovld __cnfn convert_uchar4_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
-uchar8 __ovld __cnfn convert_uchar8(double8);
-uchar8 __ovld __cnfn convert_uchar8_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
-uchar16 __ovld __cnfn convert_uchar16(double16);
-uchar16 __ovld __cnfn convert_uchar16_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
-
-short __ovld __cnfn convert_short(double);
-short __ovld __cnfn convert_short_rte(double);
-short __ovld __cnfn convert_short_rtn(double);
-short __ovld __cnfn convert_short_rtp(double);
-short __ovld __cnfn convert_short_rtz(double);
-short __ovld __cnfn convert_short_sat(double);
-short __ovld __cnfn convert_short_sat_rte(double);
-short __ovld __cnfn convert_short_sat_rtn(double);
-short __ovld __cnfn convert_short_sat_rtp(double);
-short __ovld __cnfn convert_short_sat_rtz(double);
-short2 __ovld __cnfn convert_short2(double2);
-short2 __ovld __cnfn convert_short2_rte(double2);
-short2 __ovld __cnfn convert_short2_rtn(double2);
-short2 __ovld __cnfn convert_short2_rtp(double2);
-short2 __ovld __cnfn convert_short2_rtz(double2);
-short2 __ovld __cnfn convert_short2_sat(double2);
-short2 __ovld __cnfn convert_short2_sat_rte(double2);
-short2 __ovld __cnfn convert_short2_sat_rtn(double2);
-short2 __ovld __cnfn convert_short2_sat_rtp(double2);
-short2 __ovld __cnfn convert_short2_sat_rtz(double2);
-short3 __ovld __cnfn convert_short3(double3);
-short3 __ovld __cnfn convert_short3_rte(double3);
-short3 __ovld __cnfn convert_short3_rtn(double3);
-short3 __ovld __cnfn convert_short3_rtp(double3);
-short3 __ovld __cnfn convert_short3_rtz(double3);
-short3 __ovld __cnfn convert_short3_sat(double3);
-short3 __ovld __cnfn convert_short3_sat_rte(double3);
-short3 __ovld __cnfn convert_short3_sat_rtn(double3);
-short3 __ovld __cnfn convert_short3_sat_rtp(double3);
-short3 __ovld __cnfn convert_short3_sat_rtz(double3);
-short4 __ovld __cnfn convert_short4(double4);
-short4 __ovld __cnfn convert_short4_rte(double4);
-short4 __ovld __cnfn convert_short4_rtn(double4);
-short4 __ovld __cnfn convert_short4_rtp(double4);
-short4 __ovld __cnfn convert_short4_rtz(double4);
-short4 __ovld __cnfn convert_short4_sat(double4);
-short4 __ovld __cnfn convert_short4_sat_rte(double4);
-short4 __ovld __cnfn convert_short4_sat_rtn(double4);
-short4 __ovld __cnfn convert_short4_sat_rtp(double4);
-short4 __ovld __cnfn convert_short4_sat_rtz(double4);
-short8 __ovld __cnfn convert_short8(double8);
-short8 __ovld __cnfn convert_short8_rte(double8);
-short8 __ovld __cnfn convert_short8_rtn(double8);
-short8 __ovld __cnfn convert_short8_rtp(double8);
-short8 __ovld __cnfn convert_short8_rtz(double8);
-short8 __ovld __cnfn convert_short8_sat(double8);
-short8 __ovld __cnfn convert_short8_sat_rte(double8);
-short8 __ovld __cnfn convert_short8_sat_rtn(double8);
-short8 __ovld __cnfn convert_short8_sat_rtp(double8);
-short8 __ovld __cnfn convert_short8_sat_rtz(double8);
-short16 __ovld __cnfn convert_short16(double16);
-short16 __ovld __cnfn convert_short16_rte(double16);
-short16 __ovld __cnfn convert_short16_rtn(double16);
-short16 __ovld __cnfn convert_short16_rtp(double16);
-short16 __ovld __cnfn convert_short16_rtz(double16);
-short16 __ovld __cnfn convert_short16_sat(double16);
-short16 __ovld __cnfn convert_short16_sat_rte(double16);
-short16 __ovld __cnfn convert_short16_sat_rtn(double16);
-short16 __ovld __cnfn convert_short16_sat_rtp(double16);
-short16 __ovld __cnfn convert_short16_sat_rtz(double16);
-
-ushort __ovld __cnfn convert_ushort(double);
-ushort __ovld __cnfn convert_ushort_rte(double);
-ushort __ovld __cnfn convert_ushort_rtn(double);
-ushort __ovld __cnfn convert_ushort_rtp(double);
-ushort __ovld __cnfn convert_ushort_rtz(double);
-ushort __ovld __cnfn convert_ushort_sat(double);
-ushort __ovld __cnfn convert_ushort_sat_rte(double);
-ushort __ovld __cnfn convert_ushort_sat_rtn(double);
-ushort __ovld __cnfn convert_ushort_sat_rtp(double);
-ushort __ovld __cnfn convert_ushort_sat_rtz(double);
-ushort2 __ovld __cnfn convert_ushort2(double2);
-ushort2 __ovld __cnfn convert_ushort2_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
-ushort3 __ovld __cnfn convert_ushort3(double3);
-ushort3 __ovld __cnfn convert_ushort3_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
-ushort4 __ovld __cnfn convert_ushort4(double4);
-ushort4 __ovld __cnfn convert_ushort4_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
-ushort8 __ovld __cnfn convert_ushort8(double8);
-ushort8 __ovld __cnfn convert_ushort8_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
-ushort16 __ovld __cnfn convert_ushort16(double16);
-ushort16 __ovld __cnfn convert_ushort16_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
-
-int __ovld __cnfn convert_int(double);
-int __ovld __cnfn convert_int_rte(double);
-int __ovld __cnfn convert_int_rtn(double);
-int __ovld __cnfn convert_int_rtp(double);
-int __ovld __cnfn convert_int_rtz(double);
-int __ovld __cnfn convert_int_sat(double);
-int __ovld __cnfn convert_int_sat_rte(double);
-int __ovld __cnfn convert_int_sat_rtn(double);
-int __ovld __cnfn convert_int_sat_rtp(double);
-int __ovld __cnfn convert_int_sat_rtz(double);
-int2 __ovld __cnfn convert_int2(double2);
-int2 __ovld __cnfn convert_int2_rte(double2);
-int2 __ovld __cnfn convert_int2_rtn(double2);
-int2 __ovld __cnfn convert_int2_rtp(double2);
-int2 __ovld __cnfn convert_int2_rtz(double2);
-int2 __ovld __cnfn convert_int2_sat(double2);
-int2 __ovld __cnfn convert_int2_sat_rte(double2);
-int2 __ovld __cnfn convert_int2_sat_rtn(double2);
-int2 __ovld __cnfn convert_int2_sat_rtp(double2);
-int2 __ovld __cnfn convert_int2_sat_rtz(double2);
-int3 __ovld __cnfn convert_int3(double3);
-int3 __ovld __cnfn convert_int3_rte(double3);
-int3 __ovld __cnfn convert_int3_rtn(double3);
-int3 __ovld __cnfn convert_int3_rtp(double3);
-int3 __ovld __cnfn convert_int3_rtz(double3);
-int3 __ovld __cnfn convert_int3_sat(double3);
-int3 __ovld __cnfn convert_int3_sat_rte(double3);
-int3 __ovld __cnfn convert_int3_sat_rtn(double3);
-int3 __ovld __cnfn convert_int3_sat_rtp(double3);
-int3 __ovld __cnfn convert_int3_sat_rtz(double3);
-int4 __ovld __cnfn convert_int4(double4);
-int4 __ovld __cnfn convert_int4_rte(double4);
-int4 __ovld __cnfn convert_int4_rtn(double4);
-int4 __ovld __cnfn convert_int4_rtp(double4);
-int4 __ovld __cnfn convert_int4_rtz(double4);
-int4 __ovld __cnfn convert_int4_sat(double4);
-int4 __ovld __cnfn convert_int4_sat_rte(double4);
-int4 __ovld __cnfn convert_int4_sat_rtn(double4);
-int4 __ovld __cnfn convert_int4_sat_rtp(double4);
-int4 __ovld __cnfn convert_int4_sat_rtz(double4);
-int8 __ovld __cnfn convert_int8(double8);
-int8 __ovld __cnfn convert_int8_rte(double8);
-int8 __ovld __cnfn convert_int8_rtn(double8);
-int8 __ovld __cnfn convert_int8_rtp(double8);
-int8 __ovld __cnfn convert_int8_rtz(double8);
-int8 __ovld __cnfn convert_int8_sat(double8);
-int8 __ovld __cnfn convert_int8_sat_rte(double8);
-int8 __ovld __cnfn convert_int8_sat_rtn(double8);
-int8 __ovld __cnfn convert_int8_sat_rtp(double8);
-int8 __ovld __cnfn convert_int8_sat_rtz(double8);
-int16 __ovld __cnfn convert_int16(double16);
-int16 __ovld __cnfn convert_int16_rte(double16);
-int16 __ovld __cnfn convert_int16_rtn(double16);
-int16 __ovld __cnfn convert_int16_rtp(double16);
-int16 __ovld __cnfn convert_int16_rtz(double16);
-int16 __ovld __cnfn convert_int16_sat(double16);
-int16 __ovld __cnfn convert_int16_sat_rte(double16);
-int16 __ovld __cnfn convert_int16_sat_rtn(double16);
-int16 __ovld __cnfn convert_int16_sat_rtp(double16);
-int16 __ovld __cnfn convert_int16_sat_rtz(double16);
-
-uint __ovld __cnfn convert_uint(double);
-uint __ovld __cnfn convert_uint_rte(double);
-uint __ovld __cnfn convert_uint_rtn(double);
-uint __ovld __cnfn convert_uint_rtp(double);
-uint __ovld __cnfn convert_uint_rtz(double);
-uint __ovld __cnfn convert_uint_sat(double);
-uint __ovld __cnfn convert_uint_sat_rte(double);
-uint __ovld __cnfn convert_uint_sat_rtn(double);
-uint __ovld __cnfn convert_uint_sat_rtp(double);
-uint __ovld __cnfn convert_uint_sat_rtz(double);
-uint2 __ovld __cnfn convert_uint2(double2);
-uint2 __ovld __cnfn convert_uint2_rte(double2);
-uint2 __ovld __cnfn convert_uint2_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_rtz(double2);
-uint2 __ovld __cnfn convert_uint2_sat(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
-uint3 __ovld __cnfn convert_uint3(double3);
-uint3 __ovld __cnfn convert_uint3_rte(double3);
-uint3 __ovld __cnfn convert_uint3_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_rtz(double3);
-uint3 __ovld __cnfn convert_uint3_sat(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
-uint4 __ovld __cnfn convert_uint4(double4);
-uint4 __ovld __cnfn convert_uint4_rte(double4);
-uint4 __ovld __cnfn convert_uint4_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_rtz(double4);
-uint4 __ovld __cnfn convert_uint4_sat(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
-uint8 __ovld __cnfn convert_uint8(double8);
-uint8 __ovld __cnfn convert_uint8_rte(double8);
-uint8 __ovld __cnfn convert_uint8_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_rtz(double8);
-uint8 __ovld __cnfn convert_uint8_sat(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
-uint16 __ovld __cnfn convert_uint16(double16);
-uint16 __ovld __cnfn convert_uint16_rte(double16);
-uint16 __ovld __cnfn convert_uint16_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_rtz(double16);
-uint16 __ovld __cnfn convert_uint16_sat(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
-
-long __ovld __cnfn convert_long(double);
-long __ovld __cnfn convert_long_rte(double);
-long __ovld __cnfn convert_long_rtn(double);
-long __ovld __cnfn convert_long_rtp(double);
-long __ovld __cnfn convert_long_rtz(double);
-long __ovld __cnfn convert_long_sat(double);
-long __ovld __cnfn convert_long_sat_rte(double);
-long __ovld __cnfn convert_long_sat_rtn(double);
-long __ovld __cnfn convert_long_sat_rtp(double);
-long __ovld __cnfn convert_long_sat_rtz(double);
-long2 __ovld __cnfn convert_long2(double2);
-long2 __ovld __cnfn convert_long2_rte(double2);
-long2 __ovld __cnfn convert_long2_rtn(double2);
-long2 __ovld __cnfn convert_long2_rtp(double2);
-long2 __ovld __cnfn convert_long2_rtz(double2);
-long2 __ovld __cnfn convert_long2_sat(double2);
-long2 __ovld __cnfn convert_long2_sat_rte(double2);
-long2 __ovld __cnfn convert_long2_sat_rtn(double2);
-long2 __ovld __cnfn convert_long2_sat_rtp(double2);
-long2 __ovld __cnfn convert_long2_sat_rtz(double2);
-long3 __ovld __cnfn convert_long3(double3);
-long3 __ovld __cnfn convert_long3_rte(double3);
-long3 __ovld __cnfn convert_long3_rtn(double3);
-long3 __ovld __cnfn convert_long3_rtp(double3);
-long3 __ovld __cnfn convert_long3_rtz(double3);
-long3 __ovld __cnfn convert_long3_sat(double3);
-long3 __ovld __cnfn convert_long3_sat_rte(double3);
-long3 __ovld __cnfn convert_long3_sat_rtn(double3);
-long3 __ovld __cnfn convert_long3_sat_rtp(double3);
-long3 __ovld __cnfn convert_long3_sat_rtz(double3);
-long4 __ovld __cnfn convert_long4(double4);
-long4 __ovld __cnfn convert_long4_rte(double4);
-long4 __ovld __cnfn convert_long4_rtn(double4);
-long4 __ovld __cnfn convert_long4_rtp(double4);
-long4 __ovld __cnfn convert_long4_rtz(double4);
-long4 __ovld __cnfn convert_long4_sat(double4);
-long4 __ovld __cnfn convert_long4_sat_rte(double4);
-long4 __ovld __cnfn convert_long4_sat_rtn(double4);
-long4 __ovld __cnfn convert_long4_sat_rtp(double4);
-long4 __ovld __cnfn convert_long4_sat_rtz(double4);
-long8 __ovld __cnfn convert_long8(double8);
-long8 __ovld __cnfn convert_long8_rte(double8);
-long8 __ovld __cnfn convert_long8_rtn(double8);
-long8 __ovld __cnfn convert_long8_rtp(double8);
-long8 __ovld __cnfn convert_long8_rtz(double8);
-long8 __ovld __cnfn convert_long8_sat(double8);
-long8 __ovld __cnfn convert_long8_sat_rte(double8);
-long8 __ovld __cnfn convert_long8_sat_rtn(double8);
-long8 __ovld __cnfn convert_long8_sat_rtp(double8);
-long8 __ovld __cnfn convert_long8_sat_rtz(double8);
-long16 __ovld __cnfn convert_long16(double16);
-long16 __ovld __cnfn convert_long16_rte(double16);
-long16 __ovld __cnfn convert_long16_rtn(double16);
-long16 __ovld __cnfn convert_long16_rtp(double16);
-long16 __ovld __cnfn convert_long16_rtz(double16);
-long16 __ovld __cnfn convert_long16_sat(double16);
-long16 __ovld __cnfn convert_long16_sat_rte(double16);
-long16 __ovld __cnfn convert_long16_sat_rtn(double16);
-long16 __ovld __cnfn convert_long16_sat_rtp(double16);
-long16 __ovld __cnfn convert_long16_sat_rtz(double16);
-
-ulong __ovld __cnfn convert_ulong(double);
-ulong __ovld __cnfn convert_ulong_rte(double);
-ulong __ovld __cnfn convert_ulong_rtn(double);
-ulong __ovld __cnfn convert_ulong_rtp(double);
-ulong __ovld __cnfn convert_ulong_rtz(double);
-ulong __ovld __cnfn convert_ulong_sat(double);
-ulong __ovld __cnfn convert_ulong_sat_rte(double);
-ulong __ovld __cnfn convert_ulong_sat_rtn(double);
-ulong __ovld __cnfn convert_ulong_sat_rtp(double);
-ulong __ovld __cnfn convert_ulong_sat_rtz(double);
-ulong2 __ovld __cnfn convert_ulong2(double2);
-ulong2 __ovld __cnfn convert_ulong2_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
-ulong3 __ovld __cnfn convert_ulong3(double3);
-ulong3 __ovld __cnfn convert_ulong3_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
-ulong4 __ovld __cnfn convert_ulong4(double4);
-ulong4 __ovld __cnfn convert_ulong4_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
-ulong8 __ovld __cnfn convert_ulong8(double8);
-ulong8 __ovld __cnfn convert_ulong8_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
-ulong16 __ovld __cnfn convert_ulong16(double16);
-ulong16 __ovld __cnfn convert_ulong16_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
-
-float __ovld __cnfn convert_float(double);
-float __ovld __cnfn convert_float_rte(double);
-float __ovld __cnfn convert_float_rtn(double);
-float __ovld __cnfn convert_float_rtp(double);
-float __ovld __cnfn convert_float_rtz(double);
-float2 __ovld __cnfn convert_float2(double2);
-float2 __ovld __cnfn convert_float2_rte(double2);
-float2 __ovld __cnfn convert_float2_rtn(double2);
-float2 __ovld __cnfn convert_float2_rtp(double2);
-float2 __ovld __cnfn convert_float2_rtz(double2);
-float3 __ovld __cnfn convert_float3(double3);
-float3 __ovld __cnfn convert_float3_rte(double3);
-float3 __ovld __cnfn convert_float3_rtn(double3);
-float3 __ovld __cnfn convert_float3_rtp(double3);
-float3 __ovld __cnfn convert_float3_rtz(double3);
-float4 __ovld __cnfn convert_float4(double4);
-float4 __ovld __cnfn convert_float4_rte(double4);
-float4 __ovld __cnfn convert_float4_rtn(double4);
-float4 __ovld __cnfn convert_float4_rtp(double4);
-float4 __ovld __cnfn convert_float4_rtz(double4);
-float8 __ovld __cnfn convert_float8(double8);
-float8 __ovld __cnfn convert_float8_rte(double8);
-float8 __ovld __cnfn convert_float8_rtn(double8);
-float8 __ovld __cnfn convert_float8_rtp(double8);
-float8 __ovld __cnfn convert_float8_rtz(double8);
-float16 __ovld __cnfn convert_float16(double16);
-float16 __ovld __cnfn convert_float16_rte(double16);
-float16 __ovld __cnfn convert_float16_rtn(double16);
-float16 __ovld __cnfn convert_float16_rtp(double16);
-float16 __ovld __cnfn convert_float16_rtz(double16);
-
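-/* Sketch (assumes a double2 value v): narrowing double to float rounds
- * to nearest even (_rte) by default; add a suffix for another mode. Note
- * the absence of _sat variants here, since saturation applies only to
- * integer destination types.
- *
- *   float2 lo = convert_float2_rtz(v);  // round toward zero
- */
-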
-double __ovld __cnfn convert_double(char);
-double __ovld __cnfn convert_double(double);
-double __ovld __cnfn convert_double(float);
-double __ovld __cnfn convert_double(int);
-double __ovld __cnfn convert_double(long);
-double __ovld __cnfn convert_double(short);
-double __ovld __cnfn convert_double(uchar);
-double __ovld __cnfn convert_double(uint);
-double __ovld __cnfn convert_double(ulong);
-double __ovld __cnfn convert_double(ushort);
-double __ovld __cnfn convert_double_rte(char);
-double __ovld __cnfn convert_double_rte(double);
-double __ovld __cnfn convert_double_rte(float);
-double __ovld __cnfn convert_double_rte(int);
-double __ovld __cnfn convert_double_rte(long);
-double __ovld __cnfn convert_double_rte(short);
-double __ovld __cnfn convert_double_rte(uchar);
-double __ovld __cnfn convert_double_rte(uint);
-double __ovld __cnfn convert_double_rte(ulong);
-double __ovld __cnfn convert_double_rte(ushort);
-double __ovld __cnfn convert_double_rtn(char);
-double __ovld __cnfn convert_double_rtn(double);
-double __ovld __cnfn convert_double_rtn(float);
-double __ovld __cnfn convert_double_rtn(int);
-double __ovld __cnfn convert_double_rtn(long);
-double __ovld __cnfn convert_double_rtn(short);
-double __ovld __cnfn convert_double_rtn(uchar);
-double __ovld __cnfn convert_double_rtn(uint);
-double __ovld __cnfn convert_double_rtn(ulong);
-double __ovld __cnfn convert_double_rtn(ushort);
-double __ovld __cnfn convert_double_rtp(char);
-double __ovld __cnfn convert_double_rtp(double);
-double __ovld __cnfn convert_double_rtp(float);
-double __ovld __cnfn convert_double_rtp(int);
-double __ovld __cnfn convert_double_rtp(long);
-double __ovld __cnfn convert_double_rtp(short);
-double __ovld __cnfn convert_double_rtp(uchar);
-double __ovld __cnfn convert_double_rtp(uint);
-double __ovld __cnfn convert_double_rtp(ulong);
-double __ovld __cnfn convert_double_rtp(ushort);
-double __ovld __cnfn convert_double_rtz(char);
-double __ovld __cnfn convert_double_rtz(double);
-double __ovld __cnfn convert_double_rtz(float);
-double __ovld __cnfn convert_double_rtz(int);
-double __ovld __cnfn convert_double_rtz(long);
-double __ovld __cnfn convert_double_rtz(short);
-double __ovld __cnfn convert_double_rtz(uchar);
-double __ovld __cnfn convert_double_rtz(uint);
-double __ovld __cnfn convert_double_rtz(ulong);
-double __ovld __cnfn convert_double_rtz(ushort);
-double2 __ovld __cnfn convert_double2(char2);
-double2 __ovld __cnfn convert_double2(double2);
-double2 __ovld __cnfn convert_double2(float2);
-double2 __ovld __cnfn convert_double2(int2);
-double2 __ovld __cnfn convert_double2(long2);
-double2 __ovld __cnfn convert_double2(short2);
-double2 __ovld __cnfn convert_double2(uchar2);
-double2 __ovld __cnfn convert_double2(uint2);
-double2 __ovld __cnfn convert_double2(ulong2);
-double2 __ovld __cnfn convert_double2(ushort2);
-double2 __ovld __cnfn convert_double2_rte(char2);
-double2 __ovld __cnfn convert_double2_rte(double2);
-double2 __ovld __cnfn convert_double2_rte(float2);
-double2 __ovld __cnfn convert_double2_rte(int2);
-double2 __ovld __cnfn convert_double2_rte(long2);
-double2 __ovld __cnfn convert_double2_rte(short2);
-double2 __ovld __cnfn convert_double2_rte(uchar2);
-double2 __ovld __cnfn convert_double2_rte(uint2);
-double2 __ovld __cnfn convert_double2_rte(ulong2);
-double2 __ovld __cnfn convert_double2_rte(ushort2);
-double2 __ovld __cnfn convert_double2_rtn(char2);
-double2 __ovld __cnfn convert_double2_rtn(double2);
-double2 __ovld __cnfn convert_double2_rtn(float2);
-double2 __ovld __cnfn convert_double2_rtn(int2);
-double2 __ovld __cnfn convert_double2_rtn(long2);
-double2 __ovld __cnfn convert_double2_rtn(short2);
-double2 __ovld __cnfn convert_double2_rtn(uchar2);
-double2 __ovld __cnfn convert_double2_rtn(uint2);
-double2 __ovld __cnfn convert_double2_rtn(ulong2);
-double2 __ovld __cnfn convert_double2_rtn(ushort2);
-double2 __ovld __cnfn convert_double2_rtp(char2);
-double2 __ovld __cnfn convert_double2_rtp(double2);
-double2 __ovld __cnfn convert_double2_rtp(float2);
-double2 __ovld __cnfn convert_double2_rtp(int2);
-double2 __ovld __cnfn convert_double2_rtp(long2);
-double2 __ovld __cnfn convert_double2_rtp(short2);
-double2 __ovld __cnfn convert_double2_rtp(uchar2);
-double2 __ovld __cnfn convert_double2_rtp(uint2);
-double2 __ovld __cnfn convert_double2_rtp(ulong2);
-double2 __ovld __cnfn convert_double2_rtp(ushort2);
-double2 __ovld __cnfn convert_double2_rtz(char2);
-double2 __ovld __cnfn convert_double2_rtz(double2);
-double2 __ovld __cnfn convert_double2_rtz(float2);
-double2 __ovld __cnfn convert_double2_rtz(int2);
-double2 __ovld __cnfn convert_double2_rtz(long2);
-double2 __ovld __cnfn convert_double2_rtz(short2);
-double2 __ovld __cnfn convert_double2_rtz(uchar2);
-double2 __ovld __cnfn convert_double2_rtz(uint2);
-double2 __ovld __cnfn convert_double2_rtz(ulong2);
-double2 __ovld __cnfn convert_double2_rtz(ushort2);
-double3 __ovld __cnfn convert_double3(char3);
-double3 __ovld __cnfn convert_double3(double3);
-double3 __ovld __cnfn convert_double3(float3);
-double3 __ovld __cnfn convert_double3(int3);
-double3 __ovld __cnfn convert_double3(long3);
-double3 __ovld __cnfn convert_double3(short3);
-double3 __ovld __cnfn convert_double3(uchar3);
-double3 __ovld __cnfn convert_double3(uint3);
-double3 __ovld __cnfn convert_double3(ulong3);
-double3 __ovld __cnfn convert_double3(ushort3);
-double3 __ovld __cnfn convert_double3_rte(char3);
-double3 __ovld __cnfn convert_double3_rte(double3);
-double3 __ovld __cnfn convert_double3_rte(float3);
-double3 __ovld __cnfn convert_double3_rte(int3);
-double3 __ovld __cnfn convert_double3_rte(long3);
-double3 __ovld __cnfn convert_double3_rte(short3);
-double3 __ovld __cnfn convert_double3_rte(uchar3);
-double3 __ovld __cnfn convert_double3_rte(uint3);
-double3 __ovld __cnfn convert_double3_rte(ulong3);
-double3 __ovld __cnfn convert_double3_rte(ushort3);
-double3 __ovld __cnfn convert_double3_rtn(char3);
-double3 __ovld __cnfn convert_double3_rtn(double3);
-double3 __ovld __cnfn convert_double3_rtn(float3);
-double3 __ovld __cnfn convert_double3_rtn(int3);
-double3 __ovld __cnfn convert_double3_rtn(long3);
-double3 __ovld __cnfn convert_double3_rtn(short3);
-double3 __ovld __cnfn convert_double3_rtn(uchar3);
-double3 __ovld __cnfn convert_double3_rtn(uint3);
-double3 __ovld __cnfn convert_double3_rtn(ulong3);
-double3 __ovld __cnfn convert_double3_rtn(ushort3);
-double3 __ovld __cnfn convert_double3_rtp(char3);
-double3 __ovld __cnfn convert_double3_rtp(double3);
-double3 __ovld __cnfn convert_double3_rtp(float3);
-double3 __ovld __cnfn convert_double3_rtp(int3);
-double3 __ovld __cnfn convert_double3_rtp(long3);
-double3 __ovld __cnfn convert_double3_rtp(short3);
-double3 __ovld __cnfn convert_double3_rtp(uchar3);
-double3 __ovld __cnfn convert_double3_rtp(uint3);
-double3 __ovld __cnfn convert_double3_rtp(ulong3);
-double3 __ovld __cnfn convert_double3_rtp(ushort3);
-double3 __ovld __cnfn convert_double3_rtz(char3);
-double3 __ovld __cnfn convert_double3_rtz(double3);
-double3 __ovld __cnfn convert_double3_rtz(float3);
-double3 __ovld __cnfn convert_double3_rtz(int3);
-double3 __ovld __cnfn convert_double3_rtz(long3);
-double3 __ovld __cnfn convert_double3_rtz(short3);
-double3 __ovld __cnfn convert_double3_rtz(uchar3);
-double3 __ovld __cnfn convert_double3_rtz(uint3);
-double3 __ovld __cnfn convert_double3_rtz(ulong3);
-double3 __ovld __cnfn convert_double3_rtz(ushort3);
-double4 __ovld __cnfn convert_double4(char4);
-double4 __ovld __cnfn convert_double4(double4);
-double4 __ovld __cnfn convert_double4(float4);
-double4 __ovld __cnfn convert_double4(int4);
-double4 __ovld __cnfn convert_double4(long4);
-double4 __ovld __cnfn convert_double4(short4);
-double4 __ovld __cnfn convert_double4(uchar4);
-double4 __ovld __cnfn convert_double4(uint4);
-double4 __ovld __cnfn convert_double4(ulong4);
-double4 __ovld __cnfn convert_double4(ushort4);
-double4 __ovld __cnfn convert_double4_rte(char4);
-double4 __ovld __cnfn convert_double4_rte(double4);
-double4 __ovld __cnfn convert_double4_rte(float4);
-double4 __ovld __cnfn convert_double4_rte(int4);
-double4 __ovld __cnfn convert_double4_rte(long4);
-double4 __ovld __cnfn convert_double4_rte(short4);
-double4 __ovld __cnfn convert_double4_rte(uchar4);
-double4 __ovld __cnfn convert_double4_rte(uint4);
-double4 __ovld __cnfn convert_double4_rte(ulong4);
-double4 __ovld __cnfn convert_double4_rte(ushort4);
-double4 __ovld __cnfn convert_double4_rtn(char4);
-double4 __ovld __cnfn convert_double4_rtn(double4);
-double4 __ovld __cnfn convert_double4_rtn(float4);
-double4 __ovld __cnfn convert_double4_rtn(int4);
-double4 __ovld __cnfn convert_double4_rtn(long4);
-double4 __ovld __cnfn convert_double4_rtn(short4);
-double4 __ovld __cnfn convert_double4_rtn(uchar4);
-double4 __ovld __cnfn convert_double4_rtn(uint4);
-double4 __ovld __cnfn convert_double4_rtn(ulong4);
-double4 __ovld __cnfn convert_double4_rtn(ushort4);
-double4 __ovld __cnfn convert_double4_rtp(char4);
-double4 __ovld __cnfn convert_double4_rtp(double4);
-double4 __ovld __cnfn convert_double4_rtp(float4);
-double4 __ovld __cnfn convert_double4_rtp(int4);
-double4 __ovld __cnfn convert_double4_rtp(long4);
-double4 __ovld __cnfn convert_double4_rtp(short4);
-double4 __ovld __cnfn convert_double4_rtp(uchar4);
-double4 __ovld __cnfn convert_double4_rtp(uint4);
-double4 __ovld __cnfn convert_double4_rtp(ulong4);
-double4 __ovld __cnfn convert_double4_rtp(ushort4);
-double4 __ovld __cnfn convert_double4_rtz(char4);
-double4 __ovld __cnfn convert_double4_rtz(double4);
-double4 __ovld __cnfn convert_double4_rtz(float4);
-double4 __ovld __cnfn convert_double4_rtz(int4);
-double4 __ovld __cnfn convert_double4_rtz(long4);
-double4 __ovld __cnfn convert_double4_rtz(short4);
-double4 __ovld __cnfn convert_double4_rtz(uchar4);
-double4 __ovld __cnfn convert_double4_rtz(uint4);
-double4 __ovld __cnfn convert_double4_rtz(ulong4);
-double4 __ovld __cnfn convert_double4_rtz(ushort4);
-double8 __ovld __cnfn convert_double8(char8);
-double8 __ovld __cnfn convert_double8(double8);
-double8 __ovld __cnfn convert_double8(float8);
-double8 __ovld __cnfn convert_double8(int8);
-double8 __ovld __cnfn convert_double8(long8);
-double8 __ovld __cnfn convert_double8(short8);
-double8 __ovld __cnfn convert_double8(uchar8);
-double8 __ovld __cnfn convert_double8(uint8);
-double8 __ovld __cnfn convert_double8(ulong8);
-double8 __ovld __cnfn convert_double8(ushort8);
-double8 __ovld __cnfn convert_double8_rte(char8);
-double8 __ovld __cnfn convert_double8_rte(double8);
-double8 __ovld __cnfn convert_double8_rte(float8);
-double8 __ovld __cnfn convert_double8_rte(int8);
-double8 __ovld __cnfn convert_double8_rte(long8);
-double8 __ovld __cnfn convert_double8_rte(short8);
-double8 __ovld __cnfn convert_double8_rte(uchar8);
-double8 __ovld __cnfn convert_double8_rte(uint8);
-double8 __ovld __cnfn convert_double8_rte(ulong8);
-double8 __ovld __cnfn convert_double8_rte(ushort8);
-double8 __ovld __cnfn convert_double8_rtn(char8);
-double8 __ovld __cnfn convert_double8_rtn(double8);
-double8 __ovld __cnfn convert_double8_rtn(float8);
-double8 __ovld __cnfn convert_double8_rtn(int8);
-double8 __ovld __cnfn convert_double8_rtn(long8);
-double8 __ovld __cnfn convert_double8_rtn(short8);
-double8 __ovld __cnfn convert_double8_rtn(uchar8);
-double8 __ovld __cnfn convert_double8_rtn(uint8);
-double8 __ovld __cnfn convert_double8_rtn(ulong8);
-double8 __ovld __cnfn convert_double8_rtn(ushort8);
-double8 __ovld __cnfn convert_double8_rtp(char8);
-double8 __ovld __cnfn convert_double8_rtp(double8);
-double8 __ovld __cnfn convert_double8_rtp(float8);
-double8 __ovld __cnfn convert_double8_rtp(int8);
-double8 __ovld __cnfn convert_double8_rtp(long8);
-double8 __ovld __cnfn convert_double8_rtp(short8);
-double8 __ovld __cnfn convert_double8_rtp(uchar8);
-double8 __ovld __cnfn convert_double8_rtp(uint8);
-double8 __ovld __cnfn convert_double8_rtp(ulong8);
-double8 __ovld __cnfn convert_double8_rtp(ushort8);
-double8 __ovld __cnfn convert_double8_rtz(char8);
-double8 __ovld __cnfn convert_double8_rtz(double8);
-double8 __ovld __cnfn convert_double8_rtz(float8);
-double8 __ovld __cnfn convert_double8_rtz(int8);
-double8 __ovld __cnfn convert_double8_rtz(long8);
-double8 __ovld __cnfn convert_double8_rtz(short8);
-double8 __ovld __cnfn convert_double8_rtz(uchar8);
-double8 __ovld __cnfn convert_double8_rtz(uint8);
-double8 __ovld __cnfn convert_double8_rtz(ulong8);
-double8 __ovld __cnfn convert_double8_rtz(ushort8);
-double16 __ovld __cnfn convert_double16(char16);
-double16 __ovld __cnfn convert_double16(double16);
-double16 __ovld __cnfn convert_double16(float16);
-double16 __ovld __cnfn convert_double16(int16);
-double16 __ovld __cnfn convert_double16(long16);
-double16 __ovld __cnfn convert_double16(short16);
-double16 __ovld __cnfn convert_double16(uchar16);
-double16 __ovld __cnfn convert_double16(uint16);
-double16 __ovld __cnfn convert_double16(ulong16);
-double16 __ovld __cnfn convert_double16(ushort16);
-double16 __ovld __cnfn convert_double16_rte(char16);
-double16 __ovld __cnfn convert_double16_rte(double16);
-double16 __ovld __cnfn convert_double16_rte(float16);
-double16 __ovld __cnfn convert_double16_rte(int16);
-double16 __ovld __cnfn convert_double16_rte(long16);
-double16 __ovld __cnfn convert_double16_rte(short16);
-double16 __ovld __cnfn convert_double16_rte(uchar16);
-double16 __ovld __cnfn convert_double16_rte(uint16);
-double16 __ovld __cnfn convert_double16_rte(ulong16);
-double16 __ovld __cnfn convert_double16_rte(ushort16);
-double16 __ovld __cnfn convert_double16_rtn(char16);
-double16 __ovld __cnfn convert_double16_rtn(double16);
-double16 __ovld __cnfn convert_double16_rtn(float16);
-double16 __ovld __cnfn convert_double16_rtn(int16);
-double16 __ovld __cnfn convert_double16_rtn(long16);
-double16 __ovld __cnfn convert_double16_rtn(short16);
-double16 __ovld __cnfn convert_double16_rtn(uchar16);
-double16 __ovld __cnfn convert_double16_rtn(uint16);
-double16 __ovld __cnfn convert_double16_rtn(ulong16);
-double16 __ovld __cnfn convert_double16_rtn(ushort16);
-double16 __ovld __cnfn convert_double16_rtp(char16);
-double16 __ovld __cnfn convert_double16_rtp(double16);
-double16 __ovld __cnfn convert_double16_rtp(float16);
-double16 __ovld __cnfn convert_double16_rtp(int16);
-double16 __ovld __cnfn convert_double16_rtp(long16);
-double16 __ovld __cnfn convert_double16_rtp(short16);
-double16 __ovld __cnfn convert_double16_rtp(uchar16);
-double16 __ovld __cnfn convert_double16_rtp(uint16);
-double16 __ovld __cnfn convert_double16_rtp(ulong16);
-double16 __ovld __cnfn convert_double16_rtp(ushort16);
-double16 __ovld __cnfn convert_double16_rtz(char16);
-double16 __ovld __cnfn convert_double16_rtz(double16);
-double16 __ovld __cnfn convert_double16_rtz(float16);
-double16 __ovld __cnfn convert_double16_rtz(int16);
-double16 __ovld __cnfn convert_double16_rtz(long16);
-double16 __ovld __cnfn convert_double16_rtz(short16);
-double16 __ovld __cnfn convert_double16_rtz(uchar16);
-double16 __ovld __cnfn convert_double16_rtz(uint16);
-double16 __ovld __cnfn convert_double16_rtz(ulong16);
-double16 __ovld __cnfn convert_double16_rtz(ushort16);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-// Convert half types to non-double types.
-uchar __ovld __cnfn convert_uchar(half);
-uchar __ovld __cnfn convert_uchar_rte(half);
-uchar __ovld __cnfn convert_uchar_rtp(half);
-uchar __ovld __cnfn convert_uchar_rtn(half);
-uchar __ovld __cnfn convert_uchar_rtz(half);
-uchar __ovld __cnfn convert_uchar_sat(half);
-uchar __ovld __cnfn convert_uchar_sat_rte(half);
-uchar __ovld __cnfn convert_uchar_sat_rtp(half);
-uchar __ovld __cnfn convert_uchar_sat_rtn(half);
-uchar __ovld __cnfn convert_uchar_sat_rtz(half);
-uchar2 __ovld __cnfn convert_uchar2(half2);
-uchar2 __ovld __cnfn convert_uchar2_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
-uchar3 __ovld __cnfn convert_uchar3(half3);
-uchar3 __ovld __cnfn convert_uchar3_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
-uchar4 __ovld __cnfn convert_uchar4(half4);
-uchar4 __ovld __cnfn convert_uchar4_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
-uchar8 __ovld __cnfn convert_uchar8(half8);
-uchar8 __ovld __cnfn convert_uchar8_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
-uchar16 __ovld __cnfn convert_uchar16(half16);
-uchar16 __ovld __cnfn convert_uchar16_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
-ushort __ovld __cnfn convert_ushort(half);
-ushort __ovld __cnfn convert_ushort_rte(half);
-ushort __ovld __cnfn convert_ushort_rtp(half);
-ushort __ovld __cnfn convert_ushort_rtn(half);
-ushort __ovld __cnfn convert_ushort_rtz(half);
-ushort __ovld __cnfn convert_ushort_sat(half);
-ushort __ovld __cnfn convert_ushort_sat_rte(half);
-ushort __ovld __cnfn convert_ushort_sat_rtp(half);
-ushort __ovld __cnfn convert_ushort_sat_rtn(half);
-ushort __ovld __cnfn convert_ushort_sat_rtz(half);
-ushort2 __ovld __cnfn convert_ushort2(half2);
-ushort2 __ovld __cnfn convert_ushort2_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
-ushort3 __ovld __cnfn convert_ushort3(half3);
-ushort3 __ovld __cnfn convert_ushort3_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
-ushort4 __ovld __cnfn convert_ushort4(half4);
-ushort4 __ovld __cnfn convert_ushort4_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
-ushort8 __ovld __cnfn convert_ushort8(half8);
-ushort8 __ovld __cnfn convert_ushort8_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
-ushort16 __ovld __cnfn convert_ushort16(half16);
-ushort16 __ovld __cnfn convert_ushort16_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
-uint __ovld __cnfn convert_uint(half);
-uint __ovld __cnfn convert_uint_rte(half);
-uint __ovld __cnfn convert_uint_rtp(half);
-uint __ovld __cnfn convert_uint_rtn(half);
-uint __ovld __cnfn convert_uint_rtz(half);
-uint __ovld __cnfn convert_uint_sat(half);
-uint __ovld __cnfn convert_uint_sat_rte(half);
-uint __ovld __cnfn convert_uint_sat_rtp(half);
-uint __ovld __cnfn convert_uint_sat_rtn(half);
-uint __ovld __cnfn convert_uint_sat_rtz(half);
-uint2 __ovld __cnfn convert_uint2(half2);
-uint2 __ovld __cnfn convert_uint2_rte(half2);
-uint2 __ovld __cnfn convert_uint2_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_rtz(half2);
-uint2 __ovld __cnfn convert_uint2_sat(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
-uint3 __ovld __cnfn convert_uint3(half3);
-uint3 __ovld __cnfn convert_uint3_rte(half3);
-uint3 __ovld __cnfn convert_uint3_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_rtz(half3);
-uint3 __ovld __cnfn convert_uint3_sat(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
-uint4 __ovld __cnfn convert_uint4(half4);
-uint4 __ovld __cnfn convert_uint4_rte(half4);
-uint4 __ovld __cnfn convert_uint4_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_rtz(half4);
-uint4 __ovld __cnfn convert_uint4_sat(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
-uint8 __ovld __cnfn convert_uint8(half8);
-uint8 __ovld __cnfn convert_uint8_rte(half8);
-uint8 __ovld __cnfn convert_uint8_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_rtz(half8);
-uint8 __ovld __cnfn convert_uint8_sat(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
-uint16 __ovld __cnfn convert_uint16(half16);
-uint16 __ovld __cnfn convert_uint16_rte(half16);
-uint16 __ovld __cnfn convert_uint16_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_rtz(half16);
-uint16 __ovld __cnfn convert_uint16_sat(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
-ulong __ovld __cnfn convert_ulong(half);
-ulong __ovld __cnfn convert_ulong_rte(half);
-ulong __ovld __cnfn convert_ulong_rtp(half);
-ulong __ovld __cnfn convert_ulong_rtn(half);
-ulong __ovld __cnfn convert_ulong_rtz(half);
-ulong __ovld __cnfn convert_ulong_sat(half);
-ulong __ovld __cnfn convert_ulong_sat_rte(half);
-ulong __ovld __cnfn convert_ulong_sat_rtp(half);
-ulong __ovld __cnfn convert_ulong_sat_rtn(half);
-ulong __ovld __cnfn convert_ulong_sat_rtz(half);
-ulong2 __ovld __cnfn convert_ulong2(half2);
-ulong2 __ovld __cnfn convert_ulong2_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
-ulong3 __ovld __cnfn convert_ulong3(half3);
-ulong3 __ovld __cnfn convert_ulong3_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
-ulong4 __ovld __cnfn convert_ulong4(half4);
-ulong4 __ovld __cnfn convert_ulong4_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
-ulong8 __ovld __cnfn convert_ulong8(half8);
-ulong8 __ovld __cnfn convert_ulong8_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
-ulong16 __ovld __cnfn convert_ulong16(half16);
-ulong16 __ovld __cnfn convert_ulong16_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
-char __ovld __cnfn convert_char(half);
-char __ovld __cnfn convert_char_rte(half);
-char __ovld __cnfn convert_char_rtp(half);
-char __ovld __cnfn convert_char_rtn(half);
-char __ovld __cnfn convert_char_rtz(half);
-char __ovld __cnfn convert_char_sat(half);
-char __ovld __cnfn convert_char_sat_rte(half);
-char __ovld __cnfn convert_char_sat_rtp(half);
-char __ovld __cnfn convert_char_sat_rtn(half);
-char __ovld __cnfn convert_char_sat_rtz(half);
-char2 __ovld __cnfn convert_char2(half2);
-char2 __ovld __cnfn convert_char2_rte(half2);
-char2 __ovld __cnfn convert_char2_rtp(half2);
-char2 __ovld __cnfn convert_char2_rtn(half2);
-char2 __ovld __cnfn convert_char2_rtz(half2);
-char2 __ovld __cnfn convert_char2_sat(half2);
-char2 __ovld __cnfn convert_char2_sat_rte(half2);
-char2 __ovld __cnfn convert_char2_sat_rtp(half2);
-char2 __ovld __cnfn convert_char2_sat_rtn(half2);
-char2 __ovld __cnfn convert_char2_sat_rtz(half2);
-char3 __ovld __cnfn convert_char3(half3);
-char3 __ovld __cnfn convert_char3_rte(half3);
-char3 __ovld __cnfn convert_char3_rtp(half3);
-char3 __ovld __cnfn convert_char3_rtn(half3);
-char3 __ovld __cnfn convert_char3_rtz(half3);
-char3 __ovld __cnfn convert_char3_sat(half3);
-char3 __ovld __cnfn convert_char3_sat_rte(half3);
-char3 __ovld __cnfn convert_char3_sat_rtp(half3);
-char3 __ovld __cnfn convert_char3_sat_rtn(half3);
-char3 __ovld __cnfn convert_char3_sat_rtz(half3);
-char4 __ovld __cnfn convert_char4(half4);
-char4 __ovld __cnfn convert_char4_rte(half4);
-char4 __ovld __cnfn convert_char4_rtp(half4);
-char4 __ovld __cnfn convert_char4_rtn(half4);
-char4 __ovld __cnfn convert_char4_rtz(half4);
-char4 __ovld __cnfn convert_char4_sat(half4);
-char4 __ovld __cnfn convert_char4_sat_rte(half4);
-char4 __ovld __cnfn convert_char4_sat_rtp(half4);
-char4 __ovld __cnfn convert_char4_sat_rtn(half4);
-char4 __ovld __cnfn convert_char4_sat_rtz(half4);
-char8 __ovld __cnfn convert_char8(half8);
-char8 __ovld __cnfn convert_char8_rte(half8);
-char8 __ovld __cnfn convert_char8_rtp(half8);
-char8 __ovld __cnfn convert_char8_rtn(half8);
-char8 __ovld __cnfn convert_char8_rtz(half8);
-char8 __ovld __cnfn convert_char8_sat(half8);
-char8 __ovld __cnfn convert_char8_sat_rte(half8);
-char8 __ovld __cnfn convert_char8_sat_rtp(half8);
-char8 __ovld __cnfn convert_char8_sat_rtn(half8);
-char8 __ovld __cnfn convert_char8_sat_rtz(half8);
-char16 __ovld __cnfn convert_char16(half16);
-char16 __ovld __cnfn convert_char16_rte(half16);
-char16 __ovld __cnfn convert_char16_rtp(half16);
-char16 __ovld __cnfn convert_char16_rtn(half16);
-char16 __ovld __cnfn convert_char16_rtz(half16);
-char16 __ovld __cnfn convert_char16_sat(half16);
-char16 __ovld __cnfn convert_char16_sat_rte(half16);
-char16 __ovld __cnfn convert_char16_sat_rtp(half16);
-char16 __ovld __cnfn convert_char16_sat_rtn(half16);
-char16 __ovld __cnfn convert_char16_sat_rtz(half16);
-short __ovld __cnfn convert_short(half);
-short __ovld __cnfn convert_short_rte(half);
-short __ovld __cnfn convert_short_rtp(half);
-short __ovld __cnfn convert_short_rtn(half);
-short __ovld __cnfn convert_short_rtz(half);
-short __ovld __cnfn convert_short_sat(half);
-short __ovld __cnfn convert_short_sat_rte(half);
-short __ovld __cnfn convert_short_sat_rtp(half);
-short __ovld __cnfn convert_short_sat_rtn(half);
-short __ovld __cnfn convert_short_sat_rtz(half);
-short2 __ovld __cnfn convert_short2(half2);
-short2 __ovld __cnfn convert_short2_rte(half2);
-short2 __ovld __cnfn convert_short2_rtp(half2);
-short2 __ovld __cnfn convert_short2_rtn(half2);
-short2 __ovld __cnfn convert_short2_rtz(half2);
-short2 __ovld __cnfn convert_short2_sat(half2);
-short2 __ovld __cnfn convert_short2_sat_rte(half2);
-short2 __ovld __cnfn convert_short2_sat_rtp(half2);
-short2 __ovld __cnfn convert_short2_sat_rtn(half2);
-short2 __ovld __cnfn convert_short2_sat_rtz(half2);
-short3 __ovld __cnfn convert_short3(half3);
-short3 __ovld __cnfn convert_short3_rte(half3);
-short3 __ovld __cnfn convert_short3_rtp(half3);
-short3 __ovld __cnfn convert_short3_rtn(half3);
-short3 __ovld __cnfn convert_short3_rtz(half3);
-short3 __ovld __cnfn convert_short3_sat(half3);
-short3 __ovld __cnfn convert_short3_sat_rte(half3);
-short3 __ovld __cnfn convert_short3_sat_rtp(half3);
-short3 __ovld __cnfn convert_short3_sat_rtn(half3);
-short3 __ovld __cnfn convert_short3_sat_rtz(half3);
-short4 __ovld __cnfn convert_short4(half4);
-short4 __ovld __cnfn convert_short4_rte(half4);
-short4 __ovld __cnfn convert_short4_rtp(half4);
-short4 __ovld __cnfn convert_short4_rtn(half4);
-short4 __ovld __cnfn convert_short4_rtz(half4);
-short4 __ovld __cnfn convert_short4_sat(half4);
-short4 __ovld __cnfn convert_short4_sat_rte(half4);
-short4 __ovld __cnfn convert_short4_sat_rtp(half4);
-short4 __ovld __cnfn convert_short4_sat_rtn(half4);
-short4 __ovld __cnfn convert_short4_sat_rtz(half4);
-short8 __ovld __cnfn convert_short8(half8);
-short8 __ovld __cnfn convert_short8_rte(half8);
-short8 __ovld __cnfn convert_short8_rtp(half8);
-short8 __ovld __cnfn convert_short8_rtn(half8);
-short8 __ovld __cnfn convert_short8_rtz(half8);
-short8 __ovld __cnfn convert_short8_sat(half8);
-short8 __ovld __cnfn convert_short8_sat_rte(half8);
-short8 __ovld __cnfn convert_short8_sat_rtp(half8);
-short8 __ovld __cnfn convert_short8_sat_rtn(half8);
-short8 __ovld __cnfn convert_short8_sat_rtz(half8);
-short16 __ovld __cnfn convert_short16(half16);
-short16 __ovld __cnfn convert_short16_rte(half16);
-short16 __ovld __cnfn convert_short16_rtp(half16);
-short16 __ovld __cnfn convert_short16_rtn(half16);
-short16 __ovld __cnfn convert_short16_rtz(half16);
-short16 __ovld __cnfn convert_short16_sat(half16);
-short16 __ovld __cnfn convert_short16_sat_rte(half16);
-short16 __ovld __cnfn convert_short16_sat_rtp(half16);
-short16 __ovld __cnfn convert_short16_sat_rtn(half16);
-short16 __ovld __cnfn convert_short16_sat_rtz(half16);
-int __ovld __cnfn convert_int(half);
-int __ovld __cnfn convert_int_rte(half);
-int __ovld __cnfn convert_int_rtp(half);
-int __ovld __cnfn convert_int_rtn(half);
-int __ovld __cnfn convert_int_rtz(half);
-int __ovld __cnfn convert_int_sat(half);
-int __ovld __cnfn convert_int_sat_rte(half);
-int __ovld __cnfn convert_int_sat_rtp(half);
-int __ovld __cnfn convert_int_sat_rtn(half);
-int __ovld __cnfn convert_int_sat_rtz(half);
-int2 __ovld __cnfn convert_int2(half2);
-int2 __ovld __cnfn convert_int2_rte(half2);
-int2 __ovld __cnfn convert_int2_rtp(half2);
-int2 __ovld __cnfn convert_int2_rtn(half2);
-int2 __ovld __cnfn convert_int2_rtz(half2);
-int2 __ovld __cnfn convert_int2_sat(half2);
-int2 __ovld __cnfn convert_int2_sat_rte(half2);
-int2 __ovld __cnfn convert_int2_sat_rtp(half2);
-int2 __ovld __cnfn convert_int2_sat_rtn(half2);
-int2 __ovld __cnfn convert_int2_sat_rtz(half2);
-int3 __ovld __cnfn convert_int3(half3);
-int3 __ovld __cnfn convert_int3_rte(half3);
-int3 __ovld __cnfn convert_int3_rtp(half3);
-int3 __ovld __cnfn convert_int3_rtn(half3);
-int3 __ovld __cnfn convert_int3_rtz(half3);
-int3 __ovld __cnfn convert_int3_sat(half3);
-int3 __ovld __cnfn convert_int3_sat_rte(half3);
-int3 __ovld __cnfn convert_int3_sat_rtp(half3);
-int3 __ovld __cnfn convert_int3_sat_rtn(half3);
-int3 __ovld __cnfn convert_int3_sat_rtz(half3);
-int4 __ovld __cnfn convert_int4(half4);
-int4 __ovld __cnfn convert_int4_rte(half4);
-int4 __ovld __cnfn convert_int4_rtp(half4);
-int4 __ovld __cnfn convert_int4_rtn(half4);
-int4 __ovld __cnfn convert_int4_rtz(half4);
-int4 __ovld __cnfn convert_int4_sat(half4);
-int4 __ovld __cnfn convert_int4_sat_rte(half4);
-int4 __ovld __cnfn convert_int4_sat_rtp(half4);
-int4 __ovld __cnfn convert_int4_sat_rtn(half4);
-int4 __ovld __cnfn convert_int4_sat_rtz(half4);
-int8 __ovld __cnfn convert_int8(half8);
-int8 __ovld __cnfn convert_int8_rte(half8);
-int8 __ovld __cnfn convert_int8_rtp(half8);
-int8 __ovld __cnfn convert_int8_rtn(half8);
-int8 __ovld __cnfn convert_int8_rtz(half8);
-int8 __ovld __cnfn convert_int8_sat(half8);
-int8 __ovld __cnfn convert_int8_sat_rte(half8);
-int8 __ovld __cnfn convert_int8_sat_rtp(half8);
-int8 __ovld __cnfn convert_int8_sat_rtn(half8);
-int8 __ovld __cnfn convert_int8_sat_rtz(half8);
-int16 __ovld __cnfn convert_int16(half16);
-int16 __ovld __cnfn convert_int16_rte(half16);
-int16 __ovld __cnfn convert_int16_rtp(half16);
-int16 __ovld __cnfn convert_int16_rtn(half16);
-int16 __ovld __cnfn convert_int16_rtz(half16);
-int16 __ovld __cnfn convert_int16_sat(half16);
-int16 __ovld __cnfn convert_int16_sat_rte(half16);
-int16 __ovld __cnfn convert_int16_sat_rtp(half16);
-int16 __ovld __cnfn convert_int16_sat_rtn(half16);
-int16 __ovld __cnfn convert_int16_sat_rtz(half16);
-long __ovld __cnfn convert_long(half);
-long __ovld __cnfn convert_long_rte(half);
-long __ovld __cnfn convert_long_rtp(half);
-long __ovld __cnfn convert_long_rtn(half);
-long __ovld __cnfn convert_long_rtz(half);
-long __ovld __cnfn convert_long_sat(half);
-long __ovld __cnfn convert_long_sat_rte(half);
-long __ovld __cnfn convert_long_sat_rtp(half);
-long __ovld __cnfn convert_long_sat_rtn(half);
-long __ovld __cnfn convert_long_sat_rtz(half);
-long2 __ovld __cnfn convert_long2(half2);
-long2 __ovld __cnfn convert_long2_rte(half2);
-long2 __ovld __cnfn convert_long2_rtp(half2);
-long2 __ovld __cnfn convert_long2_rtn(half2);
-long2 __ovld __cnfn convert_long2_rtz(half2);
-long2 __ovld __cnfn convert_long2_sat(half2);
-long2 __ovld __cnfn convert_long2_sat_rte(half2);
-long2 __ovld __cnfn convert_long2_sat_rtp(half2);
-long2 __ovld __cnfn convert_long2_sat_rtn(half2);
-long2 __ovld __cnfn convert_long2_sat_rtz(half2);
-long3 __ovld __cnfn convert_long3(half3);
-long3 __ovld __cnfn convert_long3_rte(half3);
-long3 __ovld __cnfn convert_long3_rtp(half3);
-long3 __ovld __cnfn convert_long3_rtn(half3);
-long3 __ovld __cnfn convert_long3_rtz(half3);
-long3 __ovld __cnfn convert_long3_sat(half3);
-long3 __ovld __cnfn convert_long3_sat_rte(half3);
-long3 __ovld __cnfn convert_long3_sat_rtp(half3);
-long3 __ovld __cnfn convert_long3_sat_rtn(half3);
-long3 __ovld __cnfn convert_long3_sat_rtz(half3);
-long4 __ovld __cnfn convert_long4(half4);
-long4 __ovld __cnfn convert_long4_rte(half4);
-long4 __ovld __cnfn convert_long4_rtp(half4);
-long4 __ovld __cnfn convert_long4_rtn(half4);
-long4 __ovld __cnfn convert_long4_rtz(half4);
-long4 __ovld __cnfn convert_long4_sat(half4);
-long4 __ovld __cnfn convert_long4_sat_rte(half4);
-long4 __ovld __cnfn convert_long4_sat_rtp(half4);
-long4 __ovld __cnfn convert_long4_sat_rtn(half4);
-long4 __ovld __cnfn convert_long4_sat_rtz(half4);
-long8 __ovld __cnfn convert_long8(half8);
-long8 __ovld __cnfn convert_long8_rte(half8);
-long8 __ovld __cnfn convert_long8_rtp(half8);
-long8 __ovld __cnfn convert_long8_rtn(half8);
-long8 __ovld __cnfn convert_long8_rtz(half8);
-long8 __ovld __cnfn convert_long8_sat(half8);
-long8 __ovld __cnfn convert_long8_sat_rte(half8);
-long8 __ovld __cnfn convert_long8_sat_rtp(half8);
-long8 __ovld __cnfn convert_long8_sat_rtn(half8);
-long8 __ovld __cnfn convert_long8_sat_rtz(half8);
-long16 __ovld __cnfn convert_long16(half16);
-long16 __ovld __cnfn convert_long16_rte(half16);
-long16 __ovld __cnfn convert_long16_rtp(half16);
-long16 __ovld __cnfn convert_long16_rtn(half16);
-long16 __ovld __cnfn convert_long16_rtz(half16);
-long16 __ovld __cnfn convert_long16_sat(half16);
-long16 __ovld __cnfn convert_long16_sat_rte(half16);
-long16 __ovld __cnfn convert_long16_sat_rtp(half16);
-long16 __ovld __cnfn convert_long16_sat_rtn(half16);
-long16 __ovld __cnfn convert_long16_sat_rtz(half16);
-float __ovld __cnfn convert_float(half);
-float __ovld __cnfn convert_float_rte(half);
-float __ovld __cnfn convert_float_rtp(half);
-float __ovld __cnfn convert_float_rtn(half);
-float __ovld __cnfn convert_float_rtz(half);
-float2 __ovld __cnfn convert_float2(half2);
-float2 __ovld __cnfn convert_float2_rte(half2);
-float2 __ovld __cnfn convert_float2_rtp(half2);
-float2 __ovld __cnfn convert_float2_rtn(half2);
-float2 __ovld __cnfn convert_float2_rtz(half2);
-float3 __ovld __cnfn convert_float3(half3);
-float3 __ovld __cnfn convert_float3_rte(half3);
-float3 __ovld __cnfn convert_float3_rtp(half3);
-float3 __ovld __cnfn convert_float3_rtn(half3);
-float3 __ovld __cnfn convert_float3_rtz(half3);
-float4 __ovld __cnfn convert_float4(half4);
-float4 __ovld __cnfn convert_float4_rte(half4);
-float4 __ovld __cnfn convert_float4_rtp(half4);
-float4 __ovld __cnfn convert_float4_rtn(half4);
-float4 __ovld __cnfn convert_float4_rtz(half4);
-float8 __ovld __cnfn convert_float8(half8);
-float8 __ovld __cnfn convert_float8_rte(half8);
-float8 __ovld __cnfn convert_float8_rtp(half8);
-float8 __ovld __cnfn convert_float8_rtn(half8);
-float8 __ovld __cnfn convert_float8_rtz(half8);
-float16 __ovld __cnfn convert_float16(half16);
-float16 __ovld __cnfn convert_float16_rte(half16);
-float16 __ovld __cnfn convert_float16_rtp(half16);
-float16 __ovld __cnfn convert_float16_rtn(half16);
-float16 __ovld __cnfn convert_float16_rtz(half16);
-
-// Convert non-double types to half types.
-half __ovld __cnfn convert_half(uchar);
-half __ovld __cnfn convert_half(ushort);
-half __ovld __cnfn convert_half(uint);
-half __ovld __cnfn convert_half(ulong);
-half __ovld __cnfn convert_half(char);
-half __ovld __cnfn convert_half(short);
-half __ovld __cnfn convert_half(int);
-half __ovld __cnfn convert_half(long);
-half __ovld __cnfn convert_half(float);
-half __ovld __cnfn convert_half(half);
-half __ovld __cnfn convert_half_rte(uchar);
-half __ovld __cnfn convert_half_rte(ushort);
-half __ovld __cnfn convert_half_rte(uint);
-half __ovld __cnfn convert_half_rte(ulong);
-half __ovld __cnfn convert_half_rte(char);
-half __ovld __cnfn convert_half_rte(short);
-half __ovld __cnfn convert_half_rte(int);
-half __ovld __cnfn convert_half_rte(long);
-half __ovld __cnfn convert_half_rte(float);
-half __ovld __cnfn convert_half_rte(half);
-half __ovld __cnfn convert_half_rtp(uchar);
-half __ovld __cnfn convert_half_rtp(ushort);
-half __ovld __cnfn convert_half_rtp(uint);
-half __ovld __cnfn convert_half_rtp(ulong);
-half __ovld __cnfn convert_half_rtp(char);
-half __ovld __cnfn convert_half_rtp(short);
-half __ovld __cnfn convert_half_rtp(int);
-half __ovld __cnfn convert_half_rtp(long);
-half __ovld __cnfn convert_half_rtp(float);
-half __ovld __cnfn convert_half_rtp(half);
-half __ovld __cnfn convert_half_rtn(uchar);
-half __ovld __cnfn convert_half_rtn(ushort);
-half __ovld __cnfn convert_half_rtn(uint);
-half __ovld __cnfn convert_half_rtn(ulong);
-half __ovld __cnfn convert_half_rtn(char);
-half __ovld __cnfn convert_half_rtn(short);
-half __ovld __cnfn convert_half_rtn(int);
-half __ovld __cnfn convert_half_rtn(long);
-half __ovld __cnfn convert_half_rtn(float);
-half __ovld __cnfn convert_half_rtn(half);
-half __ovld __cnfn convert_half_rtz(uchar);
-half __ovld __cnfn convert_half_rtz(ushort);
-half __ovld __cnfn convert_half_rtz(uint);
-half __ovld __cnfn convert_half_rtz(ulong);
-half __ovld __cnfn convert_half_rtz(char);
-half __ovld __cnfn convert_half_rtz(short);
-half __ovld __cnfn convert_half_rtz(int);
-half __ovld __cnfn convert_half_rtz(long);
-half __ovld __cnfn convert_half_rtz(float);
-half __ovld __cnfn convert_half_rtz(half);
-half2 __ovld __cnfn convert_half2(char2);
-half2 __ovld __cnfn convert_half2(uchar2);
-half2 __ovld __cnfn convert_half2(short2);
-half2 __ovld __cnfn convert_half2(ushort2);
-half2 __ovld __cnfn convert_half2(int2);
-half2 __ovld __cnfn convert_half2(uint2);
-half2 __ovld __cnfn convert_half2(long2);
-half2 __ovld __cnfn convert_half2(ulong2);
-half2 __ovld __cnfn convert_half2(float2);
-half2 __ovld __cnfn convert_half2(half2);
-half2 __ovld __cnfn convert_half2_rte(char2);
-half2 __ovld __cnfn convert_half2_rte(uchar2);
-half2 __ovld __cnfn convert_half2_rte(short2);
-half2 __ovld __cnfn convert_half2_rte(ushort2);
-half2 __ovld __cnfn convert_half2_rte(int2);
-half2 __ovld __cnfn convert_half2_rte(uint2);
-half2 __ovld __cnfn convert_half2_rte(long2);
-half2 __ovld __cnfn convert_half2_rte(ulong2);
-half2 __ovld __cnfn convert_half2_rte(float2);
-half2 __ovld __cnfn convert_half2_rte(half2);
-half2 __ovld __cnfn convert_half2_rtp(char2);
-half2 __ovld __cnfn convert_half2_rtp(uchar2);
-half2 __ovld __cnfn convert_half2_rtp(short2);
-half2 __ovld __cnfn convert_half2_rtp(ushort2);
-half2 __ovld __cnfn convert_half2_rtp(int2);
-half2 __ovld __cnfn convert_half2_rtp(uint2);
-half2 __ovld __cnfn convert_half2_rtp(long2);
-half2 __ovld __cnfn convert_half2_rtp(ulong2);
-half2 __ovld __cnfn convert_half2_rtp(float2);
-half2 __ovld __cnfn convert_half2_rtp(half2);
-half2 __ovld __cnfn convert_half2_rtn(char2);
-half2 __ovld __cnfn convert_half2_rtn(uchar2);
-half2 __ovld __cnfn convert_half2_rtn(short2);
-half2 __ovld __cnfn convert_half2_rtn(ushort2);
-half2 __ovld __cnfn convert_half2_rtn(int2);
-half2 __ovld __cnfn convert_half2_rtn(uint2);
-half2 __ovld __cnfn convert_half2_rtn(long2);
-half2 __ovld __cnfn convert_half2_rtn(ulong2);
-half2 __ovld __cnfn convert_half2_rtn(float2);
-half2 __ovld __cnfn convert_half2_rtn(half2);
-half2 __ovld __cnfn convert_half2_rtz(char2);
-half2 __ovld __cnfn convert_half2_rtz(uchar2);
-half2 __ovld __cnfn convert_half2_rtz(short2);
-half2 __ovld __cnfn convert_half2_rtz(ushort2);
-half2 __ovld __cnfn convert_half2_rtz(int2);
-half2 __ovld __cnfn convert_half2_rtz(uint2);
-half2 __ovld __cnfn convert_half2_rtz(long2);
-half2 __ovld __cnfn convert_half2_rtz(ulong2);
-half2 __ovld __cnfn convert_half2_rtz(float2);
-half2 __ovld __cnfn convert_half2_rtz(half2);
-half3 __ovld __cnfn convert_half3(char3);
-half3 __ovld __cnfn convert_half3(uchar3);
-half3 __ovld __cnfn convert_half3(short3);
-half3 __ovld __cnfn convert_half3(ushort3);
-half3 __ovld __cnfn convert_half3(int3);
-half3 __ovld __cnfn convert_half3(uint3);
-half3 __ovld __cnfn convert_half3(long3);
-half3 __ovld __cnfn convert_half3(ulong3);
-half3 __ovld __cnfn convert_half3(float3);
-half3 __ovld __cnfn convert_half3(half3);
-half3 __ovld __cnfn convert_half3_rte(char3);
-half3 __ovld __cnfn convert_half3_rte(uchar3);
-half3 __ovld __cnfn convert_half3_rte(short3);
-half3 __ovld __cnfn convert_half3_rte(ushort3);
-half3 __ovld __cnfn convert_half3_rte(int3);
-half3 __ovld __cnfn convert_half3_rte(uint3);
-half3 __ovld __cnfn convert_half3_rte(long3);
-half3 __ovld __cnfn convert_half3_rte(ulong3);
-half3 __ovld __cnfn convert_half3_rte(float3);
-half3 __ovld __cnfn convert_half3_rte(half3);
-half3 __ovld __cnfn convert_half3_rtp(char3);
-half3 __ovld __cnfn convert_half3_rtp(uchar3);
-half3 __ovld __cnfn convert_half3_rtp(short3);
-half3 __ovld __cnfn convert_half3_rtp(ushort3);
-half3 __ovld __cnfn convert_half3_rtp(int3);
-half3 __ovld __cnfn convert_half3_rtp(uint3);
-half3 __ovld __cnfn convert_half3_rtp(long3);
-half3 __ovld __cnfn convert_half3_rtp(ulong3);
-half3 __ovld __cnfn convert_half3_rtp(float3);
-half3 __ovld __cnfn convert_half3_rtp(half3);
-half3 __ovld __cnfn convert_half3_rtn(char3);
-half3 __ovld __cnfn convert_half3_rtn(uchar3);
-half3 __ovld __cnfn convert_half3_rtn(short3);
-half3 __ovld __cnfn convert_half3_rtn(ushort3);
-half3 __ovld __cnfn convert_half3_rtn(int3);
-half3 __ovld __cnfn convert_half3_rtn(uint3);
-half3 __ovld __cnfn convert_half3_rtn(long3);
-half3 __ovld __cnfn convert_half3_rtn(ulong3);
-half3 __ovld __cnfn convert_half3_rtn(float3);
-half3 __ovld __cnfn convert_half3_rtn(half3);
-half3 __ovld __cnfn convert_half3_rtz(char3);
-half3 __ovld __cnfn convert_half3_rtz(uchar3);
-half3 __ovld __cnfn convert_half3_rtz(short3);
-half3 __ovld __cnfn convert_half3_rtz(ushort3);
-half3 __ovld __cnfn convert_half3_rtz(int3);
-half3 __ovld __cnfn convert_half3_rtz(uint3);
-half3 __ovld __cnfn convert_half3_rtz(long3);
-half3 __ovld __cnfn convert_half3_rtz(ulong3);
-half3 __ovld __cnfn convert_half3_rtz(float3);
-half3 __ovld __cnfn convert_half3_rtz(half3);
-half4 __ovld __cnfn convert_half4(char4);
-half4 __ovld __cnfn convert_half4(uchar4);
-half4 __ovld __cnfn convert_half4(short4);
-half4 __ovld __cnfn convert_half4(ushort4);
-half4 __ovld __cnfn convert_half4(int4);
-half4 __ovld __cnfn convert_half4(uint4);
-half4 __ovld __cnfn convert_half4(long4);
-half4 __ovld __cnfn convert_half4(ulong4);
-half4 __ovld __cnfn convert_half4(float4);
-half4 __ovld __cnfn convert_half4(half4);
-half4 __ovld __cnfn convert_half4_rte(char4);
-half4 __ovld __cnfn convert_half4_rte(uchar4);
-half4 __ovld __cnfn convert_half4_rte(short4);
-half4 __ovld __cnfn convert_half4_rte(ushort4);
-half4 __ovld __cnfn convert_half4_rte(int4);
-half4 __ovld __cnfn convert_half4_rte(uint4);
-half4 __ovld __cnfn convert_half4_rte(long4);
-half4 __ovld __cnfn convert_half4_rte(ulong4);
-half4 __ovld __cnfn convert_half4_rte(float4);
-half4 __ovld __cnfn convert_half4_rte(half4);
-half4 __ovld __cnfn convert_half4_rtp(char4);
-half4 __ovld __cnfn convert_half4_rtp(uchar4);
-half4 __ovld __cnfn convert_half4_rtp(short4);
-half4 __ovld __cnfn convert_half4_rtp(ushort4);
-half4 __ovld __cnfn convert_half4_rtp(int4);
-half4 __ovld __cnfn convert_half4_rtp(uint4);
-half4 __ovld __cnfn convert_half4_rtp(long4);
-half4 __ovld __cnfn convert_half4_rtp(ulong4);
-half4 __ovld __cnfn convert_half4_rtp(float4);
-half4 __ovld __cnfn convert_half4_rtp(half4);
-half4 __ovld __cnfn convert_half4_rtn(char4);
-half4 __ovld __cnfn convert_half4_rtn(uchar4);
-half4 __ovld __cnfn convert_half4_rtn(short4);
-half4 __ovld __cnfn convert_half4_rtn(ushort4);
-half4 __ovld __cnfn convert_half4_rtn(int4);
-half4 __ovld __cnfn convert_half4_rtn(uint4);
-half4 __ovld __cnfn convert_half4_rtn(long4);
-half4 __ovld __cnfn convert_half4_rtn(ulong4);
-half4 __ovld __cnfn convert_half4_rtn(float4);
-half4 __ovld __cnfn convert_half4_rtn(half4);
-half4 __ovld __cnfn convert_half4_rtz(char4);
-half4 __ovld __cnfn convert_half4_rtz(uchar4);
-half4 __ovld __cnfn convert_half4_rtz(short4);
-half4 __ovld __cnfn convert_half4_rtz(ushort4);
-half4 __ovld __cnfn convert_half4_rtz(int4);
-half4 __ovld __cnfn convert_half4_rtz(uint4);
-half4 __ovld __cnfn convert_half4_rtz(long4);
-half4 __ovld __cnfn convert_half4_rtz(ulong4);
-half4 __ovld __cnfn convert_half4_rtz(float4);
-half4 __ovld __cnfn convert_half4_rtz(half4);
-half8 __ovld __cnfn convert_half8(char8);
-half8 __ovld __cnfn convert_half8(uchar8);
-half8 __ovld __cnfn convert_half8(short8);
-half8 __ovld __cnfn convert_half8(ushort8);
-half8 __ovld __cnfn convert_half8(int8);
-half8 __ovld __cnfn convert_half8(uint8);
-half8 __ovld __cnfn convert_half8(long8);
-half8 __ovld __cnfn convert_half8(ulong8);
-half8 __ovld __cnfn convert_half8(float8);
-half8 __ovld __cnfn convert_half8(half8);
-half8 __ovld __cnfn convert_half8_rte(char8);
-half8 __ovld __cnfn convert_half8_rte(uchar8);
-half8 __ovld __cnfn convert_half8_rte(short8);
-half8 __ovld __cnfn convert_half8_rte(ushort8);
-half8 __ovld __cnfn convert_half8_rte(int8);
-half8 __ovld __cnfn convert_half8_rte(uint8);
-half8 __ovld __cnfn convert_half8_rte(long8);
-half8 __ovld __cnfn convert_half8_rte(ulong8);
-half8 __ovld __cnfn convert_half8_rte(float8);
-half8 __ovld __cnfn convert_half8_rte(half8);
-half8 __ovld __cnfn convert_half8_rtp(char8);
-half8 __ovld __cnfn convert_half8_rtp(uchar8);
-half8 __ovld __cnfn convert_half8_rtp(short8);
-half8 __ovld __cnfn convert_half8_rtp(ushort8);
-half8 __ovld __cnfn convert_half8_rtp(int8);
-half8 __ovld __cnfn convert_half8_rtp(uint8);
-half8 __ovld __cnfn convert_half8_rtp(long8);
-half8 __ovld __cnfn convert_half8_rtp(ulong8);
-half8 __ovld __cnfn convert_half8_rtp(float8);
-half8 __ovld __cnfn convert_half8_rtp(half8);
-half8 __ovld __cnfn convert_half8_rtn(char8);
-half8 __ovld __cnfn convert_half8_rtn(uchar8);
-half8 __ovld __cnfn convert_half8_rtn(short8);
-half8 __ovld __cnfn convert_half8_rtn(ushort8);
-half8 __ovld __cnfn convert_half8_rtn(int8);
-half8 __ovld __cnfn convert_half8_rtn(uint8);
-half8 __ovld __cnfn convert_half8_rtn(long8);
-half8 __ovld __cnfn convert_half8_rtn(ulong8);
-half8 __ovld __cnfn convert_half8_rtn(float8);
-half8 __ovld __cnfn convert_half8_rtn(half8);
-half8 __ovld __cnfn convert_half8_rtz(char8);
-half8 __ovld __cnfn convert_half8_rtz(uchar8);
-half8 __ovld __cnfn convert_half8_rtz(short8);
-half8 __ovld __cnfn convert_half8_rtz(ushort8);
-half8 __ovld __cnfn convert_half8_rtz(int8);
-half8 __ovld __cnfn convert_half8_rtz(uint8);
-half8 __ovld __cnfn convert_half8_rtz(long8);
-half8 __ovld __cnfn convert_half8_rtz(ulong8);
-half8 __ovld __cnfn convert_half8_rtz(float8);
-half8 __ovld __cnfn convert_half8_rtz(half8);
-half16 __ovld __cnfn convert_half16(char16);
-half16 __ovld __cnfn convert_half16(uchar16);
-half16 __ovld __cnfn convert_half16(short16);
-half16 __ovld __cnfn convert_half16(ushort16);
-half16 __ovld __cnfn convert_half16(int16);
-half16 __ovld __cnfn convert_half16(uint16);
-half16 __ovld __cnfn convert_half16(long16);
-half16 __ovld __cnfn convert_half16(ulong16);
-half16 __ovld __cnfn convert_half16(float16);
-half16 __ovld __cnfn convert_half16(half16);
-half16 __ovld __cnfn convert_half16_rte(char16);
-half16 __ovld __cnfn convert_half16_rte(uchar16);
-half16 __ovld __cnfn convert_half16_rte(short16);
-half16 __ovld __cnfn convert_half16_rte(ushort16);
-half16 __ovld __cnfn convert_half16_rte(int16);
-half16 __ovld __cnfn convert_half16_rte(uint16);
-half16 __ovld __cnfn convert_half16_rte(long16);
-half16 __ovld __cnfn convert_half16_rte(ulong16);
-half16 __ovld __cnfn convert_half16_rte(float16);
-half16 __ovld __cnfn convert_half16_rte(half16);
-half16 __ovld __cnfn convert_half16_rtp(char16);
-half16 __ovld __cnfn convert_half16_rtp(uchar16);
-half16 __ovld __cnfn convert_half16_rtp(short16);
-half16 __ovld __cnfn convert_half16_rtp(ushort16);
-half16 __ovld __cnfn convert_half16_rtp(int16);
-half16 __ovld __cnfn convert_half16_rtp(uint16);
-half16 __ovld __cnfn convert_half16_rtp(long16);
-half16 __ovld __cnfn convert_half16_rtp(ulong16);
-half16 __ovld __cnfn convert_half16_rtp(float16);
-half16 __ovld __cnfn convert_half16_rtp(half16);
-half16 __ovld __cnfn convert_half16_rtn(char16);
-half16 __ovld __cnfn convert_half16_rtn(uchar16);
-half16 __ovld __cnfn convert_half16_rtn(short16);
-half16 __ovld __cnfn convert_half16_rtn(ushort16);
-half16 __ovld __cnfn convert_half16_rtn(int16);
-half16 __ovld __cnfn convert_half16_rtn(uint16);
-half16 __ovld __cnfn convert_half16_rtn(long16);
-half16 __ovld __cnfn convert_half16_rtn(ulong16);
-half16 __ovld __cnfn convert_half16_rtn(float16);
-half16 __ovld __cnfn convert_half16_rtn(half16);
-half16 __ovld __cnfn convert_half16_rtz(char16);
-half16 __ovld __cnfn convert_half16_rtz(uchar16);
-half16 __ovld __cnfn convert_half16_rtz(short16);
-half16 __ovld __cnfn convert_half16_rtz(ushort16);
-half16 __ovld __cnfn convert_half16_rtz(int16);
-half16 __ovld __cnfn convert_half16_rtz(uint16);
-half16 __ovld __cnfn convert_half16_rtz(long16);
-half16 __ovld __cnfn convert_half16_rtz(ulong16);
-half16 __ovld __cnfn convert_half16_rtz(float16);
-half16 __ovld __cnfn convert_half16_rtz(half16);
-
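-/* Sketch (hypothetical kernel): a common fp16 pattern is to store data
- * as half, widen to float for arithmetic, then narrow back with an
- * explicit rounding mode:
- *
- *   kernel void scale(global half2 *buf, float k) {
- *     size_t i = get_global_id(0);
- *     float2 v = convert_float2(buf[i]) * k;
- *     buf[i] = convert_half2_rte(v);
- *   }
- */
-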
-// Convert half types to double types.
-#ifdef cl_khr_fp64
-double __ovld __cnfn convert_double(half);
-double __ovld __cnfn convert_double_rte(half);
-double __ovld __cnfn convert_double_rtp(half);
-double __ovld __cnfn convert_double_rtn(half);
-double __ovld __cnfn convert_double_rtz(half);
-double2 __ovld __cnfn convert_double2(half2);
-double2 __ovld __cnfn convert_double2_rte(half2);
-double2 __ovld __cnfn convert_double2_rtp(half2);
-double2 __ovld __cnfn convert_double2_rtn(half2);
-double2 __ovld __cnfn convert_double2_rtz(half2);
-double3 __ovld __cnfn convert_double3(half3);
-double3 __ovld __cnfn convert_double3_rte(half3);
-double3 __ovld __cnfn convert_double3_rtp(half3);
-double3 __ovld __cnfn convert_double3_rtn(half3);
-double3 __ovld __cnfn convert_double3_rtz(half3);
-double4 __ovld __cnfn convert_double4(half4);
-double4 __ovld __cnfn convert_double4_rte(half4);
-double4 __ovld __cnfn convert_double4_rtp(half4);
-double4 __ovld __cnfn convert_double4_rtn(half4);
-double4 __ovld __cnfn convert_double4_rtz(half4);
-double8 __ovld __cnfn convert_double8(half8);
-double8 __ovld __cnfn convert_double8_rte(half8);
-double8 __ovld __cnfn convert_double8_rtp(half8);
-double8 __ovld __cnfn convert_double8_rtn(half8);
-double8 __ovld __cnfn convert_double8_rtz(half8);
-double16 __ovld __cnfn convert_double16(half16);
-double16 __ovld __cnfn convert_double16_rte(half16);
-double16 __ovld __cnfn convert_double16_rtp(half16);
-double16 __ovld __cnfn convert_double16_rtn(half16);
-double16 __ovld __cnfn convert_double16_rtz(half16);
-
-// Convert double types to half types.
-half __ovld __cnfn convert_half(double);
-half __ovld __cnfn convert_half_rte(double);
-half __ovld __cnfn convert_half_rtp(double);
-half __ovld __cnfn convert_half_rtn(double);
-half __ovld __cnfn convert_half_rtz(double);
-half2 __ovld __cnfn convert_half2(double2);
-half2 __ovld __cnfn convert_half2_rte(double2);
-half2 __ovld __cnfn convert_half2_rtp(double2);
-half2 __ovld __cnfn convert_half2_rtn(double2);
-half2 __ovld __cnfn convert_half2_rtz(double2);
-half3 __ovld __cnfn convert_half3(double3);
-half3 __ovld __cnfn convert_half3_rte(double3);
-half3 __ovld __cnfn convert_half3_rtp(double3);
-half3 __ovld __cnfn convert_half3_rtn(double3);
-half3 __ovld __cnfn convert_half3_rtz(double3);
-half4 __ovld __cnfn convert_half4(double4);
-half4 __ovld __cnfn convert_half4_rte(double4);
-half4 __ovld __cnfn convert_half4_rtp(double4);
-half4 __ovld __cnfn convert_half4_rtn(double4);
-half4 __ovld __cnfn convert_half4_rtz(double4);
-half8 __ovld __cnfn convert_half8(double8);
-half8 __ovld __cnfn convert_half8_rte(double8);
-half8 __ovld __cnfn convert_half8_rtp(double8);
-half8 __ovld __cnfn convert_half8_rtn(double8);
-half8 __ovld __cnfn convert_half8_rtz(double8);
-half16 __ovld __cnfn convert_half16(double16);
-half16 __ovld __cnfn convert_half16_rte(double16);
-half16 __ovld __cnfn convert_half16_rtp(double16);
-half16 __ovld __cnfn convert_half16_rtn(double16);
-half16 __ovld __cnfn convert_half16_rtz(double16);
-#endif //cl_khr_fp64
-
-#endif // cl_khr_fp16
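-
-/*
- * A minimal usage sketch (values are illustrative): the conversion builtins
- * follow the pattern convert_<dest>[_sat][_<rounding>], with rounding
- * suffixes _rte, _rtz, _rtp and _rtn. Packing normalized float data into
- * bytes with saturation and round-to-nearest-even:
- *
- *   float4 f = (float4)(0.1f, 0.5f, 1.2f, -3.0f) * 255.0f;
- *   uchar4 u = convert_uchar4_sat_rte(f);   // -> (26, 128, 255, 0)
- *
- * Without _sat, out-of-range results are undefined; without a rounding
- * suffix, conversions to integer types round toward zero (_rtz).
- */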
-
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
-
-/**
- * Returns the number of dimensions in use. This is the
- * value given to the work_dim argument specified in
- * clEnqueueNDRangeKernel.
- * For clEnqueueTask, this returns 1.
- */
-uint __ovld __cnfn get_work_dim(void);
-
-/**
- * Returns the number of global work-items specified for
- * dimension identified by dimindx. This value is given by
- * the global_work_size argument to
- * clEnqueueNDRangeKernel. Valid values of dimindx
- * are 0 to get_work_dim() - 1. For other values of
- * dimindx, get_global_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_global_size(uint dimindx);
-
-/**
- * Returns the unique global work-item ID value for
- * dimension identified by dimindx. The global work-item
- * ID specifies the work-item ID based on the number of
- * global work-items specified to execute the kernel. Valid
- * values of dimindx are 0 to get_work_dim() - 1. For
- * other values of dimindx, get_global_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_id(uint dimindx);
-
-/**
- * Returns the number of local work-items specified in
- * dimension identified by dimindx. This value is given by
- * the local_work_size argument to
- * clEnqueueNDRangeKernel if local_work_size is not
- * NULL; otherwise the OpenCL implementation chooses
- * an appropriate local_work_size value which is returned
- * by this function. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_local_size(uint dimindx);
-
-/**
- * Returns the unique local work-item ID, i.e. a work-item
- * within a specific work-group for dimension identified by
- * dimindx. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_local_id(uint dimindx);
-
-/**
- * Returns the number of work-groups that will execute a
- * kernel for dimension identified by dimindx.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values of dimindx, get_num_groups() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_num_groups(uint dimindx);
-
-/**
- * get_group_id returns the work-group ID which is a
- * number from 0 .. get_num_groups(dimindx) - 1.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_group_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_group_id(uint dimindx);
-
-/**
- * get_global_offset returns the offset values specified in
- * global_work_offset argument to
- * clEnqueueNDRangeKernel.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_global_offset() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_offset(uint dimindx);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
-size_t __ovld get_global_linear_id(void);
-size_t __ovld get_local_linear_id(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
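-
-/*
- * A minimal usage sketch (hypothetical kernel): the global ID is the usual
- * buffer index, and should be range-checked when the global size has been
- * rounded up past the problem size.
- *
- *   __kernel void vadd(__global const float *a,
- *                      __global const float *b,
- *                      __global float *out,
- *                      uint n)
- *   {
- *     size_t i = get_global_id(0);
- *     if (i < n)
- *       out[i] = a[i] + b[i];
- *   }
- */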
-
-// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
-
-/**
- * Arc cosine function.
- */
-float __ovld __cnfn acos(float);
-float2 __ovld __cnfn acos(float2);
-float3 __ovld __cnfn acos(float3);
-float4 __ovld __cnfn acos(float4);
-float8 __ovld __cnfn acos(float8);
-float16 __ovld __cnfn acos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acos(double);
-double2 __ovld __cnfn acos(double2);
-double3 __ovld __cnfn acos(double3);
-double4 __ovld __cnfn acos(double4);
-double8 __ovld __cnfn acos(double8);
-double16 __ovld __cnfn acos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acos(half);
-half2 __ovld __cnfn acos(half2);
-half3 __ovld __cnfn acos(half3);
-half4 __ovld __cnfn acos(half4);
-half8 __ovld __cnfn acos(half8);
-half16 __ovld __cnfn acos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic cosine.
- */
-float __ovld __cnfn acosh(float);
-float2 __ovld __cnfn acosh(float2);
-float3 __ovld __cnfn acosh(float3);
-float4 __ovld __cnfn acosh(float4);
-float8 __ovld __cnfn acosh(float8);
-float16 __ovld __cnfn acosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acosh(double);
-double2 __ovld __cnfn acosh(double2);
-double3 __ovld __cnfn acosh(double3);
-double4 __ovld __cnfn acosh(double4);
-double8 __ovld __cnfn acosh(double8);
-double16 __ovld __cnfn acosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acosh(half);
-half2 __ovld __cnfn acosh(half2);
-half3 __ovld __cnfn acosh(half3);
-half4 __ovld __cnfn acosh(half4);
-half8 __ovld __cnfn acosh(half8);
-half16 __ovld __cnfn acosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute acos (x) / PI.
- */
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc sine function.
- */
-float __ovld __cnfn asin(float);
-float2 __ovld __cnfn asin(float2);
-float3 __ovld __cnfn asin(float3);
-float4 __ovld __cnfn asin(float4);
-float8 __ovld __cnfn asin(float8);
-float16 __ovld __cnfn asin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asin(double);
-double2 __ovld __cnfn asin(double2);
-double3 __ovld __cnfn asin(double3);
-double4 __ovld __cnfn asin(double4);
-double8 __ovld __cnfn asin(double8);
-double16 __ovld __cnfn asin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asin(half);
-half2 __ovld __cnfn asin(half2);
-half3 __ovld __cnfn asin(half3);
-half4 __ovld __cnfn asin(half4);
-half8 __ovld __cnfn asin(half8);
-half16 __ovld __cnfn asin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic sine.
- */
-float __ovld __cnfn asinh(float);
-float2 __ovld __cnfn asinh(float2);
-float3 __ovld __cnfn asinh(float3);
-float4 __ovld __cnfn asinh(float4);
-float8 __ovld __cnfn asinh(float8);
-float16 __ovld __cnfn asinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinh(double);
-double2 __ovld __cnfn asinh(double2);
-double3 __ovld __cnfn asinh(double3);
-double4 __ovld __cnfn asinh(double4);
-double8 __ovld __cnfn asinh(double8);
-double16 __ovld __cnfn asinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinh(half);
-half2 __ovld __cnfn asinh(half2);
-half3 __ovld __cnfn asinh(half3);
-half4 __ovld __cnfn asinh(half4);
-half8 __ovld __cnfn asinh(half8);
-half16 __ovld __cnfn asinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute asin (x) / PI.
- */
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent function.
- */
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent of y / x.
- */
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic tangent.
- */
-float __ovld __cnfn atanh(float);
-float2 __ovld __cnfn atanh(float2);
-float3 __ovld __cnfn atanh(float3);
-float4 __ovld __cnfn atanh(float4);
-float8 __ovld __cnfn atanh(float8);
-float16 __ovld __cnfn atanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanh(double);
-double2 __ovld __cnfn atanh(double2);
-double3 __ovld __cnfn atanh(double3);
-double4 __ovld __cnfn atanh(double4);
-double8 __ovld __cnfn atanh(double8);
-double16 __ovld __cnfn atanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanh(half);
-half2 __ovld __cnfn atanh(half2);
-half3 __ovld __cnfn atanh(half3);
-half4 __ovld __cnfn atanh(half4);
-half8 __ovld __cnfn atanh(half8);
-half16 __ovld __cnfn atanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan (x) / PI.
- */
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan2 (y, x) / PI.
- */
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
-#endif //cl_khr_fp16
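-
-/*
- * A small sketch: atan2pi(y, x) equals atan2(y, x) / pi, so scaling by
- * 180.0f converts straight to degrees (values are illustrative):
- *
- *   float deg = 180.0f * atan2pi(1.0f, -1.0f);   // 135.0f
- */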
-
-/**
- * Compute cube-root.
- */
-float __ovld __cnfn cbrt(float);
-float2 __ovld __cnfn cbrt(float2);
-float3 __ovld __cnfn cbrt(float3);
-float4 __ovld __cnfn cbrt(float4);
-float8 __ovld __cnfn cbrt(float8);
-float16 __ovld __cnfn cbrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cbrt(double);
-double2 __ovld __cnfn cbrt(double2);
-double3 __ovld __cnfn cbrt(double3);
-double4 __ovld __cnfn cbrt(double4);
-double8 __ovld __cnfn cbrt(double8);
-double16 __ovld __cnfn cbrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cbrt(half);
-half2 __ovld __cnfn cbrt(half2);
-half3 __ovld __cnfn cbrt(half3);
-half4 __ovld __cnfn cbrt(half4);
-half8 __ovld __cnfn cbrt(half8);
-half16 __ovld __cnfn cbrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to positive
- * infinity rounding mode.
- */
-float __ovld __cnfn ceil(float);
-float2 __ovld __cnfn ceil(float2);
-float3 __ovld __cnfn ceil(float3);
-float4 __ovld __cnfn ceil(float4);
-float8 __ovld __cnfn ceil(float8);
-float16 __ovld __cnfn ceil(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ceil(double);
-double2 __ovld __cnfn ceil(double2);
-double3 __ovld __cnfn ceil(double3);
-double4 __ovld __cnfn ceil(double4);
-double8 __ovld __cnfn ceil(double8);
-double16 __ovld __cnfn ceil(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ceil(half);
-half2 __ovld __cnfn ceil(half2);
-half3 __ovld __cnfn ceil(half3);
-half4 __ovld __cnfn ceil(half4);
-half8 __ovld __cnfn ceil(half8);
-half16 __ovld __cnfn ceil(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x with its sign changed to match the sign of y.
- */
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
-#endif //cl_khr_fp16
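-
-/*
- * A small sketch: copysign transfers the sign bit without branching, e.g.
- * rounding half away from zero (x is an assumed float):
- *
- *   float r = trunc(x + copysign(0.5f, x));   // 2.5f -> 3.0f, -2.5f -> -3.0f
- */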
-
-/**
- * Compute cosine.
- */
-float __ovld __cnfn cos(float);
-float2 __ovld __cnfn cos(float2);
-float3 __ovld __cnfn cos(float3);
-float4 __ovld __cnfn cos(float4);
-float8 __ovld __cnfn cos(float8);
-float16 __ovld __cnfn cos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cos(double);
-double2 __ovld __cnfn cos(double2);
-double3 __ovld __cnfn cos(double3);
-double4 __ovld __cnfn cos(double4);
-double8 __ovld __cnfn cos(double8);
-double16 __ovld __cnfn cos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cos(half);
-half2 __ovld __cnfn cos(half2);
-half3 __ovld __cnfn cos(half3);
-half4 __ovld __cnfn cos(half4);
-half8 __ovld __cnfn cos(half8);
-half16 __ovld __cnfn cos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic cosine.
- */
-float __ovld __cnfn cosh(float);
-float2 __ovld __cnfn cosh(float2);
-float3 __ovld __cnfn cosh(float3);
-float4 __ovld __cnfn cosh(float4);
-float8 __ovld __cnfn cosh(float8);
-float16 __ovld __cnfn cosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cosh(double);
-double2 __ovld __cnfn cosh(double2);
-double3 __ovld __cnfn cosh(double3);
-double4 __ovld __cnfn cosh(double4);
-double8 __ovld __cnfn cosh(double8);
-double16 __ovld __cnfn cosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cosh(half);
-half2 __ovld __cnfn cosh(half2);
-half3 __ovld __cnfn cosh(half3);
-half4 __ovld __cnfn cosh(half4);
-half8 __ovld __cnfn cosh(half8);
-half16 __ovld __cnfn cosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cos (PI * x).
- */
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Complementary error function.
- */
-float __ovld __cnfn erfc(float);
-float2 __ovld __cnfn erfc(float2);
-float3 __ovld __cnfn erfc(float3);
-float4 __ovld __cnfn erfc(float4);
-float8 __ovld __cnfn erfc(float8);
-float16 __ovld __cnfn erfc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erfc(double);
-double2 __ovld __cnfn erfc(double2);
-double3 __ovld __cnfn erfc(double3);
-double4 __ovld __cnfn erfc(double4);
-double8 __ovld __cnfn erfc(double8);
-double16 __ovld __cnfn erfc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erfc(half);
-half2 __ovld __cnfn erfc(half2);
-half3 __ovld __cnfn erfc(half3);
-half4 __ovld __cnfn erfc(half4);
-half8 __ovld __cnfn erfc(half8);
-half16 __ovld __cnfn erfc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Error function encountered in integrating the
- * normal distribution.
- */
-float __ovld __cnfn erf(float);
-float2 __ovld __cnfn erf(float2);
-float3 __ovld __cnfn erf(float3);
-float4 __ovld __cnfn erf(float4);
-float8 __ovld __cnfn erf(float8);
-float16 __ovld __cnfn erf(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erf(double);
-double2 __ovld __cnfn erf(double2);
-double3 __ovld __cnfn erf(double3);
-double4 __ovld __cnfn erf(double4);
-double8 __ovld __cnfn erf(double8);
-double16 __ovld __cnfn erf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erf(half);
-half2 __ovld __cnfn erf(half2);
-half3 __ovld __cnfn erf(half3);
-half4 __ovld __cnfn erf(half4);
-half8 __ovld __cnfn erf(half8);
-half16 __ovld __cnfn erf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e exponential function of x.
- */
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 2 function.
- */
-float __ovld __cnfn exp2(float);
-float2 __ovld __cnfn exp2(float2);
-float3 __ovld __cnfn exp2(float3);
-float4 __ovld __cnfn exp2(float4);
-float8 __ovld __cnfn exp2(float8);
-float16 __ovld __cnfn exp2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp2(double);
-double2 __ovld __cnfn exp2(double2);
-double3 __ovld __cnfn exp2(double3);
-double4 __ovld __cnfn exp2(double4);
-double8 __ovld __cnfn exp2(double8);
-double16 __ovld __cnfn exp2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp2(half);
-half2 __ovld __cnfn exp2(half2);
-half3 __ovld __cnfn exp2(half3);
-half4 __ovld __cnfn exp2(half4);
-half8 __ovld __cnfn exp2(half8);
-half16 __ovld __cnfn exp2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 10 function.
- */
-float __ovld __cnfn exp10(float);
-float2 __ovld __cnfn exp10(float2);
-float3 __ovld __cnfn exp10(float3);
-float4 __ovld __cnfn exp10(float4);
-float8 __ovld __cnfn exp10(float8);
-float16 __ovld __cnfn exp10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp10(double);
-double2 __ovld __cnfn exp10(double2);
-double3 __ovld __cnfn exp10(double3);
-double4 __ovld __cnfn exp10(double4);
-double8 __ovld __cnfn exp10(double8);
-double16 __ovld __cnfn exp10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp10(half);
-half2 __ovld __cnfn exp10(half2);
-half3 __ovld __cnfn exp10(half3);
-half4 __ovld __cnfn exp10(half4);
-half8 __ovld __cnfn exp10(half8);
-half16 __ovld __cnfn exp10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute e^x - 1.0.
- */
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
-#endif //cl_khr_fp16
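-
-/*
- * A small sketch: for |x| near zero, exp(x) - 1.0f cancels catastrophically,
- * while expm1 keeps full precision:
- *
- *   float a = exp(1.0e-8f) - 1.0f;   // 0.0f -- the 1 swamps the tiny term
- *   float b = expm1(1.0e-8f);        // ~1.0e-8f
- */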
-
-/**
- * Compute absolute value of a floating-point number.
- */
-float __ovld __cnfn fabs(float);
-float2 __ovld __cnfn fabs(float2);
-float3 __ovld __cnfn fabs(float3);
-float4 __ovld __cnfn fabs(float4);
-float8 __ovld __cnfn fabs(float8);
-float16 __ovld __cnfn fabs(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fabs(double);
-double2 __ovld __cnfn fabs(double2);
-double3 __ovld __cnfn fabs(double3);
-double4 __ovld __cnfn fabs(double4);
-double8 __ovld __cnfn fabs(double8);
-double16 __ovld __cnfn fabs(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fabs(half);
-half2 __ovld __cnfn fabs(half2);
-half3 __ovld __cnfn fabs(half3);
-half4 __ovld __cnfn fabs(half4);
-half8 __ovld __cnfn fabs(half8);
-half16 __ovld __cnfn fabs(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x - y if x > y, and +0 if x <= y.
- */
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to negative
- * infinity rounding mode.
- */
-float __ovld __cnfn floor(float);
-float2 __ovld __cnfn floor(float2);
-float3 __ovld __cnfn floor(float3);
-float4 __ovld __cnfn floor(float4);
-float8 __ovld __cnfn floor(float8);
-float16 __ovld __cnfn floor(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn floor(double);
-double2 __ovld __cnfn floor(double2);
-double3 __ovld __cnfn floor(double3);
-double4 __ovld __cnfn floor(double4);
-double8 __ovld __cnfn floor(double8);
-double16 __ovld __cnfn floor(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn floor(half);
-half2 __ovld __cnfn floor(half2);
-half3 __ovld __cnfn floor(half3);
-half4 __ovld __cnfn floor(half4);
-half8 __ovld __cnfn floor(half8);
-half16 __ovld __cnfn floor(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns the correctly rounded floating-point
- * representation of the sum of c with the infinitely
- * precise product of a and b. Rounding of
- * intermediate products shall not occur. Edge case
- * behavior is per the IEEE 754-2008 standard.
- */
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
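-
-/*
- * A small sketch: Horner's rule maps directly onto fma, one fused,
- * correctly rounded step per coefficient (coefficients c0..c3 and x are
- * assumed floats):
- *
- *   // p(x) = c3*x^3 + c2*x^2 + c1*x + c0
- *   float p = fma(fma(fma(c3, x, c2), x, c1), x, c0);
- */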
-
-/**
- * Returns y if x < y, otherwise it returns x. If one
- * argument is a NaN, fmax() returns the other
- * argument. If both arguments are NaNs, fmax()
- * returns a NaN.
- */
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If one
- * argument is a NaN, fmin() returns the other
- * argument. If both arguments are NaNs, fmin()
- * returns a NaN.
- */
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
-#endif //cl_khr_fp16
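-
-/*
- * A small sketch: the NaN handling makes fmin/fmax suitable for clamping
- * data that may contain NaNs, and the vector/scalar overloads avoid
- * splatting the limits by hand:
- *
- *   float4 v = (float4)(0.3f, nan(0u), 2.0f, -1.0f);
- *   float4 c = fmin(fmax(v, 0.0f), 1.0f);   // (0.3f, 0.0f, 1.0f, 0.0f)
- */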
-
-/**
- * Modulus. Returns x - y * trunc (x/y).
- */
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
-#endif //cl_khr_fp16
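-
-/*
- * A small sketch: fmod keeps the sign of x, so wrapping an angle into
- * [0, 2*pi) needs one fix-up for negative inputs (theta is an assumed
- * float):
- *
- *   float a = fmod(theta, 2.0f * M_PI_F);
- *   if (a < 0.0f) a += 2.0f * M_PI_F;
- */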
-
-/**
- * Returns fmin(x - floor(x), 0x1.fffffep-1f).
- * floor(x) is returned in iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
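-
-/*
- * A small sketch: fract splits a coordinate into integer and fractional
- * parts in one call, convenient for interpolated table lookups (coord and
- * the lookup table tab are assumed):
- *
- *   float base;
- *   float t = fract(coord, &base);   // t in [0, 1), base = floor(coord)
- *   float v = mix(tab[(int)base], tab[(int)base + 1], t);
- */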
-
-/**
- * Extract mantissa and exponent from x. For each
- * component the returned mantissa either is 0 or has
- * magnitude in the interval [1/2, 1). Each
- * component of x equals mantissa returned * 2^exp.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
-#endif //cl_khr_fp16
-#else
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
-/**
- * Compute the value of the square root of x^2 + y^2
- * without undue overflow or underflow.
- */
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
-#endif //cl_khr_fp16
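-
-/*
- * A small sketch: the naive form overflows for large components; hypot
- * does not:
- *
- *   float x = 3.0e19f, y = 4.0e19f;
- *   float naive = sqrt(x * x + y * y);   // x*x overflows to +INF
- *   float safe  = hypot(x, y);           // 5.0e19f
- */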
-
-/**
- * Return the exponent as an integer value.
- */
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
-#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Multiply x by 2 to the power n.
- */
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
-#endif //cl_khr_fp16
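-
-/*
- * A small sketch: frexp and ldexp are exact inverses, giving a lossless
- * scale/unscale round trip:
- *
- *   int e;
- *   float m = frexp(48.0f, &e);   // m = 0.75f, e = 6  (48 = 0.75 * 2^6)
- *   float x = ldexp(m, e);        // exactly 48.0f again
- */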
-
-/**
- * Log gamma function. Returns the natural
- * logarithm of the absolute value of the gamma
- * function. The sign of the gamma function is
- * returned in the signp argument of lgamma_r.
- */
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
-#endif //cl_khr_fp16
-#else
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
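-
-/*
- * A small sketch: recovering the signed gamma function from lgamma_r,
- * valid where gamma(x) does not overflow (x is an assumed float):
- *
- *   int s;
- *   float lg = lgamma_r(x, &s);      // s is the sign of gamma(x)
- *   float g  = (float)s * exp(lg);   // gamma(x)
- */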
-
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn log(float);
-float2 __ovld __cnfn log(float2);
-float3 __ovld __cnfn log(float3);
-float4 __ovld __cnfn log(float4);
-float8 __ovld __cnfn log(float8);
-float16 __ovld __cnfn log(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log(double);
-double2 __ovld __cnfn log(double2);
-double3 __ovld __cnfn log(double3);
-double4 __ovld __cnfn log(double4);
-double8 __ovld __cnfn log(double8);
-double16 __ovld __cnfn log(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log(half);
-half2 __ovld __cnfn log(half2);
-half3 __ovld __cnfn log(half3);
-half4 __ovld __cnfn log(half4);
-half8 __ovld __cnfn log(half8);
-half16 __ovld __cnfn log(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn log2(float);
-float2 __ovld __cnfn log2(float2);
-float3 __ovld __cnfn log2(float3);
-float4 __ovld __cnfn log2(float4);
-float8 __ovld __cnfn log2(float8);
-float16 __ovld __cnfn log2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log2(double);
-double2 __ovld __cnfn log2(double2);
-double3 __ovld __cnfn log2(double3);
-double4 __ovld __cnfn log2(double4);
-double8 __ovld __cnfn log2(double8);
-double16 __ovld __cnfn log2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log2(half);
-half2 __ovld __cnfn log2(half2);
-half3 __ovld __cnfn log2(half3);
-half4 __ovld __cnfn log2(half4);
-half8 __ovld __cnfn log2(half8);
-half16 __ovld __cnfn log2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn log10(float);
-float2 __ovld __cnfn log10(float2);
-float3 __ovld __cnfn log10(float3);
-float4 __ovld __cnfn log10(float4);
-float8 __ovld __cnfn log10(float8);
-float16 __ovld __cnfn log10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log10(double);
-double2 __ovld __cnfn log10(double2);
-double3 __ovld __cnfn log10(double3);
-double4 __ovld __cnfn log10(double4);
-double8 __ovld __cnfn log10(double8);
-double16 __ovld __cnfn log10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log10(half);
-half2 __ovld __cnfn log10(half2);
-half3 __ovld __cnfn log10(half3);
-half4 __ovld __cnfn log10(half4);
-half8 __ovld __cnfn log10(half8);
-half16 __ovld __cnfn log10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e logarithm of (1.0 + x).
- */
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the exponent of x, which is the integral
- * part of log_r |x|, where r is the radix of the
- * floating-point format (2 for these types).
- */
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * mad approximates a * b + c. Whether or how the
- * product of a * b is rounded and how supernormal or
- * subnormal intermediate products are handled is not
- * defined. mad is intended to be used where speed is
- * preferred over accuracy.
- */
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
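-
-/*
- * A small sketch of the mad/fma trade-off: both compute a * b + c, but only
- * fma guarantees a single correctly rounded result; mad may compile to
- * whatever multiply-add the hardware does fastest (a, b, c are assumed
- * floats):
- *
- *   float fast    = mad(a, b, c);   // speed over accuracy
- *   float precise = fma(a, b, c);   // one rounding, per IEEE 754
- */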
-
-/**
- * Returns x if | x | > | y |, y if | y | > | x |, otherwise
- * fmax(x, y).
- */
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns x if | x | < | y |, y if | y | < | x |, otherwise
- * fmin(x, y).
- */
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
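-/*
- * Illustrative sketch (not part of the original header): maxmag and minmag
- * select by magnitude while preserving sign, e.g. maxmag(-4.0f, 3.0f)
- * returns -4.0f. A hypothetical kernel keeping the dominant of two signals:
- */
-__kernel void dominant_signal(__global const float *a,
-                              __global const float *b,
-                              __global float *out) {
-    size_t i = get_global_id(0);
-    out[i] = maxmag(a[i], b[i]);     // input with the larger |value|
-}
-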
-/**
- * Decompose a floating-point number. The modf
- * function breaks the argument x into integral and
- * fractional parts, each of which has the same sign as
- * the argument. It stores the integral part in the object
- * pointed to by iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
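-/*
- * Illustrative sketch (not part of the original header): modf keeps the sign
- * on both pieces, e.g. modf(-2.75f, &ip) returns -0.75f and stores -2.0f in
- * ip. A private temporary works under either overload set above:
- */
-__kernel void split_frac(__global const float *src,
-                         __global float *int_part,
-                         __global float *frac_part) {
-    size_t i = get_global_id(0);
-    float ip;                        // __private, or generic if supported
-    frac_part[i] = modf(src[i], &ip);
-    int_part[i] = ip;
-}
-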
-/**
- * Returns a quiet NaN. The nancode may be placed
- * in the significand of the resulting NaN.
- */
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
-#endif //cl_khr_fp16
-
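-/*
- * Illustrative sketch (not part of the original header): whether the nancode
- * survives in the NaN payload is implementation-dependent ("may be placed"),
- * but when it does it can tag where an invalid value was produced:
- */
-__kernel void guarded_sqrt(__global const float *src, __global float *dst) {
-    size_t i = get_global_id(0);
-    // Tag out-of-domain inputs with payload 42 instead of computing.
-    dst[i] = (src[i] < 0.0f) ? nan(42u) : sqrt(src[i]);
-}
-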
-/**
- * Computes the next representable floating-point
- * value of the argument type following x in the direction of
- * y. Thus, if y is less than x, nextafter() returns the
- * largest representable floating-point number less
- * than x.
- */
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
-#endif //cl_khr_fp16
-
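-/*
- * Illustrative sketch (not part of the original header): nextafter moves
- * exactly one representable value, e.g. nextafter(1.0f, 2.0f) is the
- * smallest float above 1.0f. A hypothetical one-ULP nudge toward a target:
- */
-__kernel void nudge_toward(__global float *v, float target) {
-    size_t i = get_global_id(0);
-    v[i] = nextafter(v[i], target);  // one representable step toward target
-}
-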
-/**
- * Compute x to the power y.
- */
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where y is an integer.
- */
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
-#endif //cl_khr_fp16
-
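-/*
- * Illustrative sketch (not part of the original header): the three power
- * functions differ in domain. pow accepts general x and y, pown takes an
- * integer exponent (and so is defined for negative x), and powr assumes
- * x >= 0. A hypothetical side-by-side:
- */
-__kernel void power_variants(__global const float *x, __global float *out) {
-    size_t i = get_global_id(0);
-    float g = pow(x[i], 2.5f);           // general case
-    float n = pown(x[i], 3);             // integer exponent, any sign of x
-    float r = powr(fabs(x[i]), 2.5f);    // caller guarantees base >= 0
-    out[i] = g + n + r;
-}
-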
-/**
- * Compute the value r such that r = x - n*y, where n
- * is the integer nearest the exact value of x/y. If there
- * are two integers closest to x/y, n shall be the even
- * one. If r is zero, it is given the same sign as x.
- */
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * The remquo function computes the value r such
- * that r = x - n*y, where n is the integer nearest the
- * exact value of x/y. If there are two integers closest
- * to x/y, n shall be the even one. If r is zero, it is
- * given the same sign as x. This is the same value
- * that is returned by the remainder function.
- * remquo also calculates the lower seven bits of the
- * integral quotient x/y, and gives that value the same
- * sign as x/y. It stores this signed value in the object
- * pointed to by quo.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-#endif //cl_khr_fp16
-#else
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
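-
-/*
- * Illustrative sketch (not part of the original header): the stored quotient
- * bits make remquo useful for argument reduction, e.g. tracking which octant
- * an angle fell into during trigonometric range reduction:
- */
-__kernel void reduce_angle(__global const float *theta,
-                           __global float *reduced,
-                           __global int *octant) {
-    size_t i = get_global_id(0);
-    int q;                               // low bits of the rounded quotient
-    reduced[i] = remquo(theta[i], M_PI_2_F, &q);
-    octant[i] = q & 7;
-}
-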
-/**
- * Round to integral value (using round to nearest
- * even rounding mode) in floating-point format.
- * Refer to section 7.1 for a description of rounding
- * modes.
- */
-float __ovld __cnfn rint(float);
-float2 __ovld __cnfn rint(float2);
-float3 __ovld __cnfn rint(float3);
-float4 __ovld __cnfn rint(float4);
-float8 __ovld __cnfn rint(float8);
-float16 __ovld __cnfn rint(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rint(double);
-double2 __ovld __cnfn rint(double2);
-double3 __ovld __cnfn rint(double3);
-double4 __ovld __cnfn rint(double4);
-double8 __ovld __cnfn rint(double8);
-double16 __ovld __cnfn rint(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rint(half);
-half2 __ovld __cnfn rint(half2);
-half3 __ovld __cnfn rint(half3);
-half4 __ovld __cnfn rint(half4);
-half8 __ovld __cnfn rint(half8);
-half16 __ovld __cnfn rint(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power 1/y.
- */
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Return the integral value nearest to x rounding
- * halfway cases away from zero, regardless of the
- * current rounding direction.
- */
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn rsqrt(float);
-float2 __ovld __cnfn rsqrt(float2);
-float3 __ovld __cnfn rsqrt(float3);
-float4 __ovld __cnfn rsqrt(float4);
-float8 __ovld __cnfn rsqrt(float8);
-float16 __ovld __cnfn rsqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rsqrt(double);
-double2 __ovld __cnfn rsqrt(double2);
-double3 __ovld __cnfn rsqrt(double3);
-double4 __ovld __cnfn rsqrt(double4);
-double8 __ovld __cnfn rsqrt(double8);
-double16 __ovld __cnfn rsqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rsqrt(half);
-half2 __ovld __cnfn rsqrt(half2);
-half3 __ovld __cnfn rsqrt(half3);
-half4 __ovld __cnfn rsqrt(half4);
-half8 __ovld __cnfn rsqrt(half8);
-half16 __ovld __cnfn rsqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine.
- */
-float __ovld __cnfn sin(float);
-float2 __ovld __cnfn sin(float2);
-float3 __ovld __cnfn sin(float3);
-float4 __ovld __cnfn sin(float4);
-float8 __ovld __cnfn sin(float8);
-float16 __ovld __cnfn sin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sin(double);
-double2 __ovld __cnfn sin(double2);
-double3 __ovld __cnfn sin(double3);
-double4 __ovld __cnfn sin(double4);
-double8 __ovld __cnfn sin(double8);
-double16 __ovld __cnfn sin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sin(half);
-half2 __ovld __cnfn sin(half2);
-half3 __ovld __cnfn sin(half3);
-half4 __ovld __cnfn sin(half4);
-half8 __ovld __cnfn sin(half8);
-half16 __ovld __cnfn sin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine and cosine of x. The computed sine
- * is the return value and computed cosine is returned
- * in cosval.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
-#endif //cl_khr_fp16
-#else
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
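-/*
- * Illustrative sketch (not part of the original header): one sincos call is
- * typically cheaper than separate sin and cos when both values are needed,
- * as in a hypothetical 2-D rotation:
- */
-__kernel void rotate2d(__global float2 *pts, float angle) {
-    size_t i = get_global_id(0);
-    float c;
-    float s = sincos(angle, &c);     // sine returned, cosine stored in c
-    float2 p = pts[i];
-    pts[i] = (float2)(p.x * c - p.y * s, p.x * s + p.y * c);
-}
-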
-/**
- * Compute hyperbolic sine.
- */
-float __ovld __cnfn sinh(float);
-float2 __ovld __cnfn sinh(float2);
-float3 __ovld __cnfn sinh(float3);
-float4 __ovld __cnfn sinh(float4);
-float8 __ovld __cnfn sinh(float8);
-float16 __ovld __cnfn sinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinh(double);
-double2 __ovld __cnfn sinh(double2);
-double3 __ovld __cnfn sinh(double3);
-double4 __ovld __cnfn sinh(double4);
-double8 __ovld __cnfn sinh(double8);
-double16 __ovld __cnfn sinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinh(half);
-half2 __ovld __cnfn sinh(half2);
-half3 __ovld __cnfn sinh(half3);
-half4 __ovld __cnfn sinh(half4);
-half8 __ovld __cnfn sinh(half8);
-half16 __ovld __cnfn sinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sin (PI * x).
- */
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn sqrt(float);
-float2 __ovld __cnfn sqrt(float2);
-float3 __ovld __cnfn sqrt(float3);
-float4 __ovld __cnfn sqrt(float4);
-float8 __ovld __cnfn sqrt(float8);
-float16 __ovld __cnfn sqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sqrt(double);
-double2 __ovld __cnfn sqrt(double2);
-double3 __ovld __cnfn sqrt(double3);
-double4 __ovld __cnfn sqrt(double4);
-double8 __ovld __cnfn sqrt(double8);
-double16 __ovld __cnfn sqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sqrt(half);
-half2 __ovld __cnfn sqrt(half2);
-half3 __ovld __cnfn sqrt(half3);
-half4 __ovld __cnfn sqrt(half4);
-half8 __ovld __cnfn sqrt(half8);
-half16 __ovld __cnfn sqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tangent.
- */
-float __ovld __cnfn tan(float);
-float2 __ovld __cnfn tan(float2);
-float3 __ovld __cnfn tan(float3);
-float4 __ovld __cnfn tan(float4);
-float8 __ovld __cnfn tan(float8);
-float16 __ovld __cnfn tan(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tan(double);
-double2 __ovld __cnfn tan(double2);
-double3 __ovld __cnfn tan(double3);
-double4 __ovld __cnfn tan(double4);
-double8 __ovld __cnfn tan(double8);
-double16 __ovld __cnfn tan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tan(half);
-half2 __ovld __cnfn tan(half2);
-half3 __ovld __cnfn tan(half3);
-half4 __ovld __cnfn tan(half4);
-half8 __ovld __cnfn tan(half8);
-half16 __ovld __cnfn tan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic tangent.
- */
-float __ovld __cnfn tanh(float);
-float2 __ovld __cnfn tanh(float2);
-float3 __ovld __cnfn tanh(float3);
-float4 __ovld __cnfn tanh(float4);
-float8 __ovld __cnfn tanh(float8);
-float16 __ovld __cnfn tanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanh(double);
-double2 __ovld __cnfn tanh(double2);
-double3 __ovld __cnfn tanh(double3);
-double4 __ovld __cnfn tanh(double4);
-double8 __ovld __cnfn tanh(double8);
-double16 __ovld __cnfn tanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanh(half);
-half2 __ovld __cnfn tanh(half2);
-half3 __ovld __cnfn tanh(half3);
-half4 __ovld __cnfn tanh(half4);
-half8 __ovld __cnfn tanh(half8);
-half16 __ovld __cnfn tanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tan (PI * x).
- */
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the gamma function.
- */
-float __ovld __cnfn tgamma(float);
-float2 __ovld __cnfn tgamma(float2);
-float3 __ovld __cnfn tgamma(float3);
-float4 __ovld __cnfn tgamma(float4);
-float8 __ovld __cnfn tgamma(float8);
-float16 __ovld __cnfn tgamma(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tgamma(double);
-double2 __ovld __cnfn tgamma(double2);
-double3 __ovld __cnfn tgamma(double3);
-double4 __ovld __cnfn tgamma(double4);
-double8 __ovld __cnfn tgamma(double8);
-double16 __ovld __cnfn tgamma(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tgamma(half);
-half2 __ovld __cnfn tgamma(half2);
-half3 __ovld __cnfn tgamma(half3);
-half4 __ovld __cnfn tgamma(half4);
-half8 __ovld __cnfn tgamma(half8);
-half16 __ovld __cnfn tgamma(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to zero
- * rounding mode.
- */
-float __ovld __cnfn trunc(float);
-float2 __ovld __cnfn trunc(float2);
-float3 __ovld __cnfn trunc(float3);
-float4 __ovld __cnfn trunc(float4);
-float8 __ovld __cnfn trunc(float8);
-float16 __ovld __cnfn trunc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn trunc(double);
-double2 __ovld __cnfn trunc(double2);
-double3 __ovld __cnfn trunc(double3);
-double4 __ovld __cnfn trunc(double4);
-double8 __ovld __cnfn trunc(double8);
-double16 __ovld __cnfn trunc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn trunc(half);
-half2 __ovld __cnfn trunc(half2);
-half3 __ovld __cnfn trunc(half3);
-half4 __ovld __cnfn trunc(half4);
-half8 __ovld __cnfn trunc(half8);
-half16 __ovld __cnfn trunc(half16);
-#endif //cl_khr_fp16
-
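-/*
- * Illustrative sketch (not part of the original header): the rounding family
- * differs in direction and tie handling. For 2.5f: rint gives 2.0f (ties to
- * even), round gives 3.0f (ties away from zero), trunc gives 2.0f (toward
- * zero). A hypothetical side-by-side:
- */
-__kernel void rounding_modes(__global const float *x, __global float4 *out) {
-    size_t i = get_global_id(0);
-    out[i] = (float4)(rint(x[i]), round(x[i]), trunc(x[i]), ceil(x[i]));
-}
-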
-/**
- * Compute cosine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
-
-/**
- * Compute x / y.
- */
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x.
- */
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x.
- */
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x.
- */
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
-
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal.
- */
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
-
-/**
- * Compute sine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
-
-/**
- * Compute tangent. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
-
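-/*
- * Illustrative sketch (not part of the original header): despite the name,
- * the half_ functions take and return float; they trade accuracy (a relaxed
- * ULP bound set by the spec) for speed. A hypothetical fast 2-D normalize:
- */
-__kernel void fast_normalize2(__global float2 *v) {
-    size_t i = get_global_id(0);
-    float2 p = v[i];
-    float inv_len = half_rsqrt(p.x * p.x + p.y * p.y); // reduced accuracy
-    v[i] = p * inv_len;
-}
-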
-/**
- * Compute cosine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
-
-/**
- * Compute x / y over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
-
-/**
- * Compute natural logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
-
-/**
- * Compute a base 2 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0. The ranges of
- * x and y are implementation-defined. The maximum error
- * is implementation-defined.
- */
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
-
-/**
- * Compute inverse square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
-
-/**
- * Compute sine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
-
-/**
- * Compute square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
-
-/**
- * Compute tangent over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
-
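-/*
- * Illustrative sketch (not part of the original header): the native_ variants
- * typically map straight onto hardware instructions; both domain and maximum
- * error are implementation-defined, so they fit heuristics and visual effects
- * rather than numerically sensitive code. A hypothetical fast sigmoid:
- */
-__kernel void fast_sigmoid(__global const float *x, __global float *y) {
-    size_t i = get_global_id(0);
-    y[i] = native_recip(1.0f + native_exp(-x[i]));
-}
-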
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
-
-/**
- * Returns | x |.
- */
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
-
-/**
- * Returns | x - y | without modulo overflow.
- */
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
-
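-/*
- * Illustrative sketch (not part of the original header): returning the
- * unsigned type means abs((char)-128) == 128 is representable, and abs_diff
- * never wraps the way a plain subtraction would. A hypothetical
- * sum-of-absolute-differences element:
- */
-__kernel void sad_elems(__global const uchar *a,
-                        __global const uchar *b,
-                        __global uint *out) {
-    size_t i = get_global_id(0);
-    out[i] = (uint)abs_diff(a[i], b[i]); // |a - b| with no modulo overflow
-}
-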
-/**
- * Returns x + y and saturates the result.
- */
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
-
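-/*
- * Illustrative sketch (not part of the original header): add_sat clamps
- * instead of wrapping, e.g. add_sat((uchar)200, (uchar)100) == 255 where
- * plain addition would wrap to 44. A hypothetical brightness adjustment:
- */
-__kernel void brighten(__global uchar4 *pix, uchar amount) {
-    size_t i = get_global_id(0);
-    pix[i] = add_sat(pix[i], (uchar4)(amount)); // saturates per channel
-}
-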
-/**
- * Returns (x + y) >> 1. The intermediate sum does
- * not modulo overflow.
- */
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
-
-/**
- * Returns (x + y + 1) >> 1. The intermediate sum
- * does not modulo overflow.
- */
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
-
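-/*
- * Illustrative sketch (not part of the original header): hadd truncates the
- * average while rhadd rounds it up, e.g. hadd(3, 4) == 3 but rhadd(3, 4) == 4,
- * and neither can overflow the intermediate sum. A hypothetical image blend:
- */
-__kernel void blend_avg(__global const uchar4 *a,
-                        __global const uchar4 *b,
-                        __global uchar4 *dst) {
-    size_t i = get_global_id(0);
-    dst[i] = rhadd(a[i], b[i]);      // per-channel rounded average
-}
-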
-/**
- * Returns min(max(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
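-
-/* Usage sketch (illustrative; not part of the original header): integer
- * clamp saturates per component, e.g. pinning pixel data to video range.
- * The kernel name and buffer layout below are hypothetical.
- *
- *   __kernel void to_video_range(__global uchar4 *px) {
- *     size_t i = get_global_id(0);
- *     px[i] = clamp(px[i], (uchar4)(16), (uchar4)(235));
- *   }
- */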
-
-/**
- * Returns the number of leading 0-bits in x, starting
- * at the most significant bit position.
- */
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
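-
-/* Usage sketch (illustrative; not part of the original header): for a
- * nonzero 32-bit value, floor(log2(x)) falls out of clz directly.
- *
- *   uint x = 20u;
- *   uint floor_log2 = 31u - clz(x); // 31 - 27 = 4
- */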
-
-/**
- * Returns the count of trailing 0-bits in x. If x is 0,
- * returns the size in bits of the type of x or
- * component type of x, if x is a vector.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld __cnfn ctz(char x);
-uchar __ovld __cnfn ctz(uchar x);
-char2 __ovld __cnfn ctz(char2 x);
-uchar2 __ovld __cnfn ctz(uchar2 x);
-char3 __ovld __cnfn ctz(char3 x);
-uchar3 __ovld __cnfn ctz(uchar3 x);
-char4 __ovld __cnfn ctz(char4 x);
-uchar4 __ovld __cnfn ctz(uchar4 x);
-char8 __ovld __cnfn ctz(char8 x);
-uchar8 __ovld __cnfn ctz(uchar8 x);
-char16 __ovld __cnfn ctz(char16 x);
-uchar16 __ovld __cnfn ctz(uchar16 x);
-short __ovld __cnfn ctz(short x);
-ushort __ovld __cnfn ctz(ushort x);
-short2 __ovld __cnfn ctz(short2 x);
-ushort2 __ovld __cnfn ctz(ushort2 x);
-short3 __ovld __cnfn ctz(short3 x);
-ushort3 __ovld __cnfn ctz(ushort3 x);
-short4 __ovld __cnfn ctz(short4 x);
-ushort4 __ovld __cnfn ctz(ushort4 x);
-short8 __ovld __cnfn ctz(short8 x);
-ushort8 __ovld __cnfn ctz(ushort8 x);
-short16 __ovld __cnfn ctz(short16 x);
-ushort16 __ovld __cnfn ctz(ushort16 x);
-int __ovld __cnfn ctz(int x);
-uint __ovld __cnfn ctz(uint x);
-int2 __ovld __cnfn ctz(int2 x);
-uint2 __ovld __cnfn ctz(uint2 x);
-int3 __ovld __cnfn ctz(int3 x);
-uint3 __ovld __cnfn ctz(uint3 x);
-int4 __ovld __cnfn ctz(int4 x);
-uint4 __ovld __cnfn ctz(uint4 x);
-int8 __ovld __cnfn ctz(int8 x);
-uint8 __ovld __cnfn ctz(uint8 x);
-int16 __ovld __cnfn ctz(int16 x);
-uint16 __ovld __cnfn ctz(uint16 x);
-long __ovld __cnfn ctz(long x);
-ulong __ovld __cnfn ctz(ulong x);
-long2 __ovld __cnfn ctz(long2 x);
-ulong2 __ovld __cnfn ctz(ulong2 x);
-long3 __ovld __cnfn ctz(long3 x);
-ulong3 __ovld __cnfn ctz(ulong3 x);
-long4 __ovld __cnfn ctz(long4 x);
-ulong4 __ovld __cnfn ctz(ulong4 x);
-long8 __ovld __cnfn ctz(long8 x);
-ulong8 __ovld __cnfn ctz(ulong8 x);
-long16 __ovld __cnfn ctz(long16 x);
-ulong16 __ovld __cnfn ctz(ulong16 x);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
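-
-/* Usage sketch (illustrative; not part of the original header; ctz needs
- * OpenCL C 2.0 per the guard above): ctz yields the index of the lowest
- * set bit, which makes iterating a bitmask branch-light.
- *
- *   uint mask = 0x428u;
- *   while (mask) {
- *     uint bit = ctz(mask); // 3, then 5, then 10
- *     mask &= mask - 1u;    // clear the lowest set bit
- *   }
- */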
-
-/**
- * Returns mul_hi(a, b) + c.
- */
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns a * b + c and saturates the result.
- */
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
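-
-/* Usage sketch (illustrative; not part of the original header): mad_sat
- * pins a fixed-point accumulator at the type's limits instead of wrapping.
- *
- *   short acc = 30000;
- *   acc = mad_sat((short)300, (short)200, acc); // 90000 saturates to 32767
- */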
-
-/**
- * Returns y if x < y, otherwise it returns x.
- */
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
-
-/**
- * Returns y if y < x, otherwise it returns x.
- */
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
-
-/**
- * Computes x * y and returns the high half of the
- * product of x and y.
- */
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
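-
-/* Usage sketch (illustrative; not part of the original header): mul_hi
- * pairs with ordinary multiplication to recover the full-width product.
- *
- *   uint a = 0xDEADBEEFu, b = 0xCAFEBABEu;
- *   uint lo = a * b;               // low 32 bits
- *   uint hi = mul_hi(a, b);        // high 32 bits
- *   ulong full = upsample(hi, lo); // exact 64-bit product
- */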
-
-/**
- * For each element in v, the bits are shifted left by
- * the number of bits given by the corresponding
- * element in i (subject to usual shift modulo rules
- * described in section 6.3). Bits shifted off the left
- * side of the element are shifted back in from the
- * right.
- */
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
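-
-/* Usage sketch (illustrative; not part of the original header): bit
- * rotation is the workhorse of many non-cryptographic hashes; this is one
- * round of a hypothetical mixer, not any specific published hash.
- *
- *   uint h = 0x12345678u;
- *   h ^= h >> 16;
- *   h = rotate(h * 0x9E3779B9u, 13u);
- */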
-
-/**
- * Returns x - y and saturates the result.
- */
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
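-
-/* Usage sketch (illustrative; not part of the original header): for
- * unsigned types sub_sat clamps at zero, so an absolute difference needs
- * no branch.
- *
- *   uchar a = 10, b = 200;
- *   uchar diff = sub_sat(a, b) | sub_sat(b, a); // 190, no wraparound
- */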
-
-/**
- * result[i] = ((short)hi[i] << 8) | lo[i]
- * result[i] = ((ushort)hi[i] << 8) | lo[i]
- */
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
-
-/**
- * result[i] = ((int)hi[i] << 16) | lo[i]
- * result[i] = ((uint)hi[i] << 16) | lo[i]
- */
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
-/**
- * result[i] = ((long)hi[i] << 32) | lo[i]
- * result[i] = ((ulong)hi[i] << 32) | lo[i]
- */
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
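-
-/* Usage sketch (illustrative; not part of the original header): upsample
- * widens by concatenation, e.g. rebuilding a 64-bit counter from two
- * 32-bit halves.
- *
- *   uint hi = 0x00000001u, lo = 0x80000000u;
- *   ulong ticks = upsample(hi, lo); // 0x0000000180000000
- */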
-
-/**
- * popcount(x): returns the number of set bits in x.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
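-
-/* Usage sketch (illustrative; not part of the original header): the
- * Hamming distance of two bit patterns is the popcount of their XOR.
- *
- *   uint a = 0xFFu, b = 0x0Fu;
- *   uint hd = popcount(a ^ b); // popcount(0xF0) == 4
- */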
-
-/**
- * Multiply two 24-bit integer values x and y and add
- * the 32-bit integer result to the 32-bit integer z.
- * Refer to definition of mul24 to see how the 24-bit
- * integer multiplication is performed.
- */
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
-
-/**
- * Multiply two 24-bit integer values x and y. x and y
- * are 32-bit integers but only the low 24-bits are used
- * to perform the multiplication. mul24 should only
- * be used when values in x and y are in the range
- * [-2^23, 2^23-1] if x and y are signed integers and in
- * the range [0, 2^24-1] if x and y are unsigned integers. If
- * x and y are not in this range, the multiplication
- * result is implementation-defined.
- */
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
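-
-/* Usage sketch (illustrative; not part of the original header): image
- * coordinates fit comfortably in 24 bits, so mad24 is a common (and on
- * some GPUs faster) way to linearize a 2-D index. Kernel is hypothetical.
- *
- *   __kernel void copy_row_major(__global const float *src,
- *                                __global float *dst, int width) {
- *     const int x = (int)get_global_id(0);
- *     const int y = (int)get_global_id(1);
- *     const int idx = mad24(y, width, x); // y * width + x
- *     dst[idx] = src[idx];
- *   }
- */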
-
-// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
-
-/**
- * Returns fmin(fmax(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
-#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
-#endif //cl_khr_fp16
-
-/**
- * Converts radians to degrees, i.e. (180 / PI) *
- * radians.
- */
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
-#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if x < y, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the linear blend of x and y implemented as:
- * x + (y - x) * a
- * a must be a value in the range 0.0 ... 1.0. If a is not
- * in the range 0.0 ... 1.0, the return values are
- * undefined.
- */
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
-#endif //cl_khr_fp16
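-
-/* Usage sketch (illustrative; not part of the original header): mix is a
- * per-component lerp; crossfading two colors is the textbook case.
- *
- *   float4 red  = (float4)(1.0f, 0.0f, 0.0f, 1.0f);
- *   float4 blue = (float4)(0.0f, 0.0f, 1.0f, 1.0f);
- *   float4 c = mix(red, blue, 0.25f); // 75% red, 25% blue
- */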
-
-/**
- * Converts degrees to radians, i.e. (PI / 180) *
- * degrees.
- */
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
-#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x < edge, otherwise it returns 1.0.
- */
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
- * performs smooth Hermite interpolation between 0
- * and 1 when edge0 < x < edge1. This is useful in
- * cases where you would want a threshold function
- * with a smooth transition.
- * This is equivalent to:
- * gentype t;
- * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
- * return t * t * (3 - 2 * t);
- * Results are undefined if edge0 >= edge1 or if x,
- * edge0 or edge1 is a NaN.
- */
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
-#endif //cl_khr_fp16
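-
-/* Usage sketch (illustrative; not part of the original header): the classic
- * smoothstep use is an anti-aliased edge, feathering over a band of width
- * 2*w around radius r; p, center, r and w are assumed given.
- *
- *   float d = length(p - center);
- *   float coverage = 1.0f - smoothstep(r - w, r + w, d);
- */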
-
-/**
- * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
- * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
- */
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
-
-/**
- * Returns the cross product of p0.xyz and p1.xyz. The
- * w component of the float4 result will be 0.0.
- */
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
-#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
-#endif //cl_khr_fp16
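-
-/* Usage sketch (illustrative; not part of the original header): a facet
- * normal from three triangle vertices, cross followed by normalize.
- *
- *   float3 a = (float3)(0.0f, 0.0f, 0.0f);
- *   float3 b = (float3)(1.0f, 0.0f, 0.0f);
- *   float3 c = (float3)(0.0f, 1.0f, 0.0f);
- *   float3 n = normalize(cross(b - a, c - a)); // (0, 0, 1)
- */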
-
-/**
- * Compute dot product.
- */
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
-#endif //cl_khr_fp16
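-
-/* Usage sketch (illustrative; not part of the original header): Lambertian
- * shading clamps the cosine term from dot() at zero.
- *
- *   float3 n = (float3)(0.0f, 1.0f, 0.0f);            // unit surface normal
- *   float3 l = normalize((float3)(1.0f, 1.0f, 0.0f)); // direction to light
- *   float diffuse = max(dot(n, l), 0.0f);             // ~0.707
- */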
-
-/**
- * Returns the distance between p0 and p1. This is
- * calculated as length(p0 - p1).
- */
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p, i.e.,
- * sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1.
- */
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns fast_length(p0 - p1).
- */
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p computed as:
- * half_sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1. fast_normalize is computed as:
- * p * half_rsqrt (p.x^2 + p.y^2 + ... )
- * The result shall be within 8192 ulps error from the
- * infinitely precise result of
- * if (all(p == 0.0f))
- * result = p;
- * else
- * result = p / sqrt (p.x^2 + p.y^2 + ...);
- * with the following exceptions:
- * 1) If the sum of squares is greater than FLT_MAX
- * then the values of the floating-point elements in
- * the result vector are undefined.
- * 2) If the sum of squares is less than FLT_MIN then
- * the implementation may return p unchanged.
- * 3) If the device is in "denorms are flushed to zero"
- * mode, individual operand elements with magnitude
- * less than sqrt(FLT_MIN) may be flushed to zero
- * before proceeding with the calculation.
- */
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
-
-/**
- * intn isequal (floatn x, floatn y)
- * Returns the component-wise compare of x == y.
- */
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
-#endif //cl_khr_fp16
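-
-/* Usage sketch (illustrative; not part of the original header): the vector
- * forms return -1 (all bits set) per true lane, which plugs straight into
- * select() as a mask.
- *
- *   float4 x = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
- *   float4 y = (float4)(1.0f, 0.0f, 3.0f, 0.0f);
- *   float4 r = select((float4)(0.0f), x, isequal(x, y)); // (1, 0, 3, 0)
- */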
-
-/**
- * Returns the component-wise compare of x != y.
- */
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x > y.
- */
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x >= y.
- */
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x < y.
- */
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x <= y.
- */
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of
- * (x < y) || (x > y) .
- */
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
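-
-/*
- * Illustrative contrast with isnotequal (editor's sketch, not part of
- * the original header): with a NaN operand, x != y is true, but the
- * ordered comparison (x < y) || (x > y) is false:
- *   int a = isnotequal(nan(0u), 1.0f);    // 1
- *   int b = islessgreater(nan(0u), 1.0f); // 0
- */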
-
-/**
- * Test for finite value.
- */
-int __ovld __cnfn isfinite(float);
-int2 __ovld __cnfn isfinite(float2);
-int3 __ovld __cnfn isfinite(float3);
-int4 __ovld __cnfn isfinite(float4);
-int8 __ovld __cnfn isfinite(float8);
-int16 __ovld __cnfn isfinite(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isfinite(double);
-long2 __ovld __cnfn isfinite(double2);
-long3 __ovld __cnfn isfinite(double3);
-long4 __ovld __cnfn isfinite(double4);
-long8 __ovld __cnfn isfinite(double8);
-long16 __ovld __cnfn isfinite(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isfinite(half);
-short2 __ovld __cnfn isfinite(half2);
-short3 __ovld __cnfn isfinite(half3);
-short4 __ovld __cnfn isfinite(half4);
-short8 __ovld __cnfn isfinite(half8);
-short16 __ovld __cnfn isfinite(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for infinity value, positive or negative.
- */
-int __ovld __cnfn isinf(float);
-int2 __ovld __cnfn isinf(float2);
-int3 __ovld __cnfn isinf(float3);
-int4 __ovld __cnfn isinf(float4);
-int8 __ovld __cnfn isinf(float8);
-int16 __ovld __cnfn isinf(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isinf(double);
-long2 __ovld __cnfn isinf(double2);
-long3 __ovld __cnfn isinf(double3);
-long4 __ovld __cnfn isinf(double4);
-long8 __ovld __cnfn isinf(double8);
-long16 __ovld __cnfn isinf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isinf(half);
-short2 __ovld __cnfn isinf(half2);
-short3 __ovld __cnfn isinf(half3);
-short4 __ovld __cnfn isinf(half4);
-short8 __ovld __cnfn isinf(half8);
-short16 __ovld __cnfn isinf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a NaN.
- */
-int __ovld __cnfn isnan(float);
-int2 __ovld __cnfn isnan(float2);
-int3 __ovld __cnfn isnan(float3);
-int4 __ovld __cnfn isnan(float4);
-int8 __ovld __cnfn isnan(float8);
-int16 __ovld __cnfn isnan(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnan(double);
-long2 __ovld __cnfn isnan(double2);
-long3 __ovld __cnfn isnan(double3);
-long4 __ovld __cnfn isnan(double4);
-long8 __ovld __cnfn isnan(double8);
-long16 __ovld __cnfn isnan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnan(half);
-short2 __ovld __cnfn isnan(half2);
-short3 __ovld __cnfn isnan(half3);
-short4 __ovld __cnfn isnan(half4);
-short8 __ovld __cnfn isnan(half8);
-short16 __ovld __cnfn isnan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a normal value.
- */
-int __ovld __cnfn isnormal(float);
-int2 __ovld __cnfn isnormal(float2);
-int3 __ovld __cnfn isnormal(float3);
-int4 __ovld __cnfn isnormal(float4);
-int8 __ovld __cnfn isnormal(float8);
-int16 __ovld __cnfn isnormal(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnormal(double);
-long2 __ovld __cnfn isnormal(double2);
-long3 __ovld __cnfn isnormal(double3);
-long4 __ovld __cnfn isnormal(double4);
-long8 __ovld __cnfn isnormal(double8);
-long16 __ovld __cnfn isnormal(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnormal(half);
-short2 __ovld __cnfn isnormal(half2);
-short3 __ovld __cnfn isnormal(half3);
-short4 __ovld __cnfn isnormal(half4);
-short8 __ovld __cnfn isnormal(half8);
-short16 __ovld __cnfn isnormal(half16);
-#endif //cl_khr_fp16
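-
-/*
- * Illustrative usage of the classification tests (editor's sketch, not
- * part of the original header):
- *   isnan(nan(0u));           // 1
- *   isinf(-INFINITY);         // 1, for either sign of infinity
- *   isfinite(FLT_MAX);        // 1
- *   isnormal(FLT_MIN / 2.0f); // 0: subnormal (or flushed) values are not normal
- */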
-
-/**
- * Test if arguments are ordered. isordered() takes
- * arguments x and y, and returns the result
- * isequal(x, x) && isequal(y, y).
- */
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are unordered. isunordered()
- * takes arguments x and y, returning non-zero if x or y
- * is NaN, and zero otherwise.
- */
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
-#endif //cl_khr_fp16
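-
-/*
- * Illustrative usage (editor's sketch, not part of the original header):
- *   isordered(1.0f, 2.0f);      // 1: neither argument is NaN
- *   isordered(1.0f, nan(0u));   // 0
- *   isunordered(1.0f, nan(0u)); // 1: at least one argument is NaN
- */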
-
-/**
- * Test for sign bit. The scalar version of the function
- * returns 1 if the sign bit in the float is set, else 0.
- * The vector version returns, for each component in
- * floatn, -1 if the sign bit in the float is set, else 0.
- */
-int __ovld __cnfn signbit(float);
-int2 __ovld __cnfn signbit(float2);
-int3 __ovld __cnfn signbit(float3);
-int4 __ovld __cnfn signbit(float4);
-int8 __ovld __cnfn signbit(float8);
-int16 __ovld __cnfn signbit(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn signbit(double);
-long2 __ovld __cnfn signbit(double2);
-long3 __ovld __cnfn signbit(double3);
-long4 __ovld __cnfn signbit(double4);
-long8 __ovld __cnfn signbit(double8);
-long16 __ovld __cnfn signbit(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn signbit(half);
-short2 __ovld __cnfn signbit(half2);
-short3 __ovld __cnfn signbit(half3);
-short4 __ovld __cnfn signbit(half4);
-short8 __ovld __cnfn signbit(half8);
-short16 __ovld __cnfn signbit(half16);
-#endif //cl_khr_fp16
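-
-/*
- * Illustrative usage (editor's sketch, not part of the original header):
- *   signbit(-0.5f); // 1
- *   signbit(-0.0f); // 1: negative zero has its sign bit set
- *   signbit(0.5f);  // 0
- */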
-
-/**
- * Returns 1 if the most significant bit in any component
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
-
-/**
- * Returns 1 if the most significant bit in all components
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
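-
-/*
- * Illustrative usage (editor's sketch, not part of the original header).
- * any/all test the most significant (sign) bit of each component, so
- * they pair naturally with the -1/0 results of the vector relational
- * functions:
- *   int4 cmp = isless((float4)(1.0f, 2.0f, 3.0f, 4.0f),
- *                     (float4)(2.0f)); // cmp == (-1, 0, 0, 0)
- *   int some  = any(cmp);              // 1
- *   int every = all(cmp);              // 0
- */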
-
-/**
- * Each bit of the result is the corresponding bit of a if
- * the corresponding bit of c is 0. Otherwise it is the
- * corresponding bit of b.
- */
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
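-
-/*
- * Illustrative usage (editor's sketch, not part of the original header):
- * each result bit comes from b where c has a 1 bit and from a where c
- * has a 0 bit:
- *   uint r = bitselect(0x0000FFFFu, 0xAAAAAAAAu, 0x00FF00FFu);
- *   // r == 0x00AAFFAAu
- */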
-
-/**
- * For each component of a vector type,
- * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
- * For a scalar type, result = c ? b : a.
- * b and a must have the same type.
- * c must have the same number of elements and bits as a.
- */
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
-
-#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
-#endif //cl_khr_fp16
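-
-/*
- * Illustrative usage (editor's sketch, not part of the original header).
- * Unlike bitselect, select picks whole components, keyed on the MSB of
- * each component of c (vector forms) or on c != 0 (scalar forms):
- *   int4 c = (int4)(-1, 0, -1, 0); // MSB set, clear, set, clear
- *   float4 r = select((float4)(1.0f), (float4)(2.0f), c);
- *   // r == (2.0f, 1.0f, 2.0f, 1.0f)
- */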
-
-// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
-// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
-/**
- * Use generic type gentype to indicate the built-in data types
- * char, uchar, short, ushort, int, uint, long, ulong, float,
- * double or half.
- *
- * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
- *
- * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
- *
- * The address computed as (p + (offset * n)) must be
- * 8-bit aligned if gentype is char, uchar;
- * 16-bit aligned if gentype is short, ushort, half;
- * 32-bit aligned if gentype is int, uint, float;
- * 64-bit aligned if gentype is long, ulong, double.
- */
-
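-/*
- * Illustrative usage inside a kernel (editor's sketch, not part of the
- * original header); gid indexes float4-sized elements of a flat buffer:
- *   __kernel void scale(__global float *buf) {
- *     size_t gid = get_global_id(0);
- *     float4 v = vload4(gid, buf); // reads buf[4*gid] .. buf[4*gid+3]
- *     vstore4(v * 2.0f, gid, buf); // writes them back, doubled
- *   }
- */
-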
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
-#endif //cl_khr_fp16
-#else
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
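-/**
- * Illustrative sketch, not part of the original header: scattering a float4
- * with vstore4. The kernel name and parameters are hypothetical.
- */
-__kernel void vstore4_example(__global const float4 *in, __global float *out) {
-  size_t i = get_global_id(0);
-  vstore4(in[i], i, out); // writes 4 floats starting at (out + i * 4)
-}
-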
-/**
- * Read sizeof (half) bytes of data from address
- * (p + offset). The data read is interpreted as a
- * half value. The half value is converted to a
- * float value and the float value is returned.
- * The read address computed as (p + offset)
- * must be 16-bit aligned.
- */
-float __ovld vload_half(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
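-/**
- * Illustrative sketch, not part of the original header: widening packed fp16
- * input to float with vload_half. Names are hypothetical.
- */
-__kernel void vload_half_example(__global const half *in, __global float *out) {
-  size_t i = get_global_id(0);
-  out[i] = vload_half(i, in); // reads the half at (in + i) as a float
-}
-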
-/**
- * Read sizeof (halfn) bytes of data from address
- * (p + (offset * n)). The data read is interpreted
- * as a halfn value. The halfn value read is
- * converted to a floatn value and the floatn
- * value is returned. The read address computed
- * as (p + (offset * n)) must be 16-bit aligned.
- */
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
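-/**
- * Illustrative sketch, not part of the original header: the vectorized
- * variant, loading four halfs per work-item. Names are hypothetical.
- */
-__kernel void vload_half4_example(__global const half *in, __global float4 *out) {
-  size_t i = get_global_id(0);
-  out[i] = vload_half4(i, in); // reads 4 halfs at (in + i * 4) as a float4
-}
-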
-/**
- * The float value given by data is first
- * converted to a half value using the appropriate
- * rounding mode. The half value is then written
- * to the address computed as (p + offset).
- * The address computed as (p + offset) must
- * be 16-bit aligned.
- * vstore_half uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
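-/**
- * Illustrative sketch, not part of the original header: storing a float as
- * fp16 with an explicit rounding mode. Names are hypothetical.
- */
-__kernel void vstore_half_example(__global const float *in, __global half *out) {
-  size_t i = get_global_id(0);
-  vstore_half_rtz(in[i], i, out); // round toward zero rather than the
-                                  // default round-to-nearest-even
-}
-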
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode. The halfn value is then written to the
- * address computed as (p + (offset * n)). The
- * address computed as (p + (offset * n)) must be
- * 16-bit aligned.
- * vstore_halfn uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
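-/**
- * Illustrative sketch, not part of the original header: the vectorized
- * store, converting a float4 to four halfs. Names are hypothetical.
- */
-__kernel void vstore_half4_example(__global const float4 *in, __global half *out) {
-  size_t i = get_global_id(0);
-  vstore_half4(in[i], i, out); // writes 4 halfs starting at (out + i * 4)
-}
-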
-/**
- * For n = 1, 2, 4, 8 and 16, read sizeof (halfn)
- * bytes of data from address (p + (offset * n)).
- * The data read is interpreted as a halfn value.
- * The halfn value read is converted to a floatn
- * value and the floatn value is returned.
- * The address computed as (p + (offset * n))
- * must be aligned to sizeof (halfn) bytes.
- * For n = 3, vloada_half3 reads a half3 from
- * address (p + (offset * 4)) and returns a float3.
- * The address computed as (p + (offset * 4))
- * must be aligned to sizeof (half) * 4 bytes.
- */
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
-
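-/**
- * Illustrative sketch, not part of the original header: unlike vload_half3,
- * the aligned variant consumes a 4-half slot per element. Names are
- * hypothetical.
- */
-__kernel void vloada_half3_example(__global const half *in, __global float3 *out) {
-  size_t i = get_global_id(0);
-  out[i] = vloada_half3(i, in); // reads from (in + i * 4); 8-byte aligned
-}
-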
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode.
- * For n = 1, 2, 4, 8 and 16, the halfn value is
- * written to the address computed as (p + (offset
- * * n)). The address computed as (p + (offset *
- * n)) must be aligned to sizeof (halfn) bytes.
- * For n = 3, the half3 value is written to the
- * address computed as (p + (offset * 4)). The
- * address computed as (p + (offset * 4)) must be
- * aligned to sizeof (half) * 4 bytes.
- * vstorea_halfn uses the current rounding
- * mode. The default current rounding mode is
- * round to nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-
-#else
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
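-/**
- * Illustrative sketch, not part of the original header: the aligned half3
- * store writes to (p + (offset * 4)), leaving the fourth half of each slot
- * unwritten. Names are hypothetical.
- */
-__kernel void vstorea_half3_example(__global const float3 *in, __global half *out) {
-  size_t i = get_global_id(0);
-  vstorea_half3(in[i], i, out); // 3 halfs written at (out + i * 4)
-}
-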
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-/**
- * All work-items in a work-group executing the kernel
- * on a processor must execute this function before any
- * are allowed to continue execution beyond the barrier.
- * This function must be encountered by all work-items in
- * a work-group executing the kernel.
- * If barrier is inside a conditional statement, then all
- * work-items must enter the conditional if any work-item
- * enters the conditional statement and executes the
- * barrier.
- * If barrier is inside a loop, all work-items must execute
- * the barrier for each iteration of the loop before any are
- * allowed to continue execution beyond the barrier.
- * The barrier function also queues a memory fence
- * (reads and writes) to ensure correct ordering of
- * memory operations to local or global memory.
- * The flags argument specifies the memory address space
- * and can be set to a combination of the following literal
- * values.
- * CLK_LOCAL_MEM_FENCE - The barrier function
- * will either flush any variables stored in local memory
- * or queue a memory fence to ensure correct ordering of
- * memory operations to local memory.
- * CLK_GLOBAL_MEM_FENCE - The barrier function
- * will queue a memory fence to ensure correct ordering
- * of memory operations to global memory. This can be
- * useful when work-items, for example, write to buffer or
- * image objects and then want to read the updated data.
- */
-
-void __ovld __conv barrier(cl_mem_fence_flags flags);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
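-/**
- * Illustrative sketch, not part of the original header: a tiled reduction in
- * which every work-item must reach the barrier before any continues. Names
- * are hypothetical.
- */
-__kernel void barrier_example(__global const float *in, __local float *tile,
-                              __global float *out) {
-  size_t lid = get_local_id(0);
-  tile[lid] = in[get_global_id(0)];
-  barrier(CLK_LOCAL_MEM_FENCE); // the staged tile is now visible to the
-                                // whole work-group
-  if (lid == 0) {
-    float sum = 0.0f;
-    for (size_t j = 0; j < get_local_size(0); ++j)
-      sum += tile[j];
-    out[get_group_id(0)] = sum;
-  }
-}
-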
-// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
-
-/**
- * Orders loads and stores of a work-item
- * executing a kernel. This means that loads
- * and stores preceding the mem_fence will
- * be committed to memory before any loads
- * and stores following the mem_fence.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Read memory barrier that orders only
- * loads.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld read_mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Write memory barrier that orders only
- * stores.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld write_mem_fence(cl_mem_fence_flags flags);
-
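-/**
- * Illustrative sketch, not part of the original header: a fence orders this
- * work-item's own accesses; it is not a work-group barrier. Names are
- * hypothetical.
- */
-__kernel void fence_example(__global int *data, __global int *flags) {
-  size_t i = get_global_id(0);
-  data[i] = (int)i;
-  write_mem_fence(CLK_GLOBAL_MEM_FENCE); // commit the data store before
-                                         // publishing the flag below
-  flags[i] = 1;
-}
-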
-// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-
-#if defined(__opencl_c_generic_address_space)
-cl_mem_fence_flags __ovld get_fence(const void *ptr);
-cl_mem_fence_flags __ovld get_fence(void *ptr);
-
-/**
- * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
- * and checked in Sema since they should be declared as
- *   addr gentype* to_addr (gentype*);
- * where gentype is builtin type or user defined type.
- */
-
-#endif //defined(__opencl_c_generic_address_space)
-
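-/**
- * Illustrative sketch, not part of the original header, assuming the generic
- * address space: choosing fence flags for a pointer whose address space is
- * only known at run time. The helper name is hypothetical.
- */
-void flush_through(void *p) {
-  mem_fence(get_fence(p)); // CLK_LOCAL_MEM_FENCE or CLK_GLOBAL_MEM_FENCE,
-                           // depending on where p actually points
-}
-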
-// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
-
-/**
- * event_t async_work_group_copy (
- * __global gentype *dst,
- * const __local gentype *src,
- * size_t num_elements,
- * event_t event)
- * Perform an async copy of num_elements
- * gentype elements from src to dst. The async
- * copy is performed by all work-items in a work-group,
- * and this built-in function must therefore
- * be encountered by all work-items in a work-group
- * executing the kernel with the same
- * argument values; otherwise the results are
- * undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the async_work_group_copy with
- * a previous async copy allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in the event argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
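-/*
- * Illustrative sketch, not part of the original header (kept in a comment
- * since the overloads are declared below): staging one tile of global data
- * into local memory. Names are hypothetical.
- *
- *   __kernel void async_copy_example(__global const float *in,
- *                                    __local float *tile,
- *                                    __global float *out) {
- *     event_t e = async_work_group_copy(
- *         tile, in + get_group_id(0) * get_local_size(0),
- *         get_local_size(0), 0);
- *     wait_group_events(1, &e); // every work-item waits for the copy
- *     out[get_global_id(0)] = tile[get_local_id(0)] * 2.0f;
- *   }
- */
-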
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp16
-
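-// Illustrative usage (not part of the upstream header): a minimal sketch of
-// staging a tile of global memory into local memory with
-// async_work_group_copy. Kernel and buffer names are hypothetical, and the
-// local work size is assumed to equal 64.
-__kernel void scale_tile(__global const float *in, __global float *out) {
-    __local float tile[64];
-    size_t base = get_group_id(0) * 64;
-    // All work-items must reach this call with identical arguments.
-    event_t e = async_work_group_copy(tile, in + base, 64, 0);
-    wait_group_events(1, &e);
-    tile[get_local_id(0)] *= 2.0f;
-    // The copy out performs no implicit synchronization, so make the
-    // local-memory writes visible first.
-    barrier(CLK_LOCAL_MEM_FENCE);
-    e = async_work_group_copy(out + base, tile, 64, 0);
-    wait_group_events(1, &e);
-}
-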
-/**
- * Perform an async gather of num_elements gentype elements from src to
- * dst. src_stride is the stride, in gentype elements, between consecutive
- * elements read from src; dst_stride is the stride, in gentype elements,
- * between consecutive elements written to dst. The async gather is
- * performed by all work-items in a work-group, so this built-in function
- * must be encountered by all work-items in a work-group executing the
- * kernel with the same argument values; otherwise the results are
- * undefined.
- * Returns an event object that can be used by wait_group_events to wait
- * for the async copy to finish. The event argument can also be used to
- * associate the async_work_group_strided_copy with a previous async copy,
- * allowing an event to be shared by multiple async copies; otherwise
- * event should be zero.
- * If the event argument is non-zero, the event object supplied in the
- * event argument will be returned.
- * This function does not perform any implicit synchronization of source
- * data, such as using a barrier before performing the copy.
- */
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp16
-
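-// Illustrative usage (not part of the upstream header): a minimal sketch that
-// gathers every stride-th element of a matrix (i.e. one column) into
-// contiguous local memory with async_work_group_strided_copy. Names are
-// hypothetical; a single work-group of 64 work-items is assumed.
-__kernel void gather_column(__global const float *matrix, __global float *out,
-                            uint stride) {
-    __local float column[64];
-    // Read matrix[i * stride] for i in [0, 64) into column[i].
-    event_t e = async_work_group_strided_copy(column, matrix, 64, stride, 0);
-    wait_group_events(1, &e);
-    out[get_local_id(0)] = column[get_local_id(0)] * 0.5f;
-}
-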
-/**
- * Wait for events that identify the async_work_group_copy operations to
- * complete. The event objects specified in event_list will be released
- * after the wait is performed.
- * This function must be encountered by all work-items in a work-group
- * executing the kernel with the same num_events and event objects
- * specified in event_list; otherwise the results are undefined.
- */
-void __ovld wait_group_events(int num_events, event_t *event_list);
-
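-// Illustrative sketch (not in the upstream header): one event shared by two
-// async copies and released by a single wait_group_events call. Names are
-// hypothetical; the local work size is assumed to equal 64.
-__kernel void load_pair(__global const float *a, __global const float *b,
-                        __global float *out) {
-    __local float la[64];
-    __local float lb[64];
-    size_t base = get_group_id(0) * 64;
-    size_t lid = get_local_id(0);
-    // Passing the first copy's event into the second groups both transfers.
-    event_t e = async_work_group_copy(la, a + base, 64, 0);
-    e = async_work_group_copy(lb, b + base, 64, e);
-    wait_group_events(1, &e);
-    out[base + lid] = la[lid] + lb[lid];
-}
-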
-/**
- * Prefetch num_elements * sizeof(gentype) bytes into the global cache.
- * The prefetch instruction is applied to a work-item in a work-group and
- * does not affect the functional behavior of the kernel.
- */
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
-#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
-#endif // cl_khr_fp16
-
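-// Illustrative sketch (not in the upstream header): hinting the next element
-// into the global cache while processing the current one. prefetch is purely
-// a per-work-item hint and never changes results. Names are hypothetical.
-__kernel void stream_scale(__global const float *in, __global float *out,
-                           uint tiles) {
-    size_t gid = get_global_id(0);
-    size_t gsz = get_global_size(0);
-    for (uint t = 0; t < tiles; ++t) {
-        if (t + 1 < tiles)
-            prefetch(in + (t + 1) * gsz + gid, 1);
-        out[t * gsz + gid] = in[t * gsz + gid] * 2.0f;
-    }
-}
-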
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old + val) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
-#endif
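-
-// Illustrative sketch (not in the upstream header): a 256-bin histogram built
-// with atomic_add on global counters (atomic_sub below is symmetric). Names
-// are hypothetical; the host is assumed to zero-initialize bins.
-__kernel void histogram(__global const uchar *data, __global int *bins) {
-    // Each work-item bumps the bin for its byte; contention is resolved
-    // atomically, and the pre-add value is simply discarded here.
-    atomic_add(&bins[data[get_global_id(0)]], 1);
-}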
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old - val) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Swap the old value stored at the location pointed to by p with the
- * new value given by val. Returns the old value.
- */
-int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile __global float *p, float val);
-float __ovld atomic_xchg(volatile __local float *p, float val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile float *p, float val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_xchg(volatile __global long *p, long val);
-long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
-#endif
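-
-// Illustrative sketch (not in the upstream header): atomic_xchg publishes a
-// work-item's result into a shared slot and hands back whatever value it
-// displaced. Names are hypothetical.
-__kernel void publish_latest(__global int *slot, __global const int *results,
-                             __global int *displaced) {
-    size_t gid = get_global_id(0);
-    // The swap is atomic, so every prior occupant is returned exactly once.
-    displaced[gid] = atomic_xchg(slot, results[gid]);
-}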
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old + 1) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
-#endif
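-
-// Illustrative sketch (not in the upstream header): stream compaction, where
-// atomic_inc hands each surviving element a unique output index. Names are
-// hypothetical; the host is assumed to zero-initialize *count.
-__kernel void compact_positive(__global const float *in, __global float *out,
-                               __global int *count) {
-    float v = in[get_global_id(0)];
-    if (v > 0.0f)
-        out[atomic_inc(count)] = v;  // returns the pre-increment index
-}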
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old - 1) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
-#endif
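-
-// Illustrative sketch (not in the upstream header): detecting the last
-// work-group to finish by counting down a counter that the host initialized
-// to the number of work-groups. Names are hypothetical.
-__kernel void finish_stage(__global int *groups_left, __global int *done) {
-    // ... per-group work would go here ...
-    barrier(CLK_GLOBAL_MEM_FENCE);
-    if (get_local_id(0) == 0) {
-        // atomic_dec returns the pre-decrement value, so seeing 1 means the
-        // counter just reached 0 and this work-group finished last.
-        if (atomic_dec(groups_left) == 1)
-            *done = 1;
-    }
-}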
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old == cmp) ? val : old and store the
- * result at the location pointed to by p. The function returns old.
- */
-int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
-long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
-#endif
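-
-// Illustrative sketch (not in the upstream header): emulating a float atomic
-// add with a compare-exchange loop over the value's bit pattern, a common
-// workaround since atomic_add has no float overload.
-void atomic_add_float(volatile __global float *p, float val) {
-    volatile __global int *ip = (volatile __global int *)p;
-    int old_bits, new_bits;
-    do {
-        old_bits = *ip;
-        new_bits = as_int(as_float(old_bits) + val);
-        // Retry if another work-item modified *p between the read and CAS.
-    } while (atomic_cmpxchg(ip, old_bits, new_bits) != old_bits);
-}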
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute min(old, val) and store the minimum value
- * at the location pointed to by p. The function returns old.
- */
-int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute max(old, val) and store the maximum value
- * at the location pointed to by p. The function returns old.
- */
-int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
-#endif
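-
-// Illustrative sketch (not in the upstream header): tracking the running
-// minimum and maximum of a stream with atomic_min/atomic_max. Names are
-// hypothetical; the host initializes *lo to INT_MAX and *hi to INT_MIN.
-__kernel void value_range(__global const int *in, __global int *lo,
-                          __global int *hi) {
-    int v = in[get_global_id(0)];
-    atomic_min(lo, v);
-    atomic_max(hi, v);
-}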
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old & val) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old | val) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location
- * pointed to by p. Compute (old ^ val) and store the result at the
- * location pointed to by p. The function returns old.
- */
-int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
-#endif
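-
-// Illustrative sketch (not in the upstream header): maintaining a shared
-// bitmask with the extended atomics: atomic_or sets flags, atomic_and clears
-// them, atomic_xor toggles them. Names are hypothetical.
-__kernel void update_flags(__global uint *mask, uint set_bits,
-                           uint clear_bits, uint toggle_bits) {
-    atomic_or(mask, set_bits);
-    atomic_and(mask, ~clear_bits);
-    atomic_xor(mask, toggle_bits);
-}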
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
-#endif
-
-// OpenCL v2.0 s6.13.11 - Atomics Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Double-precision atomics require both the cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics extensions.
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-
-// atomic_init()
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_init(volatile __global atomic_int *object, int value);
-void __ovld atomic_init(volatile __local atomic_int *object, int value);
-void __ovld atomic_init(volatile __global atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __local atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __global atomic_float *object, float value);
-void __ovld atomic_init(volatile __local atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile __global atomic_long *object, long value);
-void __ovld atomic_init(volatile __local atomic_long *object, long value);
-void __ovld atomic_init(volatile __global atomic_ulong *object, ulong value);
-void __ovld atomic_init(volatile __local atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile __global atomic_double *object, double value);
-void __ovld atomic_init(volatile __local atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
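-
-// Illustrative sketch (not in the upstream header): atomic_init is the one
-// non-atomic store to an atomic object and must not race with other accesses,
-// so one work-item seeds the counter and a barrier publishes it before use.
-// Names are hypothetical.
-__kernel void count_nonzero(__global const int *in, __local atomic_int *n) {
-    if (get_local_id(0) == 0)
-        atomic_init(n, 0);         // plain store; no ordering guarantees
-    barrier(CLK_LOCAL_MEM_FENCE);  // make the seed visible to the group
-    if (in[get_global_id(0)] != 0)
-        atomic_fetch_add(n, 1);
-}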
-
-// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
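-
-// Illustrative sketch (not in the upstream header): a release/acquire pairing
-// built from relaxed atomics plus explicit fences. The producer publishes
-// data and then a flag; a consumer that observes the flag acquires the data.
-// Names are hypothetical, and the busy-wait is for illustration only.
-void publish(__global int *data, volatile __global atomic_int *flag, int v) {
-    *data = v;
-    atomic_work_item_fence(CLK_GLOBAL_MEM_FENCE, memory_order_release,
-                           memory_scope_device);
-    atomic_store_explicit(flag, 1, memory_order_relaxed, memory_scope_device);
-}
-int consume(const __global int *data, volatile __global atomic_int *flag) {
-    while (atomic_load_explicit(flag, memory_order_relaxed,
-                                memory_scope_device) == 0)
-        ;
-    atomic_work_item_fence(CLK_GLOBAL_MEM_FENCE, memory_order_acquire,
-                           memory_scope_device);
-    return *data;
-}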
-
-// atomic_fetch()
-// OpenCL v2.0 s6.13.11.7.5:
-// add/sub: the atomic type argument can be uintptr_t/intptr_t, and the value type argument can be ptrdiff_t.
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_add(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_sub(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_or(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_xor(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_and(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_min(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_max(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max(volatile __local atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_add(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_sub(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_sub(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_or(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_or(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_xor(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_xor(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_and(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_and(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_min(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_min(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_max(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_max(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *object, ulong operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
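-// Usage sketch (illustrative, not part of the upstream header; kernel name is
-// hypothetical): the one-argument fetch forms above are implicitly
-// memory_order_seq_cst at memory_scope_device, so a counter shared across
-// work-groups needs no order/scope arguments. Assumes the
-// __opencl_c_generic_address_space feature.
-//
-//   __kernel void count_hits(volatile __global atomic_uint *counter,
-//                            __global const float *data) {
-//     if (data[get_global_id(0)] > 0.5f)
-//       (void)atomic_fetch_add(counter, 1u); // returns the previous value
-//   }
-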
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_scope_device)
-
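-// Usage sketch (illustrative; kernel name hypothetical): the _explicit forms
-// with a memory_order argument keep device scope but let the caller relax
-// ordering; for a histogram bump where only atomicity matters,
-// memory_order_relaxed suffices.
-//
-//   __kernel void histogram(volatile __global atomic_uint *bins,
-//                           __global const uchar *values) {
-//     atomic_fetch_add_explicit(&bins[values[get_global_id(0)]], 1u,
-//                               memory_order_relaxed);
-//   }
-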
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
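-// Usage sketch (illustrative; kernel name hypothetical): the order+scope
-// forms can restrict visibility to one work-group, which is typically cheaper
-// than device scope when the atomic lives in __local memory.
-//
-//   __kernel void wg_sum(__global const int *in, __global int *out) {
-//     __local atomic_int acc;
-//     if (get_local_id(0) == 0)
-//       atomic_store_explicit(&acc, 0, memory_order_relaxed,
-//                             memory_scope_work_group);
-//     barrier(CLK_LOCAL_MEM_FENCE);
-//     atomic_fetch_add_explicit(&acc, in[get_global_id(0)],
-//                               memory_order_relaxed, memory_scope_work_group);
-//     barrier(CLK_LOCAL_MEM_FENCE);
-//     if (get_local_id(0) == 0)
-//       out[get_group_id(0)] = atomic_load_explicit(&acc, memory_order_relaxed,
-//                                                   memory_scope_work_group);
-//   }
-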
-// Functionality added by the cl_ext_float_atomics extension
-#if defined(cl_ext_float_atomics)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
-void __ovld atomic_store(volatile __global atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __global atomic_half *object);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __global atomic_half *object,
-                            half operand);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile __local atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __local atomic_half *object);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __local atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store) &&                   \
-    defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order, memory_scope scope);
-half __ovld atomic_load(volatile atomic_half *object);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order, memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
-       // defined(__opencl_c_ext_fp16_local_atomic_load_store)
-
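-// Usage sketch (illustrative; kernel name hypothetical, assumes cl_khr_fp16):
-// the fp16 load/store feature guarantees only atomic load, store and exchange
-// on atomic_half; read-modify-write needs the min/max or add features below.
-//
-//   __kernel void latest_sample(volatile __global atomic_half *slot,
-//                               __global const half *samples) {
-//     atomic_store(slot, samples[get_global_id(0)]);
-//   }
-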
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_max(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) &&
-       // defined(__opencl_c_ext_fp16_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_max(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) &&
-       // defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
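-// Usage sketch (illustrative; kernel name hypothetical): with the fp32
-// min/max features, a global running maximum needs no compare-exchange loop.
-//
-//   __kernel void running_max(__global const float *in,
-//                             volatile __global atomic_float *mx) {
-//     atomic_fetch_max(mx, in[get_global_id(0)]);
-//   }
-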
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_max(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) &&
-       // defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add)
-half __ovld atomic_fetch_add(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_sub(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add) &&
-       // defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add)
-float __ovld atomic_fetch_add(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
-
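-// Usage sketch (illustrative; kernel name hypothetical):
-// __opencl_c_ext_fp32_global_atomic_add lets partial results accumulate
-// directly into a __global float, avoiding an int-based compare-exchange
-// emulation loop.
-//
-//   __kernel void dot_partial(__global const float *a,
-//                             __global const float *b,
-//                             volatile __global atomic_float *sum) {
-//     size_t i = get_global_id(0);
-//     atomic_fetch_add(sum, a[i] * b[i]);
-//   }
-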
-#if defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_sub(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add) &&
-       // defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add)
-double __ovld atomic_fetch_add(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_sub(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#endif // cl_ext_float_atomics
-
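-// --- Editor's note: illustrative usage sketch, not part of the upstream
-// header. It shows how the cl_ext_float_atomics declarations above are
-// typically used; the kernel name and arguments are hypothetical.
-#if defined(__opencl_c_ext_fp32_global_atomic_add)
-// Accumulate into a global float without a hand-rolled compare-exchange
-// loop. A relaxed, device-scope add suffices for a pure reduction, since
-// only the final accumulated value is observed after the kernel finishes.
-__kernel void example_sum_fp32(__global atomic_float *acc,
-                               __global const float *in) {
-  size_t i = get_global_id(0);
-  atomic_fetch_add_explicit(acc, in[i], memory_order_relaxed,
-                            memory_scope_device);
-}
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
-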
-// atomic_store()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store(volatile __global atomic_int *object, int desired);
-void __ovld atomic_store(volatile __local atomic_int *object, int desired);
-void __ovld atomic_store(volatile __global atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __local atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __global atomic_float *object, float desired);
-void __ovld atomic_store(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_double *object, double desired);
-void __ovld atomic_store(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_long *object, long desired);
-void __ovld atomic_store(volatile __local atomic_long *object, long desired);
-void __ovld atomic_store(volatile __global atomic_ulong *object, ulong desired);
-void __ovld atomic_store(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
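-// --- Editor's note: illustrative usage sketch, not part of the upstream
-// header. atomic_store() is the seq_cst, device-scope form; the _explicit
-// overloads let a caller relax both. A hypothetical flag-publishing kernel:
-#if defined(__opencl_c_generic_address_space) && \
-    defined(__opencl_c_atomic_order_acq_rel)
-__kernel void example_publish(__global int *data, __global atomic_int *flag) {
-  if (get_global_id(0) == 0) {
-    data[0] = 42; // plain store of the payload
-    // The release store makes the payload visible to any agent that later
-    // performs an acquire load of the flag.
-    atomic_store_explicit(flag, 1, memory_order_release, memory_scope_device);
-  }
-}
-#endif
-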
-// atomic_load()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load(volatile atomic_int *object);
-uint __ovld atomic_load(volatile atomic_uint *object);
-float __ovld atomic_load(volatile atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load(volatile __global atomic_int *object);
-int __ovld atomic_load(volatile __local atomic_int *object);
-uint __ovld atomic_load(volatile __global atomic_uint *object);
-uint __ovld atomic_load(volatile __local atomic_uint *object);
-float __ovld atomic_load(volatile __global atomic_float *object);
-float __ovld atomic_load(volatile __local atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile __global atomic_double *object);
-double __ovld atomic_load(volatile __local atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile __global atomic_long *object);
-long __ovld atomic_load(volatile __local atomic_long *object);
-ulong __ovld atomic_load(volatile __global atomic_ulong *object);
-ulong __ovld atomic_load(volatile __local atomic_ulong *object);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order, memory_scope scope);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order, memory_scope scope);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order, memory_scope scope);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
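-// --- Editor's note: illustrative usage sketch, not part of the upstream
-// header; it pairs with the hypothetical release-store example above.
-#if defined(__opencl_c_generic_address_space) && \
-    defined(__opencl_c_atomic_order_acq_rel)
-__kernel void example_consume(__global const int *data,
-                              __global atomic_int *flag,
-                              __global int *out) {
-  // The acquire load pairs with the producer's release store, so data[0]
-  // is visible once the flag reads non-zero. (Spinning like this assumes a
-  // forward-progress guarantee that real GPU code should not rely on; it
-  // is only meant to show the API.)
-  while (atomic_load_explicit(flag, memory_order_acquire,
-                              memory_scope_device) == 0)
-    ;
-  out[get_global_id(0)] = data[0];
-}
-#endif
-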
-// atomic_exchange()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange(volatile __global atomic_int *object, int desired);
-int __ovld atomic_exchange(volatile __local atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile __global atomic_uint *object, uint desired);
-uint __ovld atomic_exchange(volatile __local atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile __global atomic_float *object, float desired);
-float __ovld atomic_exchange(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile __global atomic_double *object, double desired);
-double __ovld atomic_exchange(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile __global atomic_long *object, long desired);
-long __ovld atomic_exchange(volatile __local atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile __global atomic_ulong *object, ulong desired);
-ulong __ovld atomic_exchange(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
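-// --- Editor's note: illustrative usage sketch, not part of the upstream
-// header. atomic_exchange() returns the previous value, which makes it a
-// natural one-line "claim" primitive; the names below are hypothetical.
-#if defined(__opencl_c_generic_address_space) && \
-    defined(__opencl_c_atomic_order_seq_cst) && \
-    defined(__opencl_c_atomic_scope_device)
-__kernel void example_claim(__global atomic_int *claimed,
-                            __global int *winner) {
-  // Exactly one work-item observes the old value 0 and records itself.
-  if (atomic_exchange(claimed, 1) == 0)
-    *winner = (int)get_global_id(0);
-}
-#endif
-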
-// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __private float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __private double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
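-// --- Editor's note: illustrative usage sketch, not part of the upstream
-// header. The canonical retry loop: on failure, *expected is reloaded with
-// the current value, so each iteration compares against fresh data. Shown
-// as a hypothetical atomic float-max helper.
-#if defined(__opencl_c_generic_address_space) && \
-    defined(__opencl_c_atomic_order_seq_cst) && \
-    defined(__opencl_c_atomic_scope_device)
-void example_atomic_fmax(volatile atomic_float *obj, float val) {
-  float expected = atomic_load(obj);
-  // The weak form may fail spuriously, which is harmless in a retry loop
-  // and can be cheaper than the strong form on some hardware.
-  while (expected < val &&
-         !atomic_compare_exchange_weak(obj, &expected, val))
-    ; // on failure, expected now holds the latest stored value
-}
-#endif
-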
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
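-
-// Illustrative usage sketch, not part of the header proper: a typical
-// retry loop over the compare-exchange overloads declared above. The
-// kernel and variable names are hypothetical. On failure, 'expected' is
-// overwritten with the value actually observed, so the loop retries
-// against fresh data:
-//
-//   __kernel void double_counter(volatile __global atomic_int *counter) {
-//     int expected = atomic_load_explicit(counter, memory_order_relaxed,
-//                                         memory_scope_device);
-//     while (!atomic_compare_exchange_weak_explicit(
-//                counter, &expected, expected * 2,
-//                memory_order_acq_rel, memory_order_relaxed,
-//                memory_scope_device)) {
-//       // 'expected' now holds the current value; recompute and retry.
-//     }
-//   }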
-
-// atomic_flag_test_and_set() and atomic_flag_clear()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *object);
-bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __global atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __local atomic_flag *object);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
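-
-// Illustrative usage sketch, not part of the header proper: atomic_flag
-// driving a simple device-scope spinlock. The helper names are
-// hypothetical, and spinning like this can livelock between work-items
-// of the same sub-group on some devices, so treat it as a sketch only:
-//
-//   void lock(volatile __global atomic_flag *f) {
-//     while (atomic_flag_test_and_set_explicit(f, memory_order_acquire,
-//                                              memory_scope_device))
-//       ;  // flag was already set; keep spinning until it clears
-//   }
-//
-//   void unlock(volatile __global atomic_flag *f) {
-//     atomic_flag_clear_explicit(f, memory_order_release,
-//                                memory_scope_device);
-//   }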
-
-// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
-
-/**
- * The shuffle and shuffle2 built-in functions construct
- * a permutation of elements from one or two input
- * vectors respectively that are of the same type,
- * returning a vector with the same element type as the
- * input and a length that is the same as the shuffle mask.
- * The size of each element in the mask must match the
- * size of each element in the result. For shuffle, only
- * the ilogb(2m-1) least significant bits of each mask
- * element are considered. For shuffle2, only the
- * ilogb(2m-1)+1 least significant bits of each mask
- * element are considered. Other bits in the mask shall
- * be ignored.
- * The elements of the input vectors are numbered from
- * left to right across one or both of the vectors. For this
- * purpose, the number of elements in a vector is given
- * by vec_step(gentypem). The shuffle mask operand
- * specifies, for each element of the result vector, which
- * element of the one or two input vectors the result
- * element gets.
- * Examples:
- *   uint4 mask = (uint4)(3, 2, 1, 0);
- *   float4 a;
- *   float4 r = shuffle(a, mask);
- *   // r.s0123 = a.wzyx
- *
- *   uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
- *   float4 a, b;
- *   float8 r = shuffle2(a, b, mask);
- *   // r.s0123 = a.xyzw
- *   // r.s4567 = b.xyzw
- *
- *   uint4 mask;
- *   float8 a;
- *   float4 b;
- *   b = shuffle(a, mask);     // valid
- * Examples that are not valid are:
- *   uint8 mask;
- *   short16 a;
- *   short8 b;
- *   b = shuffle(a, mask);     // not valid: the mask element size
- *                             // does not match the result element size
- */
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
-#endif //cl_khr_fp16
-
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
-#endif //cl_khr_fp16
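-
-// Illustrative usage sketch, not part of the header proper: reversing a
-// float4 with shuffle and interleaving two float4 vectors with shuffle2,
-// where v, a, and b are hypothetical float4 values. Mask elements index
-// into x for shuffle, and into the eight-element concatenation of
-// (x, y) for shuffle2:
-//
-//   float4 rev = shuffle(v, (uint4)(3, 2, 1, 0));          // v.wzyx
-//   float8 zip = shuffle2(a, b, (uint8)(0, 4, 1, 5,
-//                                       2, 6, 3, 7));      // a.x, b.x, a.y, b.y, ...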
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-/**
- * Use the coordinate (coord.xy) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (coord.x, coord.y, coord.z) to do
- * an element lookup in the 3D image object specified
- * by image. coord.w is ignored.
- *
- * Use the coordinate (coord.z) to index into the
- * 2D image array object specified by image_array
- * and (coord.x, coord.y) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (x) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.y) to index into the
- * 1D image array object specified by image_array
- * and (coord.x) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.xy) and sample to do an
- * element lookup in the 2D multi-sample image specified
- * by image.
- *
- * Use coord.xy and sample to do an element
- * lookup in the 2D multi-sample image layer
- * identified by index coord.z in the 2D multi-sample
- * image array specified by image.
- *
- * For mipmap images, use the mip-level specified by
- * the Level-of-Detail (lod) or use gradients for LOD
- * computation.
- *
- * read_imagef returns floating-point values in the
- * range [0.0 ... 1.0] for image objects created with
- * image_channel_data_type set to one of the predefined
- * packed formats, CL_UNORM_INT8, or CL_UNORM_INT16.
- *
- * read_imagef returns floating-point values in the
- * range [-1.0 ... 1.0] for image objects created with
- * image_channel_data_type set to CL_SNORM_INT8
- * or CL_SNORM_INT16.
- *
- * read_imagef returns floating-point values for image
- * objects created with image_channel_data_type set to
- * CL_HALF_FLOAT or CL_FLOAT.
- *
- * read_imagei and read_imageui return
- * unnormalized signed integer and unsigned integer
- * values respectively. Each channel will be stored in a
- * 32-bit integer.
- *
- * read_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imagei
- * are undefined.
- *
- * read_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imageui
- * are undefined.
- *
- * The read_image{i|ui} calls support a nearest filter
- * only. The filter_mode specified in sampler
- * must be set to CLK_FILTER_NEAREST; otherwise
- * the values returned are undefined.
- *
- * The read_image{f|i|ui} calls that take
- * integer coordinates must use a sampler with
- * normalized coordinates set to
- * CLK_NORMALIZED_COORDS_FALSE and
- * addressing mode set to
- * CLK_ADDRESS_CLAMP_TO_EDGE,
- * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
- * otherwise the values returned are undefined.
- *
- * Values returned by read_imagef for image objects
- * with image_channel_data_type values not specified
- * in the description above are undefined.
- */
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
-
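-// Illustrative usage sketch, not part of the header proper: a sampler
-// consistent with the constraints documented above (nearest filtering,
-// unnormalized coordinates) used with the integer-coordinate overloads.
-// The kernel and parameter names are hypothetical:
-//
-//   __constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
-//                              CLK_ADDRESS_CLAMP_TO_EDGE |
-//                              CLK_FILTER_NEAREST;
-//
-//   __kernel void copy_px(read_only image2d_t src, __global float4 *dst,
-//                         int width) {
-//     int2 p = (int2)(get_global_id(0), get_global_id(1));
-//     dst[p.y * width + p.x] = read_imagef(src, smp, p);
-//   }
-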
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
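-
-// Illustrative usage sketch, not part of the header proper: resolving a
-// 2D multi-sample image (cl_khr_gl_msaa_sharing) by averaging all samples
-// at one texel. The function name and 'nsamples', which would come from
-// the host, are hypothetical:
-//
-//   float4 resolve(read_only image2d_msaa_t img, int2 coord, int nsamples) {
-//     float4 acc = (float4)(0.0f);
-//     for (int s = 0; s < nsamples; ++s)
-//       acc += read_imagef(img, coord, s);
-//     return acc / (float)nsamples;
-//   }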
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
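-// Illustrative sketch (not part of the header): reading from an explicit mip
-// level with the lod overloads above, assuming cl_khr_mipmap_image and an
-// image created with mip levels. "sample_lod" is a hypothetical kernel name.
-//
-//   __constant sampler_t mip_smp = CLK_NORMALIZED_COORDS_TRUE |
-//                                  CLK_ADDRESS_CLAMP_TO_EDGE |
-//                                  CLK_FILTER_LINEAR;
-//
-//   __kernel void sample_lod(read_only image2d_t src,
-//                            __global float4 *dst, float lod) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       float2 uv = (convert_float2(p) + 0.5f) /
-//                   convert_float2(get_image_dim(src));
-//       dst[p.y * get_global_size(0) + p.x] =
-//           read_imagef(src, mip_smp, uv, lod);
-//   }
-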
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-/**
- * Sampler-less Image Access
- */
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
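-// Illustrative sketch (not part of the header): sampler-less reads take
-// integer coordinates directly and behave as if the sampler used
-// CLK_ADDRESS_NONE, unnormalized coordinates and CLK_FILTER_NEAREST, so the
-// coordinates must be in range. "copy_image" is a hypothetical kernel name.
-//
-//   __kernel void copy_image(read_only image2d_t src,
-//                            write_only image2d_t dst) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       write_imagef(dst, p, read_imagef(src, p));
-//   }
-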
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
-/**
- * Sampler-less Image Access
- */
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-#endif //cl_khr_fp16
-
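-// Illustrative sketch (not part of the header): read_imageh returns half4 and
-// requires cl_khr_fp16; the example assumes an image whose channel data type
-// is CL_HALF_FLOAT. "load_halves" is a hypothetical kernel name.
-//
-//   #pragma OPENCL EXTENSION cl_khr_fp16 : enable
-//
-//   __kernel void load_halves(read_only image1d_t src, __global half4 *dst) {
-//       int x = get_global_id(0);
-//       dst[x] = read_imageh(src, x);   // sampler-less 1D read
-//   }
-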
-// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by image.
- * (coord.x, coord.y) are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by index
- * (coord.z) of the 2D image array object image_array.
- * (coord.x, coord.y) are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width
- * - 1, and 0 ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord) in the 1D image (buffer) object specified by image.
- * coord is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x) in the 1D image object specified by index
- * (coord.y) of the 1D image array object image_array.
- * coord.x is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
- * coord.x & coord.y are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * For mipmap images, use mip-level specified by lod.
- *
- * Appropriate data format conversion to the specified
- * image format is done before writing the color value.
- *
- * write_imagef can only be used with image objects
- * created with image_channel_data_type set to one of
- * the pre-defined packed formats or set to
- * CL_SNORM_INT8, CL_UNORM_INT8,
- * CL_SNORM_INT16, CL_UNORM_INT16,
- * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
- * format conversion will be done to convert channel
- * data from a floating-point value to the actual data
- * format in which the channels are stored.
- *
- * write_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- *
- * write_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- *
- * The behavior of write_imagef, write_imagei and
- * write_imageui for image objects created with
- * image_channel_data_type values not specified in
- * the description above or with (x, y) coordinate
- * values that are not in the range (0 ... image width - 1,
- * 0 ... image height - 1), respectively, is undefined.
- */
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
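-// Illustrative sketch (not part of the header): a minimal write_imagef use,
-// assuming the image was created with a floating-point-compatible channel
-// data type such as CL_FLOAT. "fill" is a hypothetical kernel name.
-//
-//   __kernel void fill(write_only image2d_t img, float4 value) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       if (p.x < get_image_width(img) && p.y < get_image_height(img))
-//           write_imagef(img, p, value);
-//   }
-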
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //defined(cl_khr_mipmap_image_writes)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
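-// Illustrative sketch (not part of the header): the lod overloads above write
-// to a single mip level; coordinates are interpreted relative to the
-// dimensions of that level. Assumes cl_khr_mipmap_image_writes;
-// "clear_level" is a hypothetical kernel name.
-//
-//   __kernel void clear_level(write_only image2d_t img, int lod) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       write_imagef(img, p, lod, (float4)(0.0f));
-//   }
-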
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-
-// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions has no
-// access qualifier and therefore defaults to read_only. Image query builtin functions taking a
-// write_only image argument consequently also need to be declared.
-
-/**
- * Return the image width in pixels.
- */
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image height in pixels.
- */
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_height(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image depth in pixels.
- */
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
-
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
-#endif
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-/**
- * Return the number of mip levels in the image.
- */
-
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
-#endif
-
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
-
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the channel data type. Valid values are:
- * CLK_SNORM_INT8
- * CLK_SNORM_INT16
- * CLK_UNORM_INT8
- * CLK_UNORM_INT16
- * CLK_UNORM_SHORT_565
- * CLK_UNORM_SHORT_555
- * CLK_UNORM_SHORT_101010
- * CLK_SIGNED_INT8
- * CLK_SIGNED_INT16
- * CLK_SIGNED_INT32
- * CLK_UNSIGNED_INT8
- * CLK_UNSIGNED_INT16
- * CLK_UNSIGNED_INT32
- * CLK_HALF_FLOAT
- * CLK_FLOAT
- */
-
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
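-// Illustrative sketch (not part of the header): the queried CLK_* value can
-// be used to pick the matching read function at run time. "to_float" is a
-// hypothetical kernel name.
-//
-//   __kernel void to_float(read_only image2d_t src, __global float4 *dst) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       int t = get_image_channel_data_type(src);
-//       float4 v;
-//       if (t == CLK_UNSIGNED_INT8 || t == CLK_UNSIGNED_INT16 ||
-//           t == CLK_UNSIGNED_INT32)
-//           v = convert_float4(read_imageui(src, p));
-//       else if (t == CLK_SIGNED_INT8 || t == CLK_SIGNED_INT16 ||
-//                t == CLK_SIGNED_INT32)
-//           v = convert_float4(read_imagei(src, p));
-//       else
-//           v = read_imagef(src, p);   // normalized, half or float formats
-//       dst[p.y * get_global_size(0) + p.x] = v;
-//   }
-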
-/**
- * Return the image channel order. Valid values are:
- * CLK_A
- * CLK_R
- * CLK_Rx
- * CLK_RG
- * CLK_RGx
- * CLK_RA
- * CLK_RGB
- * CLK_RGBx
- * CLK_RGBA
- * CLK_ARGB
- * CLK_BGRA
- * CLK_INTENSITY
- * CLK_LUMINANCE
- */
-
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the 2D image width and height as an int2
- * type. The width is returned in the x component, and
- * the height in the y component.
- */
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
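-// Illustrative sketch (not part of the header): get_image_dim is convenient
-// for bounds checks when the NDRange is rounded up past the image size.
-// Assumes OpenCL C 2.0 read_write images; "invert" is a hypothetical name.
-//
-//   __kernel void invert(read_write image2d_t img) {
-//       int2 p = (int2)(get_global_id(0), get_global_id(1));
-//       int2 dim = get_image_dim(img);              // (width, height)
-//       if (p.x < dim.x && p.y < dim.y) {
-//           float4 c = read_imagef(img, p);
-//           write_imagef(img, p, (float4)(1.0f) - c);
-//       }
-//   }
-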
-/**
- * Return the 3D image width, height, and depth as an
- * int4 type. The width is returned in the x
- * component, height in the y component, depth in the z
- * component and the w component is 0.
- */
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
-#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
-#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image array size.
- */
-
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the number of samples associated with the image.
- */
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
-
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#endif //cl_khr_gl_msaa_sharing
-
-// OpenCL v2.0 s6.13.15 - Work-group Functions
-
-#if defined(__opencl_c_work_group_collective_functions)
-int __ovld __conv work_group_all(int predicate);
-int __ovld __conv work_group_any(int predicate);
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
-#endif
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
-#endif
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(__opencl_c_work_group_collective_functions)
-
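-// Illustrative sketch (not part of the header): every work-item in the group
-// receives the result of work_group_reduce_add, so a single item stores the
-// per-group sum. "block_sums" is a hypothetical kernel name.
-//
-//   __kernel void block_sums(__global const float *in, __global float *out) {
-//       float sum = work_group_reduce_add(in[get_global_id(0)]);
-//       if (get_local_id(0) == 0)
-//           out[get_group_id(0)] = sum;
-//   }
-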
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__opencl_c_pipes)
-bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //defined(__opencl_c_pipes)
-
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ndrange_t __ovld ndrange_1D(size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
-
-ndrange_t __ovld ndrange_2D(const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
-
-ndrange_t __ovld ndrange_3D(const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
-
-int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
-
-void __ovld retain_event(clk_event_t);
-
-void __ovld release_event(clk_event_t);
-
-clk_event_t __ovld create_user_event(void);
-
-void __ovld set_user_event_status(clk_event_t e, int state);
-
-bool __ovld is_valid_event(clk_event_t event);
-
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
-
-queue_t __ovld get_default_queue(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
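-// Illustrative sketch (not part of the header): ndrange_1D and
-// get_default_queue combine with the enqueue_kernel builtin (part of the same
-// OpenCL C 2.0 feature set) for device-side enqueue. "parent" is a
-// hypothetical kernel name.
-//
-//   __kernel void parent(__global int *data, uint n) {
-//       queue_t q = get_default_queue();
-//       ndrange_t nd = ndrange_1D((size_t)n);
-//       enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, nd,
-//                      ^{ data[get_global_id(0)] *= 2; });
-//   }
-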
-// OpenCL Extension v2.0 s9.17 - Sub-groups
-
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-// Shared Sub Group Functions
-uint    __ovld get_sub_group_size(void);
-uint    __ovld get_max_sub_group_size(void);
-uint    __ovld get_num_sub_groups(void);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_enqueued_num_sub_groups(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_sub_group_id(void);
-uint    __ovld get_sub_group_local_id(void);
-
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-int     __ovld __conv sub_group_all(int predicate);
-int     __ovld __conv sub_group_any(int predicate);
-
-int     __ovld __conv sub_group_broadcast(int   x, uint sub_group_local_id);
-uint    __ovld __conv sub_group_broadcast(uint  x, uint sub_group_local_id);
-long    __ovld __conv sub_group_broadcast(long  x, uint sub_group_local_id);
-ulong   __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float   __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int     __ovld __conv sub_group_reduce_add(int   x);
-uint    __ovld __conv sub_group_reduce_add(uint  x);
-long    __ovld __conv sub_group_reduce_add(long  x);
-ulong   __ovld __conv sub_group_reduce_add(ulong x);
-float   __ovld __conv sub_group_reduce_add(float x);
-int     __ovld __conv sub_group_reduce_min(int   x);
-uint    __ovld __conv sub_group_reduce_min(uint  x);
-long    __ovld __conv sub_group_reduce_min(long  x);
-ulong   __ovld __conv sub_group_reduce_min(ulong x);
-float   __ovld __conv sub_group_reduce_min(float x);
-int     __ovld __conv sub_group_reduce_max(int   x);
-uint    __ovld __conv sub_group_reduce_max(uint  x);
-long    __ovld __conv sub_group_reduce_max(long  x);
-ulong   __ovld __conv sub_group_reduce_max(ulong x);
-float   __ovld __conv sub_group_reduce_max(float x);
-
-int     __ovld __conv sub_group_scan_exclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_add(float x);
-int     __ovld __conv sub_group_scan_exclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_min(float x);
-int     __ovld __conv sub_group_scan_exclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int     __ovld __conv sub_group_scan_inclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_add(float x);
-int     __ovld __conv sub_group_scan_inclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_min(float x);
-int     __ovld __conv sub_group_scan_inclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_max(float x);
-
-#ifdef cl_khr_fp16
-half    __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half    __ovld __conv sub_group_reduce_add(half x);
-half    __ovld __conv sub_group_reduce_min(half x);
-half    __ovld __conv sub_group_reduce_max(half x);
-half    __ovld __conv sub_group_scan_exclusive_add(half x);
-half    __ovld __conv sub_group_scan_exclusive_min(half x);
-half    __ovld __conv sub_group_scan_exclusive_max(half x);
-half    __ovld __conv sub_group_scan_inclusive_add(half x);
-half    __ovld __conv sub_group_scan_inclusive_min(half x);
-half    __ovld __conv sub_group_scan_inclusive_max(half x);
-#endif //cl_khr_fp16
-
-#ifdef cl_khr_fp64
-double  __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double  __ovld __conv sub_group_reduce_add(double x);
-double  __ovld __conv sub_group_reduce_min(double x);
-double  __ovld __conv sub_group_reduce_max(double x);
-double  __ovld __conv sub_group_scan_exclusive_add(double x);
-double  __ovld __conv sub_group_scan_exclusive_min(double x);
-double  __ovld __conv sub_group_scan_exclusive_max(double x);
-double  __ovld __conv sub_group_scan_inclusive_add(double x);
-double  __ovld __conv sub_group_scan_inclusive_min(double x);
-double  __ovld __conv sub_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-
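-// Usage sketch (illustrative only; the kernel below is hypothetical), assuming
-// cl_khr_subgroups: each sub-group computes a partial sum and its first
-// work-item writes the result:
-//
-//   kernel void sg_partial_sums(global const float* in, global float* out) {
-//     float v = in[get_global_id(0)];
-//     float total = sub_group_reduce_add(v);               // uniform per sub-group
-//     if (get_sub_group_local_id() == 0)
-//       out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = total;
-//   }
-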
-#if defined(cl_khr_subgroup_extended_types)
-char __ovld __conv sub_group_broadcast( char value, uint index );
-char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
-char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
-char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
-char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
-char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
-
-uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
-uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
-uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
-uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
-uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
-uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
-
-short __ovld __conv sub_group_broadcast( short value, uint index );
-short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
-short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
-short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
-short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
-short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
-
-ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
-ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
-ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
-ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
-ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
-ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
-
-// scalar int broadcast is part of cl_khr_subgroups
-int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
-int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
-int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
-int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
-int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
-
-// scalar uint broadcast is part of cl_khr_subgroups
-uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
-uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
-uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
-uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
-uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
-
-// scalar long broadcast is part of cl_khr_subgroups
-long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
-long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
-long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
-long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
-long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
-
-// scalar ulong broadcast is part of cl_khr_subgroups
-ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
-ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
-ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
-ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
-ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
-
-// scalar float broadcast is part of cl_khr_subgroups
-float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
-float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
-float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
-float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
-float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
-
-char __ovld __conv sub_group_reduce_add( char value );
-uchar __ovld __conv sub_group_reduce_add( uchar value );
-short __ovld __conv sub_group_reduce_add( short value );
-ushort __ovld __conv sub_group_reduce_add( ushort value );
-
-char __ovld __conv sub_group_reduce_min( char value );
-uchar __ovld __conv sub_group_reduce_min( uchar value );
-short __ovld __conv sub_group_reduce_min( short value );
-ushort __ovld __conv sub_group_reduce_min( ushort value );
-
-char __ovld __conv sub_group_reduce_max( char value );
-uchar __ovld __conv sub_group_reduce_max( uchar value );
-short __ovld __conv sub_group_reduce_max( short value );
-ushort __ovld __conv sub_group_reduce_max( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_add( char value );
-uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
-short __ovld __conv sub_group_scan_inclusive_add( short value );
-ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_min( char value );
-uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
-short __ovld __conv sub_group_scan_inclusive_min( short value );
-ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_max( char value );
-uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
-short __ovld __conv sub_group_scan_inclusive_max( short value );
-ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_add( char value );
-uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
-short __ovld __conv sub_group_scan_exclusive_add( short value );
-ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_min( char value );
-uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
-short __ovld __conv sub_group_scan_exclusive_min( short value );
-ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_max( char value );
-uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
-short __ovld __conv sub_group_scan_exclusive_max( short value );
-ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
-
-#if defined(cl_khr_fp16)
-// scalar half broadcast is part of cl_khr_subgroups
-half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
-half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
-half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
-half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
-half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
-#endif  // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-// scalar double broadcast is part of cl_khr_subgroups
-double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
-double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
-double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
-double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
-double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
-#endif  // cl_khr_fp64
-
-#endif  // cl_khr_subgroup_extended_types
-
-#if defined(cl_khr_subgroup_non_uniform_vote)
-int     __ovld sub_group_elect(void);
-int     __ovld sub_group_non_uniform_all( int predicate );
-int     __ovld sub_group_non_uniform_any( int predicate );
-
-int     __ovld sub_group_non_uniform_all_equal( char value );
-int     __ovld sub_group_non_uniform_all_equal( uchar value );
-int     __ovld sub_group_non_uniform_all_equal( short value );
-int     __ovld sub_group_non_uniform_all_equal( ushort value );
-int     __ovld sub_group_non_uniform_all_equal( int value );
-int     __ovld sub_group_non_uniform_all_equal( uint value );
-int     __ovld sub_group_non_uniform_all_equal( long value );
-int     __ovld sub_group_non_uniform_all_equal( ulong value );
-int     __ovld sub_group_non_uniform_all_equal( float value );
-
-#if defined(cl_khr_fp16)
-int     __ovld sub_group_non_uniform_all_equal( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-int     __ovld sub_group_non_uniform_all_equal( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_vote
-
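-// Usage sketch (illustrative only; the kernel below is hypothetical).
-// sub_group_elect() returns true for exactly one active work-item, which is
-// handy for once-per-sub-group work inside divergent control flow:
-//
-//   kernel void vote_demo(global const int* in, global int* out) {
-//     int v = in[get_global_id(0)];
-//     // 1 if all active work-items in this sub-group hold the same value:
-//     out[get_global_id(0)] = sub_group_non_uniform_all_equal(v);
-//     if (v > 0 && sub_group_elect())                      // one item per active set
-//       out[get_global_id(0)] = -out[get_global_id(0)];    // mark the elected lane
-//   }
-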
-#if defined(cl_khr_subgroup_ballot)
-char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
-char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
-char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
-char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
-char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
-char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
-
-uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
-uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
-uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
-uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
-uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
-uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
-
-short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
-short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
-short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
-short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
-short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
-short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
-
-ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
-ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
-ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
-ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
-ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
-ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
-
-int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
-int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
-int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
-int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
-int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
-int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
-
-uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
-uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
-uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
-uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
-uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
-uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
-
-long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
-long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
-long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
-long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
-long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
-long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
-
-ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
-ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
-ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
-ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
-ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
-ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
-
-float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
-float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
-float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
-float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
-float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
-float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
-
-char    __ovld sub_group_broadcast_first( char value );
-uchar   __ovld sub_group_broadcast_first( uchar value );
-short   __ovld sub_group_broadcast_first( short value );
-ushort  __ovld sub_group_broadcast_first( ushort value );
-int     __ovld sub_group_broadcast_first( int value );
-uint    __ovld sub_group_broadcast_first( uint value );
-long    __ovld sub_group_broadcast_first( long value );
-ulong   __ovld sub_group_broadcast_first( ulong value );
-float   __ovld sub_group_broadcast_first( float value );
-
-uint4   __ovld sub_group_ballot( int predicate );
-int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
-int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
-uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
-
-uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_find_lsb( uint4 value );
-uint    __ovld sub_group_ballot_find_msb( uint4 value );
-
-uint4   __ovld __cnfn get_sub_group_eq_mask(void);
-uint4   __ovld __cnfn get_sub_group_ge_mask(void);
-uint4   __ovld __cnfn get_sub_group_gt_mask(void);
-uint4   __ovld __cnfn get_sub_group_le_mask(void);
-uint4   __ovld __cnfn get_sub_group_lt_mask(void);
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
-half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
-half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
-half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
-half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
-half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
-
-half    __ovld sub_group_broadcast_first( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
-double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
-double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
-double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
-double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
-double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
-
-double   __ovld sub_group_broadcast_first( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_ballot
-
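-// Usage sketch (illustrative only; the kernel below is hypothetical): counting
-// the work-items in a sub-group that satisfy a predicate via the ballot mask:
-//
-//   kernel void count_hits(global const float* in, global uint* out) {
-//     uint4 mask = sub_group_ballot(in[get_global_id(0)] > 0.5f);
-//     uint hits = sub_group_ballot_bit_count(mask);        // uniform per sub-group
-//     if (get_sub_group_local_id() == 0)
-//       out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = hits;
-//   }
-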
-#if defined(cl_khr_subgroup_non_uniform_arithmetic)
-char    __ovld sub_group_non_uniform_reduce_add( char value );
-uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
-short   __ovld sub_group_non_uniform_reduce_add( short value );
-ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
-int     __ovld sub_group_non_uniform_reduce_add( int value );
-uint    __ovld sub_group_non_uniform_reduce_add( uint value );
-long    __ovld sub_group_non_uniform_reduce_add( long value );
-ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
-float   __ovld sub_group_non_uniform_reduce_add( float value );
-
-char    __ovld sub_group_non_uniform_reduce_mul( char value );
-uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
-short   __ovld sub_group_non_uniform_reduce_mul( short value );
-ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
-int     __ovld sub_group_non_uniform_reduce_mul( int value );
-uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
-long    __ovld sub_group_non_uniform_reduce_mul( long value );
-ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
-float   __ovld sub_group_non_uniform_reduce_mul( float value );
-
-char    __ovld sub_group_non_uniform_reduce_min( char value );
-uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
-short   __ovld sub_group_non_uniform_reduce_min( short value );
-ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
-int     __ovld sub_group_non_uniform_reduce_min( int value );
-uint    __ovld sub_group_non_uniform_reduce_min( uint value );
-long    __ovld sub_group_non_uniform_reduce_min( long value );
-ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
-float   __ovld sub_group_non_uniform_reduce_min( float value );
-
-char    __ovld sub_group_non_uniform_reduce_max( char value );
-uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
-short   __ovld sub_group_non_uniform_reduce_max( short value );
-ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
-int     __ovld sub_group_non_uniform_reduce_max( int value );
-uint    __ovld sub_group_non_uniform_reduce_max( uint value );
-long    __ovld sub_group_non_uniform_reduce_max( long value );
-ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
-float   __ovld sub_group_non_uniform_reduce_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_reduce_and( char value );
-uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
-short   __ovld sub_group_non_uniform_reduce_and( short value );
-ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
-int     __ovld sub_group_non_uniform_reduce_and( int value );
-uint    __ovld sub_group_non_uniform_reduce_and( uint value );
-long    __ovld sub_group_non_uniform_reduce_and( long value );
-ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_or( char value );
-uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
-short   __ovld sub_group_non_uniform_reduce_or( short value );
-ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
-int     __ovld sub_group_non_uniform_reduce_or( int value );
-uint    __ovld sub_group_non_uniform_reduce_or( uint value );
-long    __ovld sub_group_non_uniform_reduce_or( long value );
-ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_xor( char value );
-uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
-short   __ovld sub_group_non_uniform_reduce_xor( short value );
-ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
-int     __ovld sub_group_non_uniform_reduce_xor( int value );
-uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
-long    __ovld sub_group_non_uniform_reduce_xor( long value );
-ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
-
-int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_reduce_add( half value );
-half    __ovld sub_group_non_uniform_reduce_mul( half value );
-half    __ovld sub_group_non_uniform_reduce_min( half value );
-half    __ovld sub_group_non_uniform_reduce_max( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_non_uniform_reduce_add( double value );
-double  __ovld sub_group_non_uniform_reduce_mul( double value );
-double  __ovld sub_group_non_uniform_reduce_min( double value );
-double  __ovld sub_group_non_uniform_reduce_max( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_arithmetic
-
-#if defined(cl_khr_subgroup_shuffle)
-char    __ovld sub_group_shuffle( char value, uint index );
-uchar   __ovld sub_group_shuffle( uchar value, uint index );
-short   __ovld sub_group_shuffle( short value, uint index );
-ushort  __ovld sub_group_shuffle( ushort value, uint index );
-int     __ovld sub_group_shuffle( int value, uint index );
-uint    __ovld sub_group_shuffle( uint value, uint index );
-long    __ovld sub_group_shuffle( long value, uint index );
-ulong   __ovld sub_group_shuffle( ulong value, uint index );
-float   __ovld sub_group_shuffle( float value, uint index );
-
-char    __ovld sub_group_shuffle_xor( char value, uint mask );
-uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
-short   __ovld sub_group_shuffle_xor( short value, uint mask );
-ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
-int     __ovld sub_group_shuffle_xor( int value, uint mask );
-uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
-long    __ovld sub_group_shuffle_xor( long value, uint mask );
-ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
-float   __ovld sub_group_shuffle_xor( float value, uint mask );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle( half value, uint index );
-half    __ovld sub_group_shuffle_xor( half value, uint mask );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle( double value, uint index );
-double  __ovld sub_group_shuffle_xor( double value, uint mask );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle
-
-#if defined(cl_khr_subgroup_shuffle_relative)
-char    __ovld sub_group_shuffle_up( char value, uint delta );
-uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
-short   __ovld sub_group_shuffle_up( short value, uint delta );
-ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
-int     __ovld sub_group_shuffle_up( int value, uint delta );
-uint    __ovld sub_group_shuffle_up( uint value, uint delta );
-long    __ovld sub_group_shuffle_up( long value, uint delta );
-ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
-float   __ovld sub_group_shuffle_up( float value, uint delta );
-
-char    __ovld sub_group_shuffle_down( char value, uint delta );
-uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
-short   __ovld sub_group_shuffle_down( short value, uint delta );
-ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
-int     __ovld sub_group_shuffle_down( int value, uint delta );
-uint    __ovld sub_group_shuffle_down( uint value, uint delta );
-long    __ovld sub_group_shuffle_down( long value, uint delta );
-ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
-float   __ovld sub_group_shuffle_down( float value, uint delta );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle_up( half value, uint delta );
-half    __ovld sub_group_shuffle_down( half value, uint delta );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle_up( double value, uint delta );
-double  __ovld sub_group_shuffle_down( double value, uint delta );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle_relative
-
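-// Usage sketch (illustrative only; the kernel below is hypothetical): a forward
-// difference within each sub-group. Shuffling past the last active work-item
-// yields an undefined value, so that lane is masked off:
-//
-//   kernel void forward_diff(global const float* in, global float* out) {
-//     float v = in[get_global_id(0)];
-//     float next = sub_group_shuffle_down(v, 1u);          // value from lane id + 1
-//     if (get_sub_group_local_id() + 1u < get_sub_group_size())
-//       out[get_global_id(0)] = next - v;
-//   }
-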
-#if defined(cl_khr_subgroup_clustered_reduce)
-char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
-
-int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_clustered_reduce
-
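-// Usage sketch (illustrative only; the kernel below is hypothetical): every
-// work-item in a cluster of four consecutive sub-group lanes receives that
-// cluster's maximum; the cluster size is a compile-time power of two:
-//
-//   kernel void quad_max(global const float* in, global float* out) {
-//     out[get_global_id(0)] = sub_group_clustered_reduce_max(in[get_global_id(0)], 4u);
-//   }
-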
-#if defined(cl_khr_extended_bit_ops)
-char __ovld __cnfn bitfield_insert(char, char, uint, uint);
-uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
-short __ovld __cnfn bitfield_insert(short, short, uint, uint);
-ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
-int __ovld __cnfn bitfield_insert(int, int, uint, uint);
-uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
-long __ovld __cnfn bitfield_insert(long, long, uint, uint);
-ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
-char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
-uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
-ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
-uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
-long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
-ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
-uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
-ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
-uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
-long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
-ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
-uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
-ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
-uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
-long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
-ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
-uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
-ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
-uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
-long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
-ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
-uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
-ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
-uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
-long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
-ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
-
-char __ovld __cnfn bit_reverse(char);
-uchar __ovld __cnfn bit_reverse(uchar);
-short __ovld __cnfn bit_reverse(short);
-ushort __ovld __cnfn bit_reverse(ushort);
-int __ovld __cnfn bit_reverse(int);
-uint __ovld __cnfn bit_reverse(uint);
-long __ovld __cnfn bit_reverse(long);
-ulong __ovld __cnfn bit_reverse(ulong);
-char2 __ovld __cnfn bit_reverse(char2);
-uchar2 __ovld __cnfn bit_reverse(uchar2);
-short2 __ovld __cnfn bit_reverse(short2);
-ushort2 __ovld __cnfn bit_reverse(ushort2);
-int2 __ovld __cnfn bit_reverse(int2);
-uint2 __ovld __cnfn bit_reverse(uint2);
-long2 __ovld __cnfn bit_reverse(long2);
-ulong2 __ovld __cnfn bit_reverse(ulong2);
-char3 __ovld __cnfn bit_reverse(char3);
-uchar3 __ovld __cnfn bit_reverse(uchar3);
-short3 __ovld __cnfn bit_reverse(short3);
-ushort3 __ovld __cnfn bit_reverse(ushort3);
-int3 __ovld __cnfn bit_reverse(int3);
-uint3 __ovld __cnfn bit_reverse(uint3);
-long3 __ovld __cnfn bit_reverse(long3);
-ulong3 __ovld __cnfn bit_reverse(ulong3);
-char4 __ovld __cnfn bit_reverse(char4);
-uchar4 __ovld __cnfn bit_reverse(uchar4);
-short4 __ovld __cnfn bit_reverse(short4);
-ushort4 __ovld __cnfn bit_reverse(ushort4);
-int4 __ovld __cnfn bit_reverse(int4);
-uint4 __ovld __cnfn bit_reverse(uint4);
-long4 __ovld __cnfn bit_reverse(long4);
-ulong4 __ovld __cnfn bit_reverse(ulong4);
-char8 __ovld __cnfn bit_reverse(char8);
-uchar8 __ovld __cnfn bit_reverse(uchar8);
-short8 __ovld __cnfn bit_reverse(short8);
-ushort8 __ovld __cnfn bit_reverse(ushort8);
-int8 __ovld __cnfn bit_reverse(int8);
-uint8 __ovld __cnfn bit_reverse(uint8);
-long8 __ovld __cnfn bit_reverse(long8);
-ulong8 __ovld __cnfn bit_reverse(ulong8);
-char16 __ovld __cnfn bit_reverse(char16);
-uchar16 __ovld __cnfn bit_reverse(uchar16);
-short16 __ovld __cnfn bit_reverse(short16);
-ushort16 __ovld __cnfn bit_reverse(ushort16);
-int16 __ovld __cnfn bit_reverse(int16);
-uint16 __ovld __cnfn bit_reverse(uint16);
-long16 __ovld __cnfn bit_reverse(long16);
-ulong16 __ovld __cnfn bit_reverse(ulong16);
-#endif // cl_khr_extended_bit_ops
-
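-// Usage sketch (illustrative only; the kernel below is hypothetical): writing a
-// 5-bit field at bit offset 3 with bitfield_insert(base, insert, offset, count)
-// and reading it back:
-//
-//   kernel void pack_demo(global uint* words, global const uint* fields,
-//                         global uint* check) {
-//     size_t i = get_global_id(0);
-//     words[i] = bitfield_insert(words[i], fields[i], 3u, 5u); // bits [3..7]
-//     check[i] = bitfield_extract_unsigned(words[i], 3u, 5u);  // fields[i] & 0x1Fu
-//   }
-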
-#if defined(__opencl_c_integer_dot_product_input_4x8bit)
-uint __ovld __cnfn dot(uchar4, uchar4);
-int __ovld __cnfn dot(char4, char4);
-int __ovld __cnfn dot(uchar4, char4);
-int __ovld __cnfn dot(char4, uchar4);
-
-uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
-int __ovld __cnfn dot_acc_sat(char4, char4, int);
-int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
-int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit
-
-#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
-uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
-int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
-
-uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
-int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
-
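-// Usage sketch (illustrative only; the kernel below is hypothetical): an 8-bit
-// integer dot product with saturating accumulation, e.g. for int8 inference
-// workloads:
-//
-//   kernel void dot4_acc(global const char4* a, global const char4* b,
-//                        global int* acc) {
-//     size_t i = get_global_id(0);
-//     acc[i] = dot_acc_sat(a[i], b[i], acc[i]);            // acc[i] + dot(a, b), saturated
-//   }
-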
-#if defined(cl_intel_subgroups)
-// Intel-Specific Sub Group Functions
-float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_down( long cur, long next, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_down( ulong cur, ulong next, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_xor( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_xor( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_xor( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
-
-uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
-uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
-uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
-uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
-
-void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
-void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
-void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
-void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
-
-#ifdef cl_khr_fp16
-half    __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half    __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
-#endif
-
-#if defined(cl_khr_fp64)
-double  __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double  __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
-#endif
-
-#endif //cl_intel_subgroups
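// Editor's sketch (not in the original header): a butterfly sum over one
// sub-group built from intel_sub_group_shuffle_xor, assuming
// cl_intel_subgroups plus the Khronos sub-group query built-ins and a
// power-of-two sub-group size. Names are illustrative.
__kernel void subgroup_sum(__global const float* in, __global float* out) {
    float v = in[get_global_id(0)];
    for (uint mask = get_sub_group_size() / 2u; mask > 0u; mask /= 2u)
        v += intel_sub_group_shuffle_xor(v, mask);
    if (get_sub_group_local_id() == 0u)
        out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = v;
}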
-
-#if defined(cl_intel_subgroups_short)
-short       __ovld __conv intel_sub_group_broadcast( short  x, uint sub_group_local_id );
-short2      __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3      __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4      __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8      __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort      __ovld __conv intel_sub_group_broadcast( ushort  x, uint sub_group_local_id );
-ushort2     __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3     __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4     __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8     __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short       __ovld __conv intel_sub_group_shuffle( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_up( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_up( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_up( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_up( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_up( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_xor( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_xor( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_xor( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_xor( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_xor( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_reduce_add( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_min( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_max( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
-
-uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
-
-void       __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
-
-ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
-
-void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
-void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
-void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
-void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
-#endif // cl_intel_subgroups_short
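// Editor's sketch: the _us block built-ins move one ushort per work-item in a
// single coalesced sub-group transaction; the pointer must be uniform across
// the sub-group. Assumes cl_intel_subgroups_short, the Khronos sub-group
// queries, and one sub-group per work-group; names are illustrative.
__kernel void copy_us(const __global ushort* src, __global ushort* dst) {
    size_t base = get_group_id(0) * get_max_sub_group_size();
    ushort v = intel_sub_group_block_read_us(src + base);
    intel_sub_group_block_write_us(dst + base, v);
}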
-
-#ifdef cl_intel_device_side_avc_motion_estimation
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
-
-// MCE built-in functions
-uchar __ovld
-intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
-    uchar slice_type, uchar qp);
-ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
-    uchar slice_type, uchar qp);
-uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
-    uchar slice_type, uchar qp);
-uint2 __ovld
-intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
-    uchar slice_type, uchar qp);
-
-uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
-uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
-uchar __ovld
-intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
-
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_shape_penalty(
-    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_ac_only_haar(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_mce_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
-    intel_sub_group_avc_mce_result_t result);
-uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld
-intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_mce_result_t result);
-
-// IME built-in functions
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_initialize(
-    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference(
-    short2 ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference(
-    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_max_motion_vector_count(
-    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_unidirectional_mix_disable(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_early_search_termination_threshold(
-    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_weighted_sad(
-    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
-
-__attribute__((deprecated("If you use the latest Intel driver, please use "
-                          "intel_sub_group_avc_ime_ref_window_size instead",
-                          "intel_sub_group_avc_ime_ref_window_size")))
-ushort2 __ovld
-intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
-ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
-    uchar search_window_config, char dual_ref);
-short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
-    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
-    ushort2 image_size);
-
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-
-intel_sub_group_avc_ime_single_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_single_reference_streamin(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_dual_reference_streamin(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_single_reference_streamout(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_dual_reference_streamout(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-
-uchar __ovld intel_sub_group_avc_ime_get_border_reached(
-    uchar image_select, intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
-    intel_sub_group_avc_ime_result_t result);
-
-// REF built-in functions
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_fme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar sad_adjustment);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_bme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar bidirectional_weight, uchar sad_adjustment);
-
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bidirectional_mix_disable(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bilinear_filter_enable(
-    intel_sub_group_avc_ref_payload_t payload);
-
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-
-// SIC built-in functions
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_initialize(
-    ushort2 src_coord);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_skc(
-    uint skip_block_partition_type, uint skip_motion_vector_mask,
-    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
-    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-uint __ovld
-intel_sub_group_avc_sic_get_motion_vector_mask(
-    uint skip_block_partition_type, uchar direction);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
-    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
-    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
-    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
-    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_forward_transform_enable(
-    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
-    uchar block_based_skip_type,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_ipe(
-    read_only image2d_t src_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-
-uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
-    intel_sub_group_avc_sic_result_t result);
-uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
-    intel_sub_group_avc_sic_result_t result);
-uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
-    intel_sub_group_avc_sic_result_t result);
-
-// Wrappers
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_ac_only_haar(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_ac_only_haar(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_ac_only_haar(
-    intel_sub_group_avc_sic_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
-    intel_sub_group_avc_ime_result_t result);
-ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
-    intel_sub_group_avc_ref_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
-    intel_sub_group_avc_ref_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
-    intel_sub_group_avc_sic_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
-    intel_sub_group_avc_ref_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
-    intel_sub_group_avc_ref_result_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld
-intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ref_result_t result);
-
-// Type conversion functions
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_payload(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_payload(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_payload(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_result(
-    intel_sub_group_avc_ime_result_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_result(
-    intel_sub_group_avc_ref_result_t result);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_result(
-    intel_sub_group_avc_sic_result_t result);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_result(
-    intel_sub_group_avc_mce_result_t result);
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
-#endif // cl_intel_device_side_avc_motion_estimation
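// Editor's sketch of the IME call sequence implied by the declarations above:
// initialize a payload, attach a reference region, evaluate, then query the
// result. Assumes cl_intel_device_side_avc_motion_estimation with VME images
// and a sampler prepared on the host; the zero arguments stand in for the
// CLK_AVC_ME_* configuration enums, and all names are illustrative.
__kernel void ime_demo(read_only image2d_t src, read_only image2d_t ref,
                       sampler_t vme_sampler, __global ulong* mvs) {
    ushort2 mb = (ushort2)((ushort)(get_group_id(0) * 16),
                           (ushort)(get_group_id(1) * 16));
    intel_sub_group_avc_ime_payload_t p =
        intel_sub_group_avc_ime_initialize(mb, 0, 0);
    p = intel_sub_group_avc_ime_set_single_reference((short2)(0, 0), 0, p);
    intel_sub_group_avc_ime_result_t r =
        intel_sub_group_avc_ime_evaluate_with_single_reference(
            src, ref, vme_sampler, p);
    mvs[get_group_id(1) * get_num_groups(0) + get_group_id(0)] =
        intel_sub_group_avc_ime_get_motion_vectors(r);
}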
-
-#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_pack(float4 v);
-
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
-#endif // cl_amd_media_ops
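// Editor's sketch: amd_sad sums the absolute byte differences over the four
// bytes of each uint and adds the accumulator, so one call covers four 8-bit
// pixels. Assumes cl_amd_media_ops; names are illustrative.
__kernel void sad_demo(__global const uint* a, __global const uint* b,
                       __global uint* out) {
    size_t gid = get_global_id(0);
    out[gid] = amd_sad(a[gid], b[gid], 0u);
}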
-
-#ifdef cl_amd_media_ops2
-int __ovld amd_bfe(int src0, uint src1, uint src2);
-int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
-int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
-int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
-int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
-int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfe(uint src0, uint src1, uint src2);
-uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfm(uint src0, uint src1);
-uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
-uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
-uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
-uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
-uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
-
-float __ovld amd_max3(float src0, float src1, float src2);
-float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_max3(int src0, int src1, int src2);
-int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_max3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_median3(float src0, float src1, float src2);
-float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_median3(int src0, int src1, int src2);
-int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_median3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_min3(float src0, float src1, float src2);
-float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_min3(int src0, int src1, int src2);
-int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_min3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
-
-ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-uint __ovld amd_msad(uint src0, uint src1, uint src2);
-uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadd(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadw(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
-#endif // cl_amd_media_ops2
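// Editor's sketch: amd_median3 gives a branch-free 3-tap rank filter, and
// amd_bfe extracts a bitfield as (value, offset, width). Assumes
// cl_amd_media_ops2; names are illustrative.
__kernel void median3_demo(__global const float* x, __global float* y, uint n) {
    size_t i = get_global_id(0);
    if (i > 0 && i + 1 < n)
        y[i] = amd_median3(x[i - 1], x[i], x[i + 1]);
}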
-
-#if defined(cl_arm_integer_dot_product_int8)
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
-#endif // defined(cl_arm_integer_dot_product_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int8)
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int16)
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
-
-#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
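-
-// A hedged OpenCL C sketch of the Arm dot-product builtins above (lane
-// values are illustrative, and the corresponding extensions must be
-// enabled first):
-//   uchar4 a = (uchar4)(1, 2, 3, 4);
-//   uchar4 b = (uchar4)(10, 20, 30, 40);
-//   uint d   = arm_dot(a, b);           // 1*10 + 2*20 + 3*30 + 4*40 = 300
-//   uint acc = arm_dot_acc(a, b, 5u);   // 300 + 5 = 305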
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#undef __cnfn
-#undef __ovld
-#endif //_OPENCL_H_
diff --git a/linux-x86/lib64/clang/14.0.2/include/pmmintrin.h b/linux-x86/lib64/clang/14.0.2/include/pmmintrin.h
deleted file mode 100644
index eda8356..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/pmmintrin.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __PMMINTRIN_H
-#define __PMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architectures"
-#endif
-
-#include <emmintrin.h>
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
-  __attribute__((__always_inline__, __nodebug__, __target__("sse3"), __min_vector_width__(128)))
-
-/// Loads data from an unaligned memory location to elements in a 128-bit
-///    vector.
-///
-///    If the address of the data is not 16-byte aligned, the instruction may
-///    read two adjacent aligned blocks of memory to retrieve the requested
-///    data.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VLDDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit integer vector containing integer values.
-/// \returns A 128-bit vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_lddqu_si128(__m128i const *__p)
-{
-  return (__m128i)__builtin_ia32_lddqu((char const *)__p);
-}
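-
-/// For illustration (buffer and offset assumed, not part of this header),
-/// the load tolerates any byte alignment:
-/// \code
-///   unsigned char buf[32] = {0};
-///   __m128i v = _mm_lddqu_si128((__m128i const *)(buf + 3)); // unaligned OK
-/// \endcode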
-
-/// Adds the even-indexed values and subtracts the odd-indexed values of
-///    two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSUBPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the left source operand.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the right source operand.
-/// \returns A 128-bit vector of [4 x float] containing the alternating sums and
-///    differences of both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_addsub_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);
-}
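-
-/// Worked example (lane values assumed): with a = {1, 2, 3, 4} and
-/// b = {10, 20, 30, 40}, element 0 first, the result is
-/// {1-10, 2+20, 3-30, 4+40} = {-9, 22, -27, 44}.
-/// \code
-///   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
-///   __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
-///   __m128 r = _mm_addsub_ps(a, b); // {-9, 22, -27, 44}
-/// \endcode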
-
-/// Horizontally adds the adjacent pairs of values contained in two
-///    128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHADDPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The horizontal sums of the values are stored in the lower bits of the
-///    destination.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The horizontal sums of the values are stored in the upper bits of the
-///    destination.
-/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of
-///    both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_hadd_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Horizontally subtracts the adjacent pairs of values contained in two
-///    128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHSUBPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The horizontal differences between the values are stored in the lower
-///    bits of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The horizontal differences between the values are stored in the upper
-///    bits of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the horizontal
-///    differences of both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_hsub_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
-}
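-
-/// Worked example for the two horizontal operations above (lane values
-/// assumed):
-/// \code
-///   __m128 a  = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   // {1, 2, 3, 4}
-///   __m128 b  = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f); // {10, 20, 30, 40}
-///   __m128 hs = _mm_hadd_ps(a, b); // {1+2, 3+4, 10+20, 30+40} = {3, 7, 30, 70}
-///   __m128 hd = _mm_hsub_ps(a, b); // {1-2, 3-4, 10-20, 30-40} = {-1, -1, -10, -10}
-/// \endcode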
-
-/// Moves and duplicates odd-indexed values from a 128-bit vector
-///    of [4 x float] to float values stored in a 128-bit vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSHDUP </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [127:96] of the source are written to bits [127:96] and [95:64] of
-///    the destination. \n
-///    Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
-///    destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
-///    values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_movehdup_ps(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
-}
-
-/// Duplicates even-indexed values from a 128-bit vector of
-///    [4 x float] to float values stored in a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSLDUP </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] \n
-///    Bits [95:64] of the source are written to bits [127:96] and [95:64] of
-///    the destination. \n
-///    Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
-///    destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
-///    values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_moveldup_ps(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);
-}
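-
-/// Worked example for the two duplicate operations above (lane values
-/// assumed): with a = {1, 2, 3, 4}, element 0 first,
-/// \code
-///   __m128 a  = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
-///   __m128 hi = _mm_movehdup_ps(a); // {2, 2, 4, 4} -- odd lanes duplicated
-///   __m128 lo = _mm_moveldup_ps(a); // {1, 1, 3, 3} -- even lanes duplicated
-/// \endcode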
-
-/// Adds the even-indexed values and subtracts the odd-indexed values of
-///    two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the left source operand.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the right source operand.
-/// \returns A 128-bit vector of [2 x double] containing the alternating sums
-///    and differences of both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_addsub_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Horizontally adds the pairs of values contained in two 128-bit
-///    vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHADDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-///    The horizontal sum of the values is stored in the lower bits of the
-///    destination.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-///    The horizontal sum of the values is stored in the upper bits of the
-///    destination.
-/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of
-///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_hadd_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Horizontally subtracts the pairs of values contained in two 128-bit
-///    vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VHSUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-///    The horizontal difference of the values is stored in the lower bits of
-///    the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-///    The horizontal difference of the values is stored in the upper bits of
-///    the destination.
-/// \returns A 128-bit vector of [2 x double] containing the horizontal
-///    differences of both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_hsub_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Moves and duplicates one double-precision value to double-precision
-///    values stored in a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_loaddup_pd(double const *dp);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
-///
-/// \param dp
-///    A pointer to a double-precision value to be moved and duplicated.
-/// \returns A 128-bit vector of [2 x double] containing the moved and
-///    duplicated values.
-#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
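-
-/// Usage sketch (variable names assumed):
-/// \code
-///   double d  = 3.5;
-///   __m128d v = _mm_loaddup_pd(&d); // both lanes hold 3.5
-/// \endcode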
-
-/// Moves and duplicates the double-precision value in the lower bits of
-///    a 128-bit vector of [2 x double] to double-precision values stored in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. Bits [63:0] are written to bits
-///    [127:64] and [63:0] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the moved and
-///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_movedup_pd(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-}
-
-/// Establishes a linear address memory range to be monitored and puts
-///    the processor in the monitor event pending state. Data stored in the
-///    monitored address range causes the processor to exit the pending state.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MONITOR </c> instruction.
-///
-/// \param __p
-///    The memory range to be monitored. The size of the range is determined by
-///    CPUID function 0000_0005h.
-/// \param __extensions
-///    Optional extensions for the monitoring state.
-/// \param __hints
-///    Optional hints for the monitoring state.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
-{
-  __builtin_ia32_monitor(__p, __extensions, __hints);
-}
-
-/// Used with the MONITOR instruction to wait while the processor is in
-///    the monitor event pending state. Data stored in the monitored address
-///    range causes the processor to exit the pending state.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MWAIT </c> instruction.
-///
-/// \param __extensions
-///    Optional extensions for the monitoring state, which may vary by
-///    processor.
-/// \param __hints
-///    Optional hints for the monitoring state, which may vary by processor.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_mwait(unsigned __extensions, unsigned __hints)
-{
-  __builtin_ia32_mwait(__extensions, __hints);
-}
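-
-/// A hedged sketch of the usual MONITOR/MWAIT idle pattern (the flag
-/// variable is assumed; note that many operating systems restrict these
-/// instructions to privileged code, and support should be verified via
-/// CPUID first):
-/// \code
-///   volatile int wakeup = 0;
-///   _mm_monitor((void const *)&wakeup, 0, 0); // arm the address range
-///   if (!wakeup)
-///     _mm_mwait(0, 0); // sleep until the range is written (or an interrupt)
-/// \endcode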
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __PMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
deleted file mode 100644
index 82a7178..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
+++ /dev/null
@@ -1,2325 +0,0 @@
-/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header file is to help porting code using Intel intrinsics
-   explicitly from x86_64 to powerpc64/powerpc64le.
-
-   Since X86 SSE2 intrinsics mainly handle the __m128i and __m128d types,
-   the PowerPC VMX/VSX ISA is a good match for vector float SIMD
-   operations.  However, scalar float operations in vector (XMM)
-   registers require the POWER8 VSX ISA (2.07) level.  There are
-   differences in the data format and placement of float scalars in the
-   vector register, which require extra steps to match SSE2 scalar float
-   semantics on POWER.
-
-   It should be noted that there are significant differences between
-   x86_64's MXCSR and PowerISA's FPSCR/VSCR registers.  It is
-   recommended to use the portable <fenv.h> interfaces instead of
-   accessing MXCSR directly.
-
-   Most SSE2 scalar float intrinsic operations can be performed more
-   efficiently as C language float scalar operations or optimized to
-   use vector SIMD operations. We recommend this for new applications.
-*/
-#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
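-
-/* A hedged sketch of the <fenv.h> route recommended above, instead of
-   touching MXCSR-style state directly (all names are standard C99):
-
-     #include <fenv.h>
-     #pragma STDC FENV_ACCESS ON
-     fesetround (FE_TOWARDZERO);      // rounding control, portably
-     feclearexcept (FE_ALL_EXCEPT);   // exception flags, portably
-
-   Builds that accept the caveats typically pass -DNO_WARN_X86_INTRINSICS
-   to silence the #error above.  */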
-
-#ifndef EMMINTRIN_H_
-#define EMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-
-/* We need definitions from the SSE header files.  */
-#include <xmmintrin.h>
-
-/* SSE2 */
-typedef __vector double __v2df;
-typedef __vector long long __v2di;
-typedef __vector unsigned long long __v2du;
-typedef __vector int __v4si;
-typedef __vector unsigned int __v4su;
-typedef __vector short __v8hi;
-typedef __vector unsigned short __v8hu;
-typedef __vector signed char __v16qi;
-typedef __vector unsigned char __v16qu;
-
-/* The Intel API is flexible enough that we must allow aliasing with other
-   vector types, and their scalar components.  */
-typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
-typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
-
-/* Unaligned versions of the same types.  */
-typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-
-/* Define two value permute mask.  */
-#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
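-
-/* Illustration (values assumed): _MM_SHUFFLE2 (1, 0) == 2 and
-   _MM_SHUFFLE2 (0, 1) == 1.  In _mm_shuffle_pd below, bit 0 of the mask
-   selects which element of the first operand fills the low lane, and
-   bit 1 selects which element of the second operand fills the high
-   lane.  */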
-
-/* Create a vector with element 0 as F and the rest zero.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_sd (double __F)
-{
-  return __extension__ (__m128d){ __F, 0.0 };
-}
-
-/* Create a vector with both elements equal to F.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_pd (double __F)
-{
-  return __extension__ (__m128d){ __F, __F };
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd1 (double __F)
-{
-  return _mm_set1_pd (__F);
-}
-
-/* Create a vector with the lower value X and upper value W.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd (double __W, double __X)
-{
-  return __extension__ (__m128d){ __X, __W };
-}
-
-/* Create a vector with the lower value W and upper value X.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_pd (double __W, double __X)
-{
-  return __extension__ (__m128d){ __W, __X };
-}
-
-/* Create an undefined vector.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_pd (void)
-{
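-  /* Intentionally self-initialized: any bit pattern is a valid
-     "undefined" value, and this idiom avoids uninitialized-use
-     warnings.  */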
-  __m128d __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_pd (void)
-{
-  return (__m128d) vec_splats (0);
-}
-
-/* Sets the low DPFP value of A from the low value of B.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_sd (__m128d __A, __m128d __B)
-{
-  __v2df result = (__v2df) __A;
-  result [0] = ((__v2df) __B)[0];
-  return (__m128d) result;
-}
-
-/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd (double const *__P)
-{
-  return ((__m128d)vec_ld(0, (__v16qu*)__P));
-}
-
-/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_pd (double const *__P)
-{
-  return (vec_vsx_ld(0, __P));
-}
-
-/* Create a vector with both elements equal to *P.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_pd (double const *__P)
-{
-  return (vec_splats (*__P));
-}
-
-/* Create a vector with element 0 as *P and the rest zero.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_sd (double const *__P)
-{
-  return _mm_set_sd (*__P);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd1 (double const *__P)
-{
-  return _mm_load1_pd (__P);
-}
-
-/* Load two DPFP values in reverse order.  The address must be aligned.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_pd (double const *__P)
-{
-  __v2df __tmp = _mm_load_pd (__P);
-  return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
-}
-
-/* Store two DPFP values.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd (double *__P, __m128d __A)
-{
-  vec_st((__v16qu)__A, 0, (__v16qu*)__P);
-}
-
-/* Store two DPFP values.  The address need not be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_pd (double *__P, __m128d __A)
-{
-  *(__m128d_u *)__P = __A;
-}
-
-/* Stores the lower DPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_sd (double *__P, __m128d __A)
-{
-  *__P = ((__v2df)__A)[0];
-}
-
-extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_f64 (__m128d __A)
-{
-  return ((__v2df)__A)[0];
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pd (double *__P, __m128d __A)
-{
-  _mm_store_sd (__P, __A);
-}
-
-/* Stores the upper DPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pd (double *__P, __m128d __A)
-{
-  *__P = ((__v2df)__A)[1];
-}
-
-/* Store the lower DPFP value across two words.
-   The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_pd (double *__P, __m128d __A)
-{
-  _mm_store_pd (__P, vec_splat (__A, 0));
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd1 (double *__P, __m128d __A)
-{
-  _mm_store1_pd (__P, __A);
-}
-
-/* Store two DPFP values in reverse order.  The address must be aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_pd (double *__P, __m128d __A)
-{
-  _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64 (__m128i __A)
-{
-  return ((__v2di)__A)[0];
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64x (__m128i __A)
-{
-  return ((__v2di)__A)[0];
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A + (__v2df)__B);
-}
-
-/* Add the lower double-precision (64-bit) floating-point element in
-   a and b, store the result in the lower element of dst, and copy
-   the upper element from a to the upper element of dst. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] + __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A - (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] - __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A * (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] * __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) ((__v2df)__A / (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_sd (__m128d __A, __m128d __B)
-{
-  __A[0] = __A[0] / __B[0];
-  return (__A);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_pd (__m128d __A)
-{
-  return (vec_sqrt (__A));
-}
-
-/* Return pair {sqrt (B[0]), A[1]}.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_sd (__m128d __A, __m128d __B)
-{
-  __v2df c;
-  c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pd (__m128d __A, __m128d __B)
-{
-  return (vec_min (__A, __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = vec_min (a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pd (__m128d __A, __m128d __B)
-{
-  return (vec_max (__A, __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = vec_max (a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_pd (__m128d __A, __m128d __B)
-{
-  __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
-  return ((__m128d)vec_nor (temp, temp));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_pd (__m128d __A, __m128d __B)
-{
-  return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
-  __v2du c, d;
-  /* A compare against itself yields false (0's) if the value is NaN.  */
-  c = (__v2du)vec_cmpeq (__A, __A);
-  d = (__v2du)vec_cmpeq (__B, __B);
-#else
-  __v2du a, b;
-  __v2du c, d;
-  const __v2du double_exp_mask  = {0x7ff0000000000000, 0x7ff0000000000000};
-  a = (__v2du)vec_abs ((__v2df)__A);
-  b = (__v2du)vec_abs ((__v2df)__B);
-  c = (__v2du)vec_cmpgt (double_exp_mask, a);
-  d = (__v2du)vec_cmpgt (double_exp_mask, b);
-#endif
-  /* A != NAN and B != NAN.  */
-  return ((__m128d)vec_and(c, d));
-}
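-
-/* Illustration (lane values assumed): for __A = {1.0, NAN} and
-   __B = {2.0, 5.0}, _mm_cmpord_pd returns {all-ones, all-zeros}:
-   lane 0 holds two ordered values, while lane 1 contains a NaN.  */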
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
-  __v2du c, d;
-  /* A compare against itself yields false (0's) if the value is NaN.  */
-  c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
-  d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
-  /* A == NAN OR B == NAN converts to:
-     NOT(A != NAN) OR NOT(B != NAN).  */
-  c = vec_nor (c, c);
-  return ((__m128d)vec_orc(c, d));
-#else
-  __v2du c, d;
-  /* A compare against itself yields false (0's) if the value is NaN.  */
-  c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
-  d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
-  /* Invert, so that true ('1's) marks the NaN lanes.  */
-  c = vec_nor (c, c);
-  d = vec_nor (d, d);
-  return ((__m128d)vec_or(c, d));
-#endif
-}
-
-extern __inline  __m128d  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_sd(__m128d  __A, __m128d  __B)
-{
-  __v2df a, b, c;
-  /* PowerISA VSX does not allow partial (for just the lower double)
-     results.  So, to ensure we don't generate spurious exceptions
-     (from the upper double values), we splat the lower double
-     before we do the operation.  */
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpeq(a, b);
-  /* Then we merge the lower double result with the original upper
-     double from __A.  */
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
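-
-/* The same splat/compare/merge pattern is used by all the *_sd compares
-   below.  E.g. (lane values assumed) with __A = {1.0, 7.0} and
-   __B = {1.0, NAN}, only lane 0 is compared -- the splats keep the NaN
-   in __B[1] out of the compare, so it cannot raise an exception -- and
-   the result is {all-ones, 7.0}, with lane 1 passed through from __A.  */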
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmplt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmple(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpgt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpge(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  c = (__v2df) vec_cmpeq(a, b);
-  c = vec_nor (c, c);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not less than is just greater than or equal.  */
-  c = (__v2df) vec_cmpge(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not less than or equal is just greater than.  */
-  c = (__v2df) vec_cmpgt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not greater than is just less than or equal.  */
-  c = (__v2df) vec_cmple(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_sd (__m128d __A, __m128d __B)
-{
-  __v2df a, b, c;
-  a = vec_splats (__A[0]);
-  b = vec_splats (__B[0]);
-  /* Not greater than or equal is just less than.  */
-  c = (__v2df) vec_cmplt(a, b);
-  return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_sd (__m128d __A, __m128d __B)
-{
-  __v2df r;
-  r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
-  return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_sd (__m128d __A, __m128d __B)
-{
-  __v2df r;
-  r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
-  return (__m128d) _mm_setr_pd (r[0], __A[1]);
-}
-
-/* FIXME
-   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
-   exactly the same because GCC for PowerPC only generates unordered
-   compares (scalar and vector).
-   Technically _mm_comieq_sd et al. should be using the ordered
-   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
-   be OK.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_sd (__m128d __A, __m128d __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-/* Create a vector of Qi, where i is the element number.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64x (long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64 (__m64 __q1,  __m64 __q0)
-{
-  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
-{
-  return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
-	       short __q3, short __q2, short __q1, short __q0)
-{
-  return __extension__ (__m128i)(__v8hi){
-    __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
-	      char __q11, char __q10, char __q09, char __q08,
-	      char __q07, char __q06, char __q05, char __q04,
-	      char __q03, char __q02, char __q01, char __q00)
-{
-  return __extension__ (__m128i)(__v16qi){
-    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
-    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
-  };
-}
-
-/* Set all of the elements of the vector to A.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64x (long long __A)
-{
-  return _mm_set_epi64x (__A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64 (__m64 __A)
-{
-  return _mm_set_epi64 (__A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi32 (int __A)
-{
-  return _mm_set_epi32 (__A, __A, __A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi16 (short __A)
-{
-  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi8 (char __A)
-{
-  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
-		       __A, __A, __A, __A, __A, __A, __A, __A);
-}
-
-/* Create a vector of Qi, where i is the element number.
-   The parameter order is reversed from the _mm_set_epi* functions.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi64 (__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64 (__q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
-{
-  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
-	        short __q4, short __q5, short __q6, short __q7)
-{
-  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
-	       char __q04, char __q05, char __q06, char __q07,
-	       char __q08, char __q09, char __q10, char __q11,
-	       char __q12, char __q13, char __q14, char __q15)
-{
-  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
-		       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
-}
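-
-/* E.g. (values assumed) _mm_setr_epi32 (1, 2, 3, 4) produces the same
-   vector as _mm_set_epi32 (4, 3, 2, 1): element 0 holds 1 either way.  */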
-
-/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_si128 (__m128i const *__P)
-{
-  return *__P;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_si128 (__m128i_u const *__P)
-{
-  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_epi64 (__m128i_u const *__P)
-{
-  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_si128 (__m128i *__P, __m128i __B)
-{
-  vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
-{
-  *__P = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
-{
-  *(long long *)__P = ((__v2di)__B)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movepi64_pi64 (__m128i_u __B)
-{
-  return (__m64) ((__v2di)__B)[0];
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movpi64_epi64 (__m64 __A)
-{
-  return _mm_set_epi64 ((__m64)0LL, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_epi64 (__m128i __A)
-{
-  return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
-}
-
-/* Create an undefined vector.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_si128 (void)
-{
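-  /* Intentionally self-initialized, as in _mm_undefined_pd above.  */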
-  __m128i __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_si128 (void)
-{
-  return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_pd (__m128i __A)
-{
-  __v2di val;
-  /* For LE we need a Vector Unpack Low Signed Word, which is what
-     vec_unpackh generates on little-endian.  */
-  val = (__v2di)vec_unpackh ((__v4si)__A);
-
-  return (__m128d)vec_ctf (val, 0);
-}
-#endif
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_ps (__m128i __A)
-{
-  return ((__m128)vec_ctf((__v4si)__A, 0));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_epi32 (__m128d __A)
-{
-  __v2df rounded = vec_rint (__A);
-  __v4si result, temp;
-  const __v4si vzero =
-    { 0, 0, 0, 0 };
-
-  /* VSX Vector truncate Double-Precision to integer and Convert to
-   Signed Integer Word format with Saturate.  */
-  __asm__(
-      "xvcvdpsxws %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (rounded)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4si) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-  return (__m128i) result;
-}
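-
-/* Note on the lane shuffle above: xvcvdpsxws leaves the two 32-bit
-   results in alternating word lanes of the register, so the mergeo/pack
-   (or the perm fallback) compacts them into the low two words and
-   zero-fills the upper two to match the SSE2 layout.  E.g. (values
-   assumed) {1.6, -2.5} rounds, in the default nearest-even mode, to
-   {2, -2, 0, 0}.  */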
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_pi32 (__m128d __A)
-{
-  __m128i result = _mm_cvtpd_epi32(__A);
-
-  return (__m64) result[0];
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_ps (__m128d __A)
-{
-  __v4sf result;
-  __v4si temp;
-  const __v4si vzero = { 0, 0, 0, 0 };
-
-  __asm__(
-      "xvcvdpsp %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (__A)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4sf) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-  return ((__m128)result);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_epi32 (__m128d __A)
-{
-  __v4si result;
-  __v4si temp;
-  const __v4si vzero = { 0, 0, 0, 0 };
-
-  /* VSX Vector truncate Double-Precision to integer and Convert to
-   Signed Integer Word format with Saturate.  */
-  __asm__(
-      "xvcvdpsxws %x0,%x1"
-      : "=wa" (temp)
-      : "wa" (__A)
-      : );
-
-#ifdef _ARCH_PWR8
-  temp = vec_mergeo (temp, temp);
-  result = (__v4si) vec_vpkudum ((__vector long long) temp,
-				 (__vector long long) vzero);
-#else
-  {
-    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
-	0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
-    result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
-  }
-#endif
-
-  return ((__m128i) result);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_pi32 (__m128d __A)
-{
-  __m128i result = _mm_cvttpd_epi32 (__A);
-
-  return (__m64) result[0];
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si32 (__m128i __A)
-{
-  return ((__v4si)__A)[0];
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_pd (__m64 __A)
-{
-  __v4si temp;
-  __v2di tmp2;
-  __v2df result;
-
-  temp = (__v4si)vec_splats (__A);
-  tmp2 = (__v2di)vec_unpackl (temp);
-  result = vec_ctf ((__vector signed long long) tmp2, 0);
-  return (__m128d)result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_epi32 (__m128 __A)
-{
-  __v4sf rounded;
-  __v4si result;
-
-  rounded = vec_rint((__v4sf) __A);
-  result = vec_cts (rounded, 0);
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_epi32 (__m128 __A)
-{
-  __v4si result;
-
-  result = vec_cts ((__v4sf) __A, 0);
-  return (__m128i) result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pd (__m128 __A)
-{
-  /* Check if vec_doubleh is defined by <altivec.h>.  If so, use that.  */
-#ifdef vec_doubleh
-  return (__m128d) vec_doubleh ((__v4sf)__A);
-#else
-  /* Otherwise the compiler is older, so we need to generate the
-     equivalent code ourselves.  */
-  __v4sf a = (__v4sf)__A;
-  __v4sf temp;
-  __v2df result;
-#ifdef __LITTLE_ENDIAN__
-  /* The input float values are in elements {[0], [1]} but the convert
-     instruction needs them in elements {[1], [3]}, so we use two
-     shift-left-double vector word immediates to get the elements
-     lined up.  */
-  temp = __builtin_vsx_xxsldwi (a, a, 3);
-  temp = __builtin_vsx_xxsldwi (a, temp, 2);
-#else
-  /* The input float values are in elements {[0], [1]} but the convert
-     instruction needs them in elements {[0], [2]}, so we use a
-     merge-high word operation to get the elements lined up.  */
-  temp = vec_vmrghw (a, a);
-#endif
-  __asm__(
-      " xvcvspdp %x0,%x1"
-      : "=wa" (result)
-      : "wa" (temp)
-      : );
-  return (__m128d) result;
-#endif
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si32 (__m128d __A)
-{
-  __v2df rounded = vec_rint((__v2df) __A);
-  int result = ((__v2df)rounded)[0];
-
-  return result;
-}
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64 (__m128d __A)
-{
-  __v2df rounded = vec_rint ((__v2df) __A );
-  long long result = ((__v2df) rounded)[0];
-
-  return result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64x (__m128d __A)
-{
-  return _mm_cvtsd_si64 ((__v2df)__A);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si32 (__m128d __A)
-{
-  int result = ((__v2df)__A)[0];
-
-  return result;
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64 (__m128d __A)
-{
-  long long result = ((__v2df)__A)[0];
-
-  return result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64x (__m128d __A)
-{
-  return _mm_cvttsd_si64 (__A);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_ss (__m128 __A, __m128d __B)
-{
-  __v4sf result = (__v4sf)__A;
-
-#ifdef __LITTLE_ENDIAN__
-  __v4sf temp_s;
-  /* Copy double element[0] to element [1] for conversion.  */
-  __v2df temp_b = vec_splat((__v2df)__B, 0);
-
-  /* Pre-rotate __A left 3 (logically right 1) elements.  */
-  result = __builtin_vsx_xxsldwi (result, result, 3);
-  /* Convert double to single float scalar in a vector.  */
-  __asm__(
-      "xscvdpsp %x0,%x1"
-      : "=wa" (temp_s)
-      : "wa" (temp_b)
-      : );
-  /* Shift the resulting scalar into vector element [0].  */
-  result = __builtin_vsx_xxsldwi (result, temp_s, 1);
-#else
-  result [0] = ((__v2df)__B)[0];
-#endif
-  return (__m128) result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_sd (__m128d __A, int __B)
-{
-  __v2df result = (__v2df)__A;
-  double db = __B;
-  result [0] = db;
-  return (__m128d)result;
-}
-
-/* Intel intrinsic.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_sd (__m128d __A, long long __B)
-{
-  __v2df result = (__v2df)__A;
-  double db = __B;
-  result [0] = db;
-  return (__m128d)result;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_sd (__m128d __A, long long __B)
-{
-  return _mm_cvtsi64_sd (__A, __B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_sd (__m128d __A, __m128 __B)
-{
-#ifdef __LITTLE_ENDIAN__
-  /* Use splat to move element [0] into position for the convert. */
-  __v4sf temp = vec_splat ((__v4sf)__B, 0);
-  __v2df res;
-  /* Convert single float scalar to double in a vector.  */
-  __asm__(
-      "xscvspdp %x0,%x1"
-      : "=wa" (res)
-      : "wa" (temp)
-      : );
-  return (__m128d) vec_mergel (res, (__v2df)__A);
-#else
-  __v2df res = (__v2df)__A;
-  res [0] = ((__v4sf)__B) [0];
-  return (__m128d) res;
-#endif
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
-{
-  __vector double result;
-  const int litmsk = __mask & 0x3;
-
-  if (litmsk == 0)
-    result = vec_mergeh (__A, __B);
-#if __GNUC__ < 6
-  else if (litmsk == 1)
-    result = vec_xxpermdi (__B, __A, 2);
-  else if (litmsk == 2)
-    result = vec_xxpermdi (__B, __A, 1);
-#else
-  else if (litmsk == 1)
-    result = vec_xxpermdi (__A, __B, 2);
-  else if (litmsk == 2)
-    result = vec_xxpermdi (__A, __B, 1);
-#endif
-  else
-    result = vec_mergel (__A, __B);
-
-  return result;
-}
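-
-/* Illustration (lane values assumed): with __A = {a0, a1} and
-   __B = {b0, b1}, mask 0 gives {a0, b0}, mask 1 gives {a1, b0},
-   mask 2 gives {a0, b1}, and mask 3 gives {a1, b1}.  */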
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_pd (__m128d __A, __m128d __B)
-{
-  return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pd (__m128d __A, double const *__B)
-{
-  __v2df result = (__v2df)__A;
-  result [1] = *__B;
-  return (__m128d)result;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pd (__m128d __A, double const *__B)
-{
-  __v2df result = (__v2df)__A;
-  result [0] = *__B;
-  return (__m128d)result;
-}
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 2-bit mask from the most significant bits of the DPFP values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pd (__m128d  __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned int perm_mask =
-    {
-#ifdef __LITTLE_ENDIAN__
-      0x80800040, 0x80808080, 0x80808080, 0x80808080
-#else
-      0x80808080, 0x80808080, 0x80808080, 0x80804000
-#endif
-    };
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
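-
-/* Illustration (lane values assumed): __A = {-1.0, 2.0} has sign bits
-   {1, 0}, so _mm_movemask_pd returns 0x1, and {-1.0, -2.0} returns 0x3.
-   The perm_mask above makes vec_vbpermq gather exactly those two sign
-   bit positions.  */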
-#endif /* _ARCH_PWR8 */
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packus_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergel ((__vector long long) __A,
-			       (__vector long long) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_mergeh ((__vector long long) __A,
-			       (__vector long long) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v16qu)__A + (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v8hu)__A + (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v4su)__A + (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v2du)__A + (__v2du)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v16qu)__A - (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v8hu)__A - (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v4su)__A - (__v4su)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i) ((__v2du)__A - (__v2du)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_madd_epi16 (__m128i __A, __m128i __B)
-{
-  __vector signed int zero = {0, 0, 0, 0};
-
-  return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epi16 (__m128i __A, __m128i __B)
-{
-  __vector signed int w0, w1;
-
-  __vector unsigned char xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x08, 0x09, 0x18, 0x19,  0x0C, 0x0D, 0x1C, 0x1D
-#endif
-    };
-
-  w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
-  w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
-  return (__m128i) vec_perm (w0, w1, xform1);
-}
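-
-/* Note on the pattern above: vec_vmulesh/vec_vmulosh form full 32-bit
-   products of the even and odd halfword lanes, and xform1 then gathers
-   the high 16 bits of each product back into source order.  E.g.
-   (values assumed) 0x4000 * 0x0004 = 0x00010000, so the result lane
-   holds 0x0001.  */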
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mullo_epi16 (__m128i __A, __m128i __B)
-{
-    return (__m128i) ((__v8hi)__A * (__v8hi)__B);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_su32 (__m64 __A, __m64 __B)
-{
-  unsigned int a = __A;
-  unsigned int b = __B;
-
-  return ((__m64)a * (__m64)b);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_epu32 (__m128i __A, __m128i __B)
-{
-#if __GNUC__ < 8
-  __v2du result;
-
-#ifdef __LITTLE_ENDIAN__
-  /* VMX Vector Multiply Odd Unsigned Word.  */
-  __asm__(
-      "vmulouw %0,%1,%2"
-      : "=v" (result)
-      : "v" (__A), "v" (__B)
-      : );
-#else
-  /* VMX Vector Multiply Even Unsigned Word.  */
-  __asm__(
-      "vmuleuw %0,%1,%2"
-      : "=v" (result)
-      : "v" (__A), "v" (__B)
-      : );
-#endif
-  return (__m128i) result;
-#else
-  return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
-#endif
-}
-
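-/* Immediate-count shifts.  SSE2 returns zero when the count is out of
-   range, while VMX shift counts are modulo the element width, so the
-   range check below must precede vec_sl.  */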
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi16 (__m128i __A, int __B)
-{
-  __v8hu lshift;
-  __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__B >= 0 && __B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	lshift = (__v8hu) vec_splat_s16(__B);
-      else
-	lshift = vec_splats ((unsigned short) __B);
-
-      result = vec_sl ((__v8hi) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi32 (__m128i __A, int __B)
-{
-  __v4su lshift;
-  __v4si result = { 0, 0, 0, 0 };
-
-  if (__B >= 0 && __B < 32)
-    {
-      if (__builtin_constant_p(__B) && __B < 16)
-	lshift = (__v4su) vec_splat_s32(__B);
-      else
-	lshift = vec_splats ((unsigned int) __B);
-
-      result = vec_sl ((__v4si) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi64 (__m128i __A, int __B)
-{
-  __v2du lshift;
-  __v2di result = { 0, 0 };
-
-  if (__B >= 0 && __B < 64)
-    {
-      if (__builtin_constant_p(__B) && __B < 16)
-	lshift = (__v2du) vec_splat_s32(__B);
-      else
-	lshift = (__v2du) vec_splats ((unsigned int) __B);
-
-      result = vec_sl ((__v2di) __A, lshift);
-    }
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi16 (__m128i __A, int __B)
-{
-  __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hi result;
-
-  if (__B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	rshift = (__v8hu) vec_splat_s16(__B);
-      else
-	rshift = vec_splats ((unsigned short) __B);
-    }
-  result = vec_sra ((__v8hi) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi32 (__m128i __A, int __B)
-{
-  __v4su rshift = { 31, 31, 31, 31 };
-  __v4si result;
-
-  if (__B < 32)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	      rshift = (__v4su) vec_splat_s32(__B);
-	    else
-	      rshift = (__v4su) vec_splats((unsigned int)__B);
-	}
-      else
-	rshift = vec_splats ((unsigned int) __B);
-    }
-  result = vec_sra ((__v4si) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bslli_si128 (__m128i __A, const int __N)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__N < 16)
-    result = vec_sld ((__v16qu) __A, zeros, __N);
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bsrli_si128 (__m128i __A, const int __N)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__N < 16)
-#ifdef __LITTLE_ENDIAN__
-    if (__builtin_constant_p(__N))
-      /* We would like to use Vector Shift Left Double by Octet
-	 Immediate here, since the immediate form avoids loading the
-	 __N * 8 shift count into a separate VR.  */
-      result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
-    else
-#endif
-      {
-	__v16qu shift = vec_splats((unsigned char)(__N*8));
-#ifdef __LITTLE_ENDIAN__
-	result = vec_sro ((__v16qu)__A, shift);
-#else
-	result = vec_slo ((__v16qu)__A, shift);
-#endif
-      }
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_si128 (__m128i __A, const int __N)
-{
-  return _mm_bsrli_si128 (__A, __N);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_si128 (__m128i __A, const int _imm5)
-{
-  __v16qu result;
-  const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (_imm5 < 16)
-#ifdef __LITTLE_ENDIAN__
-    result = vec_sld ((__v16qu) __A, zeros, _imm5);
-#else
-    result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
-#endif
-  else
-    result = zeros;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi16 (__m128i __A, int __B)
-{
-  __v8hu rshift;
-  __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-  if (__B < 16)
-    {
-      if (__builtin_constant_p(__B))
-	rshift = (__v8hu) vec_splat_s16(__B);
-      else
-	rshift = vec_splats ((unsigned short) __B);
-
-      result = vec_sr ((__v8hi) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi32 (__m128i __A, int __B)
-{
-  __v4su rshift;
-  __v4si result = { 0, 0, 0, 0 };
-
-  if (__B < 32)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	      rshift = (__v4su) vec_splat_s32(__B);
-	    else
-	      rshift = (__v4su) vec_splats((unsigned int)__B);
-	}
-      else
-	rshift = vec_splats ((unsigned int) __B);
-
-      result = vec_sr ((__v4si) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi64 (__m128i __A, int __B)
-{
-  __v2du rshift;
-  __v2di result = { 0, 0 };
-
-  if (__B < 64)
-    {
-      if (__builtin_constant_p(__B))
-	{
-	  if (__B < 16)
-	      rshift = (__v2du) vec_splat_s32(__B);
-	    else
-	      rshift = (__v2du) vec_splats((unsigned long long)__B);
-	}
-      else
-	rshift = (__v2du) vec_splats ((unsigned int) __B);
-
-      result = vec_sr ((__v2di) __A, rshift);
-    }
-
-  return (__m128i) result;
-}
-#endif
-
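-/* Shifts with the count held in a vector register: splat the low count
-   element across all lanes, then zero any lane via the mask when the
-   count exceeds the element width (VMX shifts are modulo).  */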
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hu lshift;
-  __vector __bool short shmask;
-  const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu result;
-
-#ifdef __LITTLE_ENDIAN__
-  lshift = vec_splat ((__v8hu) __B, 0);
-#else
-  lshift = vec_splat ((__v8hu) __B, 3);
-#endif
-  shmask = vec_cmple (lshift, shmax);
-  result = vec_sl ((__v8hu) __A, lshift);
-  result = vec_sel ((__v8hu) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi32 (__m128i __A, __m128i __B)
-{
-  __v4su lshift;
-  __vector __bool int shmask;
-  const __v4su shmax = { 32, 32, 32, 32 };
-  __v4su result;
-#ifdef __LITTLE_ENDIAN__
-  lshift = vec_splat ((__v4su) __B, 0);
-#else
-  lshift = vec_splat ((__v4su) __B, 1);
-#endif
-  shmask = vec_cmplt (lshift, shmax);
-  result = vec_sl ((__v4su) __A, lshift);
-  result = vec_sel ((__v4su) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi64 (__m128i __A, __m128i __B)
-{
-  __v2du lshift;
-  __vector __bool long long shmask;
-  const __v2du shmax = { 64, 64 };
-  __v2du result;
-
-  lshift = vec_splat ((__v2du) __B, 0);
-  shmask = vec_cmplt (lshift, shmax);
-  result = vec_sl ((__v2du) __A, lshift);
-  result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
-
-  return (__m128i) result;
-}
-#endif
-
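-/* Arithmetic right shifts clamp the count to (element width - 1)
-   rather than zeroing, matching PSRAW/PSRAD sign-fill behavior.  */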
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi16 (__m128i __A, __m128i __B)
-{
-  const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu rshift;
-  __v8hi result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v8hu)__B, 0);
-#else
-  rshift = vec_splat ((__v8hu)__B, 3);
-#endif
-  rshift = vec_min (rshift, rshmax);
-  result = vec_sra ((__v8hi) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi32 (__m128i __A, __m128i __B)
-{
-  const __v4su rshmax = { 31, 31, 31, 31 };
-  __v4su rshift;
-  __v4si result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v4su)__B, 0);
-#else
-  rshift = vec_splat ((__v4su)__B, 1);
-#endif
-  rshift = vec_min (rshift, rshmax);
-  result = vec_sra ((__v4si) __A, rshift);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hu rshift;
-  __vector __bool short shmask;
-  const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  __v8hu result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v8hu) __B, 0);
-#else
-  rshift = vec_splat ((__v8hu) __B, 3);
-#endif
-  shmask = vec_cmple (rshift, shmax);
-  result = vec_sr ((__v8hu) __A, rshift);
-  result = vec_sel ((__v8hu) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi32 (__m128i __A, __m128i __B)
-{
-  __v4su rshift;
-  __vector __bool int shmask;
-  const __v4su shmax = { 32, 32, 32, 32 };
-  __v4su result;
-
-#ifdef __LITTLE_ENDIAN__
-  rshift = vec_splat ((__v4su) __B, 0);
-#else
-  rshift = vec_splat ((__v4su) __B, 1);
-#endif
-  shmask = vec_cmplt (rshift, shmax);
-  result = vec_sr ((__v4su) __A, rshift);
-  result = vec_sel ((__v4su) shmask, result, shmask);
-
-  return (__m128i) result;
-}
-
-#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi64 (__m128i __A, __m128i __B)
-{
-  __v2du rshift;
-  __vector __bool long long shmask;
-  const __v2du shmax = { 64, 64 };
-  __v2du result;
-
-  rshift = vec_splat ((__v2du) __B, 0);
-  shmask = vec_cmplt (rshift, shmax);
-  result = vec_sr ((__v2du) __A, rshift);
-  result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
-
-  return (__m128i) result;
-}
-#endif
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_pd (__m128d __A, __m128d __B)
-{
-  return (vec_and ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_pd (__m128d __A, __m128d __B)
-{
-  return (vec_andc ((__v2df) __B, (__v2df) __A));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_pd (__m128d __A, __m128d __B)
-{
-  return (vec_or ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_pd (__m128d __A, __m128d __B)
-{
-  return (vec_xor ((__v2df) __A, (__v2df) __B));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_si128 (__m128i __A, __m128i __B)
-{
-  return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_epi16 (__m128i const __A, int const __N)
-{
-  return (unsigned short) ((__v8hi)__A)[__N & 7];
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
-{
-  __v8hi result = (__v8hi)__A;
-
-  result [(__N & 7)] = __D;
-
-  return (__m128i) result;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epi16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
-}
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 16-bit mask from the most significant bit of each of the 16 bytes.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_epi8 (__m128i __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned char perm_mask =
-    {
-	0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
-	0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
-    };
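-  /* Each byte of perm_mask selects one bit of the source (here the sign
-     bit of each byte of __A); vbpermq gathers the 16 selected bits into
-     one doubleword of the result.  */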
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
-#endif /* _ARCH_PWR8 */
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epu16 (__m128i __A, __m128i __B)
-{
-  __v4su w0, w1;
-  __v16qu xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x08, 0x09, 0x18, 0x19,  0x0C, 0x0D, 0x1C, 0x1D
-#endif
-    };
-
-  w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
-  w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
-  return (__m128i) vec_perm (w0, w1, xform1);
-}
-
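-/* Shuffle the upper four 16-bit lanes according to the 2-bit fields of
-   __mask, leaving the low quadword untouched; the selectors are
-   assembled into a vec_perm control vector at run time.  */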
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflehi_epi16 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_98 = __mask & 0x03;
-  unsigned long element_selector_BA = (__mask >> 2) & 0x03;
-  unsigned long element_selector_DC = (__mask >> 4) & 0x03;
-  unsigned long element_selector_FE = (__mask >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
-#else
-	      0x0809, 0x0A0B, 0x0C0D, 0x0E0F
-#endif
-    };
-  __v2du pmask =
-#ifdef __LITTLE_ENDIAN__
-      { 0x1716151413121110UL,  0UL};
-#else
-      { 0x1011121314151617UL,  0UL};
-#endif
-  __m64_union t;
-  __v2du a, r;
-
-  t.as_short[0] = permute_selectors[element_selector_98];
-  t.as_short[1] = permute_selectors[element_selector_BA];
-  t.as_short[2] = permute_selectors[element_selector_DC];
-  t.as_short[3] = permute_selectors[element_selector_FE];
-  pmask[1] = t.as_m64;
-  a = (__v2du)__A;
-  r = vec_perm (a, a, (__vector unsigned char)pmask);
-  return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflelo_epi16 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	      0x0100, 0x0302, 0x0504, 0x0706
-#else
-	      0x0001, 0x0203, 0x0405, 0x0607
-#endif
-    };
-  __v2du pmask =
-#ifdef __LITTLE_ENDIAN__
-                 { 0UL,  0x1f1e1d1c1b1a1918UL};
-#else
-                 { 0UL,  0x18191a1b1c1d1e1fUL};
-#endif
-  __m64_union t;
-  __v2du a, r;
-  t.as_short[0] = permute_selectors[element_selector_10];
-  t.as_short[1] = permute_selectors[element_selector_32];
-  t.as_short[2] = permute_selectors[element_selector_54];
-  t.as_short[3] = permute_selectors[element_selector_76];
-  pmask[0] = t.as_m64;
-  a = (__v2du)__A;
-  r = vec_perm (a, a, (__vector unsigned char)pmask);
-  return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi32 (__m128i __A, const int __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned int permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
-#else
-	0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
-#endif
-    };
-  __v4su t;
-
-  t[0] = permute_selectors[element_selector_10];
-  t[1] = permute_selectors[element_selector_32];
-  t[2] = permute_selectors[element_selector_54] + 0x10101010;
-  t[3] = permute_selectors[element_selector_76] + 0x10101010;
-  return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
-}
-
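-/* Byte-masked store: bytes of __A whose mask byte in __B has the high
-   bit set replace the corresponding bytes at __C.  Implemented as a
-   full 16-byte read-modify-write, unlike x86 MASKMOVDQU.  */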
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
-{
-  __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
-  __v16qu mask, tmp;
-  __m128i_u *p = (__m128i_u*)__C;
-
-  tmp = (__v16qu)_mm_loadu_si128(p);
-  mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
-  tmp = vec_sel (tmp, (__v16qu)__A, mask);
-  _mm_storeu_si128 (p, (__m128i)tmp);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu8 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu16 (__m128i __A, __m128i __B)
-{
-  return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
-}
-
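-/* Sum of absolute differences of unsigned bytes: |a - b| computed as
-   max - min per byte, partial sums accumulated with vec_sum4s and
-   vec_sum2s, then rotated into the positions SSE2 expects.  */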
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_epu8 (__m128i __A, __m128i __B)
-{
-  __v16qu a, b;
-  __v16qu vmin, vmax, vabsdiff;
-  __v4si vsum;
-  const __v4su zero = { 0, 0, 0, 0 };
-  __v4si result;
-
-  a = (__v16qu) __A;
-  b = (__v16qu) __B;
-  vmin = vec_min (a, b);
-  vmax = vec_max (a, b);
-  vabsdiff = vec_sub (vmax, vmin);
-  /* Sum four groups of bytes into integers.  */
-  vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
-  /* Sum across four integers with two integer results.  */
-  result = vec_sum2s (vsum, (__vector signed int) zero);
-  /* Rotate the sums into the correct position.  */
-#ifdef __LITTLE_ENDIAN__
-  result = vec_sld (result, result, 4);
-#else
-  result = vec_sld (result, result, 6);
-#endif
-  return (__m128i) result;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si32 (int *__A, int __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si64 (long long int *__A, long long int __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si128 (__m128i *__A, __m128i __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pd (double *__A, __m128d __B)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "dcbtstt 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-  *(__m128d*)__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_clflush (void const *__A)
-{
-  /* Use the data cache block flush.  */
-  __asm__ (
-    "dcbf 0,%0"
-    :
-    : "b" (__A)
-    : "memory"
-  );
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lfence (void)
-{
-  /* Use lightweight sync (lwsync) for load-to-load ordering.  */
-  __atomic_thread_fence (__ATOMIC_RELEASE);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mfence (void)
-{
-  /* Use heavyweight sync for any-to-any ordering.  */
-  __atomic_thread_fence (__ATOMIC_SEQ_CST);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_si128 (int __A)
-{
-  return _mm_set_epi32 (0, 0, 0, __A);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_si128 (long long __A)
-{
-  return __extension__ (__m128i)(__v2di){ __A, 0LL };
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_si128 (long long __A)
-{
-  return __extension__ (__m128i)(__v2di){ __A, 0LL };
-}
-
-/* Casts between various SP, DP, INT vector types.  Note that these do
-   not convert values; they only change the type.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_ps(__m128d __A)
-{
-  return (__m128) __A;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_si128(__m128d __A)
-{
-  return (__m128i) __A;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_pd(__m128 __A)
-{
-  return (__m128d) __A;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_si128(__m128 __A)
-{
-  return (__m128i) __A;
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_ps(__m128i __A)
-{
-  return (__m128) __A;
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_pd(__m128i __A)
-{
-  return (__m128d) __A;
-}
-
-#else
-#include_next <emmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) */
-
-#endif /* EMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
deleted file mode 100644
index 86cf1a0..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef _MM_MALLOC_H_INCLUDED
-#define _MM_MALLOC_H_INCLUDED
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <stdlib.h>
-
-/* We can't rely on <stdlib.h> to declare posix_memalign, since its
-   prototype may not be visible under every feature-test macro.  */
-#ifndef __cplusplus
-extern int posix_memalign (void **, size_t, size_t);
-#else
-extern "C" int posix_memalign (void **, size_t, size_t) throw ();
-#endif
-
-static __inline void *
-_mm_malloc (size_t size, size_t alignment)
-{
-  /* PowerPC64 ELF V2 ABI requires quadword alignment.  */
-  size_t vec_align = sizeof (__vector float);
-  void *ptr;
-
-  if (alignment < vec_align)
-    alignment = vec_align;
-  if (posix_memalign (&ptr, alignment, size) == 0)
-    return ptr;
-  else
-    return NULL;
-}
-
-static __inline void
-_mm_free (void * ptr)
-{
-  free (ptr);
-}
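-
-/* Typical usage (a sketch; the size and alignment are illustrative):
-     float *buf = _mm_malloc (1024 * sizeof (float), 16);
-     if (buf) { ...; _mm_free (buf); }
-   Alignments below quadword are raised to sizeof (__vector float).  */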
-
-#else
-#include_next <mm_malloc.h>
-#endif
-
-#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
deleted file mode 100644
index 54e4ee9..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
+++ /dev/null
@@ -1,1451 +0,0 @@
-/*===---- mmintrin.h - Implementation of MMX intrinsics on PowerPC ---------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header file helps port code that uses Intel intrinsics
-   explicitly from x86_64 to powerpc64/powerpc64le.
-
-   Since the PowerPC target doesn't support a native 64-bit vector
-   type, we typedef __m64 to 64-bit unsigned long long in the MMX
-   intrinsics, which works well for _si64 and some _pi32 operations.
-
-   For _pi16 and _pi8 operations, it is better to transfer __m64 into
-   a 128-bit PowerPC vector first.  Power8 introduced direct register
-   move instructions, which make such implementations more efficient.
-
-   It is the user's responsibility to determine whether the results of
-   such a port are acceptable or whether further changes are needed.
-   Please note that much code using Intel intrinsics CAN BE REWRITTEN
-   in more portable and efficient standard C or GNU C extensions with
-   64-bit scalar operations, or 128-bit SSE/Altivec operations, which
-   is the recommended approach.  */
-#error                                                                         \
-    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
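-
-/* As a sketch (file and target names are illustrative), an audited port
-   might build with:
-     clang -target powerpc64le-linux-gnu -DNO_WARN_X86_INTRINSICS -c mmx_code.c  */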
-
-#ifndef _MMINTRIN_H_INCLUDED
-#define _MMINTRIN_H_INCLUDED
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-/* The Intel API is flexible enough that we must allow aliasing with other
-   vector types, and their scalar components.  */
-typedef __attribute__((__aligned__(8))) unsigned long long __m64;
-
-typedef __attribute__((__aligned__(8))) union {
-  __m64 as_m64;
-  char as_char[8];
-  signed char as_signed_char[8];
-  short as_short[4];
-  int as_int[2];
-  long long as_long_long;
-  float as_float[2];
-  double as_double;
-} __m64_union;
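-
-/* Illustrative use of the union: the non-vector fallbacks below
-   round-trip an __m64 through it to touch individual lanes, e.g.
-     __m64_union u;
-     u.as_m64 = m;
-     u.as_short[0] = (short) -1;  */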
-
-/* Empty the multimedia state.  */
-extern __inline void
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_empty(void) {
-  /* nothing to do on PowerPC.  */
-}
-
-extern __inline void
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_empty(void) {
-  /* nothing to do on PowerPC.  */
-}
-
-/* Convert I to a __m64 object.  The integer is zero-extended to 64 bits.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtsi32_si64(int __i) {
-  return (__m64)(unsigned int)__i;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_from_int(int __i) {
-  return _mm_cvtsi32_si64(__i);
-}
-
-/* Convert the lower 32 bits of the __m64 object into an integer.  */
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtsi64_si32(__m64 __i) {
-  return ((int)__i);
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_to_int(__m64 __i) {
-  return _mm_cvtsi64_si32(__i);
-}
-
-/* Convert I to a __m64 object.  */
-
-/* Intel intrinsic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_from_int64(long long __i) {
-  return (__m64)__i;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtsi64_m64(long long __i) {
-  return (__m64)__i;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtsi64x_si64(long long __i) {
-  return (__m64)__i;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set_pi64x(long long __i) {
-  return (__m64)__i;
-}
-
-/* Convert the __m64 object to a 64-bit integer.  */
-
-/* Intel intrinsic.  */
-extern __inline long long
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_to_int64(__m64 __i) {
-  return (long long)__i;
-}
-
-extern __inline long long
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtm64_si64(__m64 __i) {
-  return (long long)__i;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cvtsi64_si64x(__m64 __i) {
-  return (long long)__i;
-}
-
-#ifdef _ARCH_PWR8
-/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
-   the result, and the four 16-bit values from M2 into the upper four 8-bit
-   values of the result, all with signed saturation.  */
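-/* For example, packing the shorts { 1000, -1000, 5, -5, ... } yields the
-   saturated bytes { 127, -128, 5, -5, ... }.  */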
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_packs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short vm1;
-  __vector signed char vresult;
-
-  vm1 = (__vector signed short)(__vector unsigned long long)
-#ifdef __LITTLE_ENDIAN__
-      {__m1, __m2};
-#else
-      {__m2, __m1};
-#endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_packsswb(__m64 __m1, __m64 __m2) {
-  return _mm_packs_pi16(__m1, __m2);
-}
-
-/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
-   the result, and the two 32-bit values from M2 into the upper two 16-bit
-   values of the result, all with signed saturation.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_packs_pi32(__m64 __m1, __m64 __m2) {
-  __vector signed int vm1;
-  __vector signed short vresult;
-
-  vm1 = (__vector signed int)(__vector unsigned long long)
-#ifdef __LITTLE_ENDIAN__
-      {__m1, __m2};
-#else
-      {__m2, __m1};
-#endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_packssdw(__m64 __m1, __m64 __m2) {
-  return _mm_packs_pi32(__m1, __m2);
-}
-
-/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
-   the result, and the four 16-bit values from M2 into the upper four 8-bit
-   values of the result, all with unsigned saturation.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_packs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned char r;
-  __vector signed short vm1 = (__vector signed short)(__vector long long)
-#ifdef __LITTLE_ENDIAN__
-      {__m1, __m2};
-#else
-      {__m2, __m1};
-#endif
-  const __vector signed short __zero = {0};
-  __vector __bool short __select = vec_cmplt(vm1, __zero);
-  r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
-  __vector __bool char packsel = vec_pack(__select, __select);
-  r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
-  return (__m64)((__vector long long)r)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_packuswb(__m64 __m1, __m64 __m2) {
-  return _mm_packs_pu16(__m1, __m2);
-}
-#endif /* end ARCH_PWR8 */
-
-/* Interleave the four 8-bit values from the high half of M1 with the four
-   8-bit values from the high half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[1];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = m1.as_char[4];
-  res.as_char[1] = m2.as_char[4];
-  res.as_char[2] = m1.as_char[5];
-  res.as_char[3] = m2.as_char[5];
-  res.as_char[4] = m1.as_char[6];
-  res.as_char[5] = m2.as_char[6];
-  res.as_char[6] = m1.as_char[7];
-  res.as_char[7] = m2.as_char[7];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpckhbw(__m64 __m1, __m64 __m2) {
-  return _mm_unpackhi_pi8(__m1, __m2);
-}
-
-/* Interleave the two 16-bit values from the high half of M1 with the two
-   16-bit values from the high half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = m1.as_short[2];
-  res.as_short[1] = m2.as_short[2];
-  res.as_short[2] = m1.as_short[3];
-  res.as_short[3] = m2.as_short[3];
-
-  return (__m64)res.as_m64;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpckhwd(__m64 __m1, __m64 __m2) {
-  return _mm_unpackhi_pi16(__m1, __m2);
-}
-
-/* Interleave the 32-bit value from the high half of M1 with the 32-bit
-   value from the high half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = m1.as_int[1];
-  res.as_int[1] = m2.as_int[1];
-
-  return (__m64)res.as_m64;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpckhdq(__m64 __m1, __m64 __m2) {
-  return _mm_unpackhi_pi32(__m1, __m2);
-}
-
-/* Interleave the four 8-bit values from the low half of M1 with the four
-   8-bit values from the low half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = m1.as_char[0];
-  res.as_char[1] = m2.as_char[0];
-  res.as_char[2] = m1.as_char[1];
-  res.as_char[3] = m2.as_char[1];
-  res.as_char[4] = m1.as_char[2];
-  res.as_char[5] = m2.as_char[2];
-  res.as_char[6] = m1.as_char[3];
-  res.as_char[7] = m2.as_char[3];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpcklbw(__m64 __m1, __m64 __m2) {
-  return _mm_unpacklo_pi8(__m1, __m2);
-}
-
-/* Interleave the two 16-bit values from the low half of M1 with the two
-   16-bit values from the low half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = m1.as_short[0];
-  res.as_short[1] = m2.as_short[0];
-  res.as_short[2] = m1.as_short[1];
-  res.as_short[3] = m2.as_short[1];
-
-  return (__m64)res.as_m64;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpcklwd(__m64 __m1, __m64 __m2) {
-  return _mm_unpacklo_pi16(__m1, __m2);
-}
-
-/* Interleave the 32-bit value from the low half of M1 with the 32-bit
-   value from the low half of M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = m1.as_int[0];
-  res.as_int[1] = m2.as_int[0];
-
-  return (__m64)res.as_m64;
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_punpckldq(__m64 __m1, __m64 __m2) {
-  return _mm_unpacklo_pi32(__m1, __m2);
-}
-
-/* Add the 8-bit values in M1 to the 8-bit values in M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_add_pi8(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed char a, b, c;
-
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = m1.as_char[0] + m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] + m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] + m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] + m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] + m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] + m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] + m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] + m2.as_char[7];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddb(__m64 __m1, __m64 __m2) {
-  return _mm_add_pi8(__m1, __m2);
-}
-
-/* Add the 16-bit values in M1 to the 16-bit values in M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_add_pi16(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = m1.as_short[0] + m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] + m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] + m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] + m2.as_short[3];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddw(__m64 __m1, __m64 __m2) {
-  return _mm_add_pi16(__m1, __m2);
-}
-
-/* Add the 32-bit values in M1 to the 32-bit values in M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_add_pi32(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR9
-  __vector signed int a, b, c;
-
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = m1.as_int[0] + m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] + m2.as_int[1];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddd(__m64 __m1, __m64 __m2) {
-  return _mm_add_pi32(__m1, __m2);
-}
-
-/* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sub_pi8(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed char a, b, c;
-
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = m1.as_char[0] - m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] - m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] - m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] - m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] - m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] - m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] - m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] - m2.as_char[7];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubb(__m64 __m1, __m64 __m2) {
-  return _mm_sub_pi8(__m1, __m2);
-}
-
-/* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sub_pi16(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = m1.as_short[0] - m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] - m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] - m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] - m2.as_short[3];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubw(__m64 __m1, __m64 __m2) {
-  return _mm_sub_pi16(__m1, __m2);
-}
-
-/* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sub_pi32(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR9
-  __vector signed int a, b, c;
-
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = m1.as_int[0] - m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] - m2.as_int[1];
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubd(__m64 __m1, __m64 __m2) {
-  return _mm_sub_pi32(__m1, __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_add_si64(__m64 __m1, __m64 __m2) {
-  return (__m1 + __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sub_si64(__m64 __m1, __m64 __m2) {
-  return (__m1 - __m2);
-}
-
-/* Shift the 64-bit value in M left by COUNT.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sll_si64(__m64 __m, __m64 __count) {
-  return (__m << __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psllq(__m64 __m, __m64 __count) {
-  return _mm_sll_si64(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_slli_si64(__m64 __m, const int __count) {
-  return (__m << __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psllqi(__m64 __m, const int __count) {
-  return _mm_slli_si64(__m, __count);
-}
-
-/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srl_si64(__m64 __m, __m64 __count) {
-  return (__m >> __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrlq(__m64 __m, __m64 __count) {
-  return _mm_srl_si64(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srli_si64(__m64 __m, const int __count) {
-  return (__m >> __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrlqi(__m64 __m, const int __count) {
-  return _mm_srli_si64(__m, __count);
-}
-
-/* Bit-wise AND the 64-bit values in M1 and M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_and_si64(__m64 __m1, __m64 __m2) {
-  return (__m1 & __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pand(__m64 __m1, __m64 __m2) {
-  return _mm_and_si64(__m1, __m2);
-}
-
-/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
-   64-bit value in M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_andnot_si64(__m64 __m1, __m64 __m2) {
-  return (~__m1 & __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pandn(__m64 __m1, __m64 __m2) {
-  return _mm_andnot_si64(__m1, __m2);
-}
-
-/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_or_si64(__m64 __m1, __m64 __m2) {
-  return (__m1 | __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_por(__m64 __m1, __m64 __m2) {
-  return _mm_or_si64(__m1, __m2);
-}
-
-/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_xor_si64(__m64 __m1, __m64 __m2) {
-  return (__m1 ^ __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pxor(__m64 __m1, __m64 __m2) {
-  return _mm_xor_si64(__m1, __m2);
-}
-
-/* Creates a 64-bit zero.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_setzero_si64(void) {
-  return (__m64)0;
-}
-
-/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
-   test is true and zero if false.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
-#if defined(_ARCH_PWR6) && defined(__powerpc64__)
-  __m64 res;
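-  /* POWER6 cmpb writes 0xFF or 0x00 per byte directly in a GPR, so no
-     transfer to a vector register is needed.  */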
-  __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
-  return (res);
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpeqb(__m64 __m1, __m64 __m2) {
-  return _mm_cmpeq_pi8(__m1, __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed char a, b, c;
-
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = (__vector signed char)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpgtb(__m64 __m1, __m64 __m2) {
-  return _mm_cmpgt_pi8(__m1, __m2);
-}
-
-/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
-   the test is true and zero if false.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpeqw(__m64 __m1, __m64 __m2) {
-  return _mm_cmpeq_pi16(__m1, __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR8
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpgtw(__m64 __m1, __m64 __m2) {
-  return _mm_cmpgt_pi16(__m1, __m2);
-}
-
-/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
-   the test is true and zero if false.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR9
-  __vector signed int a, b, c;
-
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpeqd(__m64 __m1, __m64 __m2) {
-  return _mm_cmpeq_pi32(__m1, __m2);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
-#if _ARCH_PWR9
-  __vector signed int a, b, c;
-
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
-
-  res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
-
-  return (__m64)res.as_m64;
-#endif
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pcmpgtd(__m64 __m1, __m64 __m2) {
-  return _mm_cmpgt_pi32(__m1, __m2);
-}
-
-#if _ARCH_PWR8
-/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
-   saturated arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_adds_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
-
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddsb(__m64 __m1, __m64 __m2) {
-  return _mm_adds_pi8(__m1, __m2);
-}
-/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
-   saturated arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_adds_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddsw(__m64 __m1, __m64 __m2) {
-  return _mm_adds_pi16(__m1, __m2);
-}
-/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
-   saturated arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_adds_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddusb(__m64 __m1, __m64 __m2) {
-  return _mm_adds_pu8(__m1, __m2);
-}
-
-/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
-   saturated arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_adds_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
-
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_paddusw(__m64 __m1, __m64 __m2) {
-  return _mm_adds_pu16(__m1, __m2);
-}
-
-/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
-   saturating arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_subs_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
-
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubsb(__m64 __m1, __m64 __m2) {
-  return _mm_subs_pi8(__m1, __m2);
-}
-
-/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
-   signed saturating arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_subs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubsw(__m64 __m1, __m64 __m2) {
-  return _mm_subs_pi16(__m1, __m2);
-}
-
-/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
-   unsigned saturating arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_subs_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubusb(__m64 __m1, __m64 __m2) {
-  return _mm_subs_pu8(__m1, __m2);
-}
-
-/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
-   unsigned saturating arithmetic.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_subs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
-
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psubusw(__m64 __m1, __m64 __m2) {
-  return _mm_subs_pu16(__m1, __m2);
-}
-
-/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
-   four 32-bit intermediate results, which are then summed by pairs to
-   produce two 32-bit results.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_madd_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed int c;
-  __vector signed int zero = {0, 0, 0, 0};
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_vmsumshm(a, b, zero);
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pmaddwd(__m64 __m1, __m64 __m2) {
-  return _mm_madd_pi16(__m1, __m2);
-}
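-
-/* A minimal worked example of the pairwise multiply-add above, assuming
-   a powerpc64le Linux target and -DNO_WARN_X86_INTRINSICS; the values
-   are illustrative:
-
-     #include <mmintrin.h>
-     #include <stdio.h>
-     #include <string.h>
-
-     int main (void)
-     {
-       __m64 a = _mm_set_pi16 (4, 3, 2, 1);      // short lanes {1,2,3,4}
-       __m64 b = _mm_set_pi16 (40, 30, 20, 10);  // short lanes {10,20,30,40}
-       __m64 c = _mm_madd_pi16 (a, b);           // {1*10+2*20, 3*30+4*40}
-       int out[2];
-       memcpy (out, &c, sizeof (out));
-       printf ("%d %d\n", out[0], out[1]);       // prints: 50 250
-       return 0;
-     }
-*/
-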
-/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
-   M2 and produce the high 16 bits of the 32-bit results.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed short c;
-  __vector signed int w0, w1;
-  __vector unsigned char xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
-      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
-      0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
-#endif
-  };
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-
-  w0 = vec_vmulesh(a, b);
-  w1 = vec_vmulosh(a, b);
-  c = (__vector signed short)vec_perm(w0, w1, xform1);
-
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pmulhw(__m64 __m1, __m64 __m2) {
-  return _mm_mulhi_pi16(__m1, __m2);
-}
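-
-/* Per lane, the scalar equivalent of _mm_mulhi_pi16 is the high
-   halfword of the full 32-bit product (a sketch, values illustrative):
-
-     short mulhi16 (short a, short b)
-     {
-       return (short) (((int) a * (int) b) >> 16);  // 16384 * 4 -> 1
-     }
-*/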
-
-/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
-   the low 16 bits of the results.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
-
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = a * b;
-  return (__m64)((__vector long long)c)[0];
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pmullw(__m64 __m1, __m64 __m2) {
-  return _mm_mullo_pi16(__m1, __m2);
-}
-
-/* Shift four 16-bit values in M left by COUNT.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sll_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
-
-  if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sl(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
-  } else
-    return (0);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psllw(__m64 __m, __m64 __count) {
-  return _mm_sll_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_slli_pi16(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_sll_pi16.  */
-  return _mm_sll_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psllwi(__m64 __m, int __count) {
-  return _mm_slli_pi16(__m, __count);
-}
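-
-/* Count handling sketch for the 16-bit shifts above: per lane the
-   scalar equivalent is (values illustrative)
-
-     short sll16 (short a, unsigned long long count)
-     {
-       return count <= 15 ? (short) (a << count) : 0;  // 1 << 3 -> 8; count 16 -> 0
-     }
-
-   so shift counts greater than 15 clear the result rather than being
-   taken modulo the lane width.  */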
-
-/* Shift two 32-bit values in M left by COUNT.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sll_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
-
-  m.as_m64 = __m;
-
-  res.as_int[0] = m.as_int[0] << __count;
-  res.as_int[1] = m.as_int[1] << __count;
-  return (res.as_m64);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pslld(__m64 __m, __m64 __count) {
-  return _mm_sll_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_slli_pi32(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_sll_pi32.  */
-  return _mm_sll_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_pslldi(__m64 __m, int __count) {
-  return _mm_slli_pi32(__m, __count);
-}
-
-/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sra_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
-
-  if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sra(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
-  } else
-    return (0);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psraw(__m64 __m, __m64 __count) {
-  return _mm_sra_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srai_pi16(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_sra_pi16.  */
-  return _mm_sra_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrawi(__m64 __m, int __count) {
-  return _mm_srai_pi16(__m, __count);
-}
-
-/* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_sra_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
-
-  m.as_m64 = __m;
-
-  res.as_int[0] = m.as_int[0] >> __count;
-  res.as_int[1] = m.as_int[1] >> __count;
-  return (res.as_m64);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrad(__m64 __m, __m64 __count) {
-  return _mm_sra_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srai_pi32(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_sra_pi32.  */
-  return _mm_sra_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psradi(__m64 __m, int __count) {
-  return _mm_srai_pi32(__m, __count);
-}
-
-/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srl_pi16(__m64 __m, __m64 __count) {
-  __vector unsigned short m, r;
-  __vector unsigned short c;
-
-  if (__count <= 15) {
-    m = (__vector unsigned short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sr(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
-  } else
-    return (0);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrlw(__m64 __m, __m64 __count) {
-  return _mm_srl_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srli_pi16(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_srl_pi16.  */
-  return _mm_srl_pi16(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrlwi(__m64 __m, int __count) {
-  return _mm_srli_pi16(__m, __count);
-}
-
-/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srl_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
-
-  m.as_m64 = __m;
-
-  res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
-  res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
-  return (res.as_m64);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrld(__m64 __m, __m64 __count) {
-  return _mm_srl_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_srli_pi32(__m64 __m, int __count) {
-  /* Promote int to long then invoke mm_srl_pi32.  */
-  return _mm_srl_pi32(__m, __count);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _m_psrldi(__m64 __m, int __count) {
-  return _mm_srli_pi32(__m, __count);
-}
-#endif /* _ARCH_PWR8 */
-
-/* Creates a vector of two 32-bit values; I0 is least significant.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set_pi32(int __i1, int __i0) {
-  __m64_union res;
-
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
-}
-
-/* Creates a vector of four 16-bit values; W0 is least significant.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
-  __m64_union res;
-
-  res.as_short[0] = __w0;
-  res.as_short[1] = __w1;
-  res.as_short[2] = __w2;
-  res.as_short[3] = __w3;
-  return (res.as_m64);
-}
-
-/* Creates a vector of eight 8-bit values; B0 is least significant.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
-                char __b2, char __b1, char __b0) {
-  __m64_union res;
-
-  res.as_char[0] = __b0;
-  res.as_char[1] = __b1;
-  res.as_char[2] = __b2;
-  res.as_char[3] = __b3;
-  res.as_char[4] = __b4;
-  res.as_char[5] = __b5;
-  res.as_char[6] = __b6;
-  res.as_char[7] = __b7;
-  return (res.as_m64);
-}
-
-/* Similar, but with the arguments in reverse order.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_setr_pi32(int __i0, int __i1) {
-  __m64_union res;
-
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) {
-  return _mm_set_pi16(__w3, __w2, __w1, __w0);
-}
-
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4,
-                 char __b5, char __b6, char __b7) {
-  return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
-}
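-
-/* Ordering sketch for the set/setr pairs (values illustrative):
-   _mm_set_* takes the most-significant element first and _mm_setr_*
-   the least-significant first, so these build the same vector:
-
-     _mm_setr_pi16 (10, 20, 30, 40);  // short lanes {10,20,30,40}, low to high
-     _mm_set_pi16  (40, 30, 20, 10);  // same value
-*/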
-
-/* Creates a vector of two 32-bit values, both elements containing I.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set1_pi32(int __i) {
-  __m64_union res;
-
-  res.as_int[0] = __i;
-  res.as_int[1] = __i;
-  return (res.as_m64);
-}
-
-/* Creates a vector of four 16-bit values, all elements containing W.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set1_pi16(short __w) {
-#if _ARCH_PWR9
-  __vector signed short w;
-
-  w = (__vector signed short)vec_splats(__w);
-  return (__m64)((__vector long long)w)[0];
-#else
-  __m64_union res;
-
-  res.as_short[0] = __w;
-  res.as_short[1] = __w;
-  res.as_short[2] = __w;
-  res.as_short[3] = __w;
-  return (res.as_m64);
-#endif
-}
-
-/* Creates a vector of eight 8-bit values, all elements containing B.  */
-extern __inline __m64
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_set1_pi8(signed char __b) {
-#if _ARCH_PWR8
-  __vector signed char b;
-
-  b = (__vector signed char)vec_splats(__b);
-  return (__m64)((__vector long long)b)[0];
-#else
-  __m64_union res;
-
-  res.as_char[0] = __b;
-  res.as_char[1] = __b;
-  res.as_char[2] = __b;
-  res.as_char[3] = __b;
-  res.as_char[4] = __b;
-  res.as_char[5] = __b;
-  res.as_char[6] = __b;
-  res.as_char[7] = __b;
-  return (res.as_m64);
-#endif
-}
-
-#else
-#include_next <mmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
deleted file mode 100644
index 8d4046b..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64le.
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.
-
-   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
-   is a good match for most SIMD operations.  However, the horizontal
-   add/sub operations require the data pairs to be permuted into
-   separate registers with vertical even/odd alignment, and the addsub
-   operation requires that the sign of only the even-numbered elements
-   be flipped (xored with -0.0).
-   For larger blocks of code using these intrinsic implementations,
-   the compiler should be able to schedule instructions to avoid
-   additional latency.
-
-   In the specific case of the monitor and mwait instructions, there is
-   no direct equivalent in the PowerISA at this time, so those
-   intrinsics are not implemented.  */
-#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this warning."
-#endif
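-
-/* A porting sketch (the target triple and CPU level below are
-   assumptions for illustration, not requirements stated by this
-   header): x86_64 sources that include <pmmintrin.h> can be rebuilt
-   for POWER with something like
-
-     clang --target=powerpc64le-unknown-linux-gnu -mcpu=power8 \
-           -DNO_WARN_X86_INTRINSICS -O2 -c ported_file.c
-
-   after which the SSE3 calls below are implemented via VMX/VSX.  */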
-
-#ifndef PMMINTRIN_H_
-#define PMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-/* We need definitions from the SSE2 and SSE header files.  */
-#include <emmintrin.h>
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_ps (__m128 __X, __m128 __Y)
-{
-  const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
-  __v4sf even_neg_Y = vec_xor(__Y, even_n0);
-  return (__m128) vec_add (__X, even_neg_Y);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_pd (__m128d __X, __m128d __Y)
-{
-  const __v2df even_n0 = {-0.0, 0.0};
-  __v2df even_neg_Y = vec_xor(__Y, even_n0);
-  return (__m128d) vec_add (__X, even_neg_Y);
-}
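-
-/* Scalar sketch of the two addsub intrinsics above: xoring the
-   even-numbered lanes with -0.0 flips their sign, so the single vector
-   add becomes subtract-on-even / add-on-odd:
-
-     r[0] = __X[0] - __Y[0];  r[1] = __X[1] + __Y[1];   // _mm_addsub_ps
-     r[2] = __X[2] - __Y[2];  r[3] = __X[3] + __Y[3];
-*/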
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_ps (__m128 __X, __m128 __Y)
-{
-  __vector unsigned char xform2 = {
-      0x00, 0x01, 0x02, 0x03,
-      0x08, 0x09, 0x0A, 0x0B,
-      0x10, 0x11, 0x12, 0x13,
-      0x18, 0x19, 0x1A, 0x1B
-    };
-  __vector unsigned char xform1 = {
-      0x04, 0x05, 0x06, 0x07,
-      0x0C, 0x0D, 0x0E, 0x0F,
-      0x14, 0x15, 0x16, 0x17,
-      0x1C, 0x1D, 0x1E, 0x1F
-    };
-  return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
-			   vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_ps (__m128 __X, __m128 __Y)
-{
-  __vector unsigned char xform2 = {
-      0x00, 0x01, 0x02, 0x03,
-      0x08, 0x09, 0x0A, 0x0B,
-      0x10, 0x11, 0x12, 0x13,
-      0x18, 0x19, 0x1A, 0x1B
-    };
-  __vector unsigned char xform1 = {
-      0x04, 0x05, 0x06, 0x07,
-      0x0C, 0x0D, 0x0E, 0x0F,
-      0x14, 0x15, 0x16, 0x17,
-      0x1C, 0x1D, 0x1E, 0x1F
-    };
-  return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
-			   vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pd (__m128d __X, __m128d __Y)
-{
-  return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
-				  vec_mergel ((__v2df) __X, (__v2df)__Y));
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pd (__m128d __X, __m128d __Y)
-{
-  return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
-			    vec_mergel ((__v2df) __X, (__v2df)__Y));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehdup_ps (__m128 __X)
-{
-  return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_moveldup_ps (__m128 __X)
-{
-  return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loaddup_pd (double const *__P)
-{
-  return (__m128d) vec_splats (*__P);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movedup_pd (__m128d __X)
-{
-  return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lddqu_si128 (__m128i const *__P)
-{
-  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
-}
-
-/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_mwait.  */
-
-#else
-#include_next <pmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* PMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
deleted file mode 100644
index 6747032..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.
-
-   NOTE: This is NOT a complete implementation of the SSE4 intrinsics!  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
-
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.  */
-#error                                                                         \
-    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
-
-#ifndef SMMINTRIN_H_
-#define SMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-#include <tmmintrin.h>
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi8(__m128i __X, const int __N) {
-  return (unsigned char)((__v16qi)__X)[__N & 15];
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi32(__m128i __X, const int __N) {
-  return ((__v4si)__X)[__N & 3];
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_epi64(__m128i __X, const int __N) {
-  return ((__v2di)__X)[__N & 1];
-}
-
-extern __inline int
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_extract_ps(__m128 __X, const int __N) {
-  return ((__v4si)__X)[__N & 3];
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
-  __v16qi __charmask = vec_splats((signed char)__imm8);
-  __charmask = vec_gb(__charmask);
-  __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
-#ifdef __BIG_ENDIAN__
-  __shortmask = vec_reve(__shortmask);
-#endif
-  return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
-}
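-
-/* Selection sketch for _mm_blend_epi16 (values illustrative): a set
-   bit i of __imm8 picks halfword lane i from __B, a clear bit keeps
-   the lane from __A:
-
-     __m128i r = _mm_blend_epi16 (a, b, 0x0F);  // lanes 0..3 from b, 4..7 from a
-*/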
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
-  const __v16qu __seven = vec_splats((unsigned char)0x07);
-  __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
-  return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
-  __v16qi result = (__v16qi)__A;
-  result[__N & 0xf] = __D;
-  return (__m128i)result;
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
-  __v4si result = (__v4si)__A;
-  result[__N & 3] = __D;
-  return (__m128i)result;
-}
-
-extern __inline __m128i
-    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-    _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
-  __v2di result = (__v2di)__A;
-  result[__N & 1] = __D;
-  return (__m128i)result;
-}
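-
-/* Usage sketch for the extract/insert pairs above (values illustrative):
-
-     __m128i v = _mm_set_epi32 (4, 3, 2, 1);
-     int x = _mm_extract_epi32 (v, 2);   // 3
-     v = _mm_insert_epi32 (v, 99, 0);    // lane 0 becomes 99
-*/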
-
-#else
-#include_next <smmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* _SMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
deleted file mode 100644
index ebef7b8..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
+++ /dev/null
@@ -1,496 +0,0 @@
-/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header is distributed to simplify porting x86_64 code that
-   makes explicit use of Intel intrinsics to powerpc64le.
-
-   It is the user's responsibility to determine if the results are
-   acceptable and make additional changes as necessary.
-
-   Note that much code that uses Intel intrinsics can be rewritten in
-   standard C or GNU C extensions, which are more portable and better
-   optimized across multiple targets.  */
-#error                                                                         \
-    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
-
-#ifndef TMMINTRIN_H_
-#define TMMINTRIN_H_
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-#include <altivec.h>
-
-/* We need definitions from the SSE header files.  */
-#include <pmmintrin.h>
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi16 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v8hi) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi32 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v4si) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi8 (__m128i __A)
-{
-  return (__m128i) vec_abs ((__v16qi) __A);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi16 (__m64 __A)
-{
-  __v8hi __B = (__v8hi) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi32 (__m64 __A)
-{
-  __v4si __B = (__v4si) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi8 (__m64 __A)
-{
-  __v16qi __B = (__v16qi) (__v2du) { __A, __A };
-  return (__m64) ((__v2du) vec_abs (__B))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
-{
-  if (__builtin_constant_p (__count) && __count < 16)
-    {
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_reve ((__v16qu) __A);
-      __B = (__m128i) vec_reve ((__v16qu) __B);
-#endif
-      __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_reve ((__v16qu) __A);
-#endif
-      return __A;
-    }
-
-  if (__count == 0)
-    return __B;
-
-  if (__count >= 16)
-    {
-      if (__count >= 32)
-	{
-	  const __v16qu zero = { 0 };
-	  return (__m128i) zero;
-	}
-      else
-	{
-	  const __v16qu __shift =
-	    vec_splats ((unsigned char) ((__count - 16) * 8));
-#ifdef __LITTLE_ENDIAN__
-	  return (__m128i) vec_sro ((__v16qu) __A, __shift);
-#else
-	  return (__m128i) vec_slo ((__v16qu) __A, __shift);
-#endif
-	}
-    }
-  else
-    {
-      const __v16qu __shiftA =
-	vec_splats ((unsigned char) ((16 - __count) * 8));
-      const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
-#ifdef __LITTLE_ENDIAN__
-      __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
-      __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
-#else
-      __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
-      __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
-#endif
-      return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
-    }
-}
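-
-/* Byte-level sketch of _mm_alignr_epi8 above: the 32-byte concatenation
-   (__A:__B), with __A in the upper half, is shifted right by __count
-   bytes and the low 16 bytes are kept, so for __count == 4
-   (illustrative):
-
-     r bytes 0..11  = __B bytes 4..15
-     r bytes 12..15 = __A bytes 0..3
-*/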
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
-{
-  if (__count < 16)
-    {
-      __v2du __C = { __B, __A };
-#ifdef __LITTLE_ENDIAN__
-      const __v4su __shift = { __count << 3, 0, 0, 0 };
-      __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
-#else
-      const __v4su __shift = { 0, 0, 0, __count << 3 };
-      __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
-#endif
-      return (__m64) __C[0];
-    }
-  else
-    {
-      const __m64 __zero = { 0 };
-      return __zero;
-    }
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_add (__C, __D);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi32 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
-  __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
-  __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
-  return (__m128i) vec_add (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi16 (__m64 __A, __m64 __B)
-{
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_add (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi32 (__m64 __A, __m64 __B)
-{
-  __v4si __C = (__v4si) (__v2du) { __A, __B };
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11,  0,  1,  2,  3,  8,  9, 10, 11 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15,  4,  5,  6,  7, 12, 13, 14, 15 };
-  __v4si __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_add (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_epi16 (__m128i __A, __m128i __B)
-{
-  __v4si __C = { 0 }, __D = { 0 };
-  __C = vec_sum4s ((__v8hi) __A, __C);
-  __D = vec_sum4s ((__v8hi) __B, __D);
-  __C = (__v4si) vec_packs (__C, __D);
-  return (__m128i) __C;
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_pi16 (__m64 __A, __m64 __B)
-{
-  const __v4si __zero = { 0 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v4si __D = vec_sum4s (__C, __zero);
-  __C = vec_packs (__D, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_sub (__C, __D);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi32 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
-  __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
-  __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
-  return (__m128i) vec_sub (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi16 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v8hi __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_sub (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi32 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  2,  3,  8,  9, 10, 11,  0,  1,  2,  3,  8,  9, 10, 11 };
-  const __v16qu __Q =
-    {  4,  5,  6,  7, 12, 13, 14, 15,  4,  5,  6,  7, 12, 13, 14, 15 };
-  __v4si __C = (__v4si) (__v2du) { __A, __B };
-  __v4si __D = vec_perm (__C, __C, __Q);
-  __C = vec_perm (__C, __C, __P);
-  __C = vec_sub (__C, __D);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_epi16 (__m128i __A, __m128i __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
-  __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
-  return (__m128i) vec_subs (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_pi16 (__m64 __A, __m64 __B)
-{
-  const __v16qu __P =
-    {  0,  1,  4,  5,  8,  9, 12, 13,  0,  1,  4,  5,  8,  9, 12, 13 };
-  const __v16qu __Q =
-    {  2,  3,  6,  7, 10, 11, 14, 15,  2,  3,  6,  7, 10, 11, 14, 15 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __B };
-  __v8hi __D = vec_perm (__C, __C, __P);
-  __v8hi __E = vec_perm (__C, __C, __Q);
-  __C = vec_subs (__D, __E);
-  return (__m64) ((__v2du) __C)[1];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi8 (__m128i __A, __m128i __B)
-{
-  const __v16qi __zero = { 0 };
-  __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero);
-  __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B);
-  return (__m128i) vec_sel (__C, __zero, __select);
-}
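-
-/* Per-byte scalar sketch of _mm_shuffle_epi8 above, where a_bytes and
-   sel are the 16 bytes of __A and __B (illustrative pseudocode): a set
-   high bit in the selector zeroes the lane, otherwise the low four
-   bits index __A:
-
-     r[i] = (sel[i] & 0x80) ? 0 : a_bytes[sel[i] & 0x0F];
-*/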
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi8 (__m64 __A, __m64 __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __C = (__v16qi) (__v2du) { __A, __A };
-  __v16qi __D = (__v16qi) (__v2du) { __B, __B };
-  __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero);
-  __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D);
-  __C = vec_sel (__C, __zero, __select);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi8 (__m128i __A, __m128i __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero);
-  __v16qi __selectpos =
-    (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero));
-  __v16qi __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi16 (__m128i __A, __m128i __B)
-{
-  const __v8hi __zero = { 0 };
-  __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero);
-  __v8hi __selectpos =
-    (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero));
-  __v8hi __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi32 (__m128i __A, __m128i __B)
-{
-  const __v4si __zero = { 0 };
-  __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero);
-  __v4si __selectpos =
-    (__v4si) vec_neg ((__v4si) vec_cmpgt ((__v4si) __B, __zero));
-  __v4si __conv = vec_add (__selectneg, __selectpos);
-  return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi8 (__m64 __A, __m64 __B)
-{
-  const __v16qi __zero = { 0 };
-  __v16qi __C = (__v16qi) (__v2du) { __A, __A };
-  __v16qi __D = (__v16qi) (__v2du) { __B, __B };
-  __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi16 (__m64 __A, __m64 __B)
-{
-  const __v8hi __zero = { 0 };
-  __v8hi __C = (__v8hi) (__v2du) { __A, __A };
-  __v8hi __D = (__v8hi) (__v2du) { __B, __B };
-  __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi32 (__m64 __A, __m64 __B)
-{
-  const __v4si __zero = { 0 };
-  __v4si __C = (__v4si) (__v2du) { __A, __A };
-  __v4si __D = (__v4si) (__v2du) { __B, __B };
-  __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_epi16 (__m128i __A, __m128i __B)
-{
-  __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
-  __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned);
-  __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned);
-  __v8hi __E = vec_unpackh ((__v16qi) __B);
-  __v8hi __F = vec_unpackl ((__v16qi) __B);
-  __C = vec_mul (__C, __E);
-  __D = vec_mul (__D, __F);
-  const __v16qu __odds  =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __evens =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __E = vec_perm (__C, __D, __odds);
-  __F = vec_perm (__C, __D, __evens);
-  return (__m128i) vec_adds (__E, __F);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_pi16 (__m64 __A, __m64 __B)
-{
-  __v8hi __C = (__v8hi) (__v2du) { __A, __A };
-  __C = vec_unpackl ((__v16qi) __C);
-  const __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
-  __C = vec_and (__C, __unsigned);
-  __v8hi __D = (__v8hi) (__v2du) { __B, __B };
-  __D = vec_unpackl ((__v16qi) __D);
-  __D = vec_mul (__C, __D);
-  const __v16qu __odds  =
-    {  0,  1,  4,  5,  8,  9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
-  const __v16qu __evens =
-    {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
-  __C = vec_perm (__D, __D, __odds);
-  __D = vec_perm (__D, __D, __evens);
-  __C = vec_adds (__C, __D);
-  return (__m64) ((__v2du) (__C))[0];
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_epi16 (__m128i __A, __m128i __B)
-{
-  __v4si __C = vec_unpackh ((__v8hi) __A);
-  __v4si __D = vec_unpackh ((__v8hi) __B);
-  __C = vec_mul (__C, __D);
-  __D = vec_unpackl ((__v8hi) __A);
-  __v4si __E = vec_unpackl ((__v8hi) __B);
-  __D = vec_mul (__D, __E);
-  const __v4su __shift = vec_splats ((unsigned int) 14);
-  __C = vec_sr (__C, __shift);
-  __D = vec_sr (__D, __shift);
-  const __v4si __ones = vec_splats ((signed int) 1);
-  __C = vec_add (__C, __ones);
-  __C = vec_sr (__C, (__v4su) __ones);
-  __D = vec_add (__D, __ones);
-  __D = vec_sr (__D, (__v4su) __ones);
-  return (__m128i) vec_pack (__C, __D);
-}
-
-extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_pi16 (__m64 __A, __m64 __B)
-{
-  __v4si __C = (__v4si) (__v2du) { __A, __A };
-  __C = vec_unpackh ((__v8hi) __C);
-  __v4si __D = (__v4si) (__v2du) { __B, __B };
-  __D = vec_unpackh ((__v8hi) __D);
-  __C = vec_mul (__C, __D);
-  const __v4su __shift = vec_splats ((unsigned int) 14);
-  __C = vec_sr (__C, __shift);
-  const __v4si __ones = vec_splats ((signed int) 1);
-  __C = vec_add (__C, __ones);
-  __C = vec_sr (__C, (__v4su) __ones);
-  __v8hi __E = vec_pack (__C, __D);
-  return (__m64) ((__v2du) (__E))[0];
-}
-
-#else
-#include_next <tmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* TMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
deleted file mode 100644
index 956603d..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
+++ /dev/null
@@ -1,1844 +0,0 @@
-/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 9.0.  */
-
-#ifndef NO_WARN_X86_INTRINSICS
-/* This header file is provided to help port code that makes explicit
-   use of Intel intrinsics from x86_64 to powerpc64/powerpc64le.
-
-   Since the X86 SSE intrinsics mainly handle the __m128 type, the
-   PowerPC VMX/VSX ISA is a good match for vector float SIMD
-   operations.  However, scalar float operations in vector (XMM)
-   registers require the POWER8 VSX ISA (2.07) level.  There are also
-   differences in the data format and placement of float scalars in the
-   vector register, which require extra steps to match SSE scalar float
-   semantics on POWER.
-
-   It should be noted that X86_64's MXCSR and PowerISA's FPSCR/VSCR
-   registers differ significantly.  It's recommended to use the
-   portable <fenv.h> interface instead of accessing MXCSR directly.
-
-   Most SSE scalar float intrinsic operations can be performed more
-   efficiently as C language float scalar operations or optimized to
-   use vector SIMD operations. We recommend this for new applications. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
-#endif
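-
-/* A portability sketch for the note above (illustrative): rounding-mode
-   control that would poke MXCSR on x86 can be expressed with <fenv.h>:
-
-     #include <fenv.h>
-
-     void to_nearest (void)
-     {
-       fesetround (FE_TONEAREST);  // instead of _mm_setcsr / MXCSR writes
-     }
-*/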
-
-#ifndef _XMMINTRIN_H_INCLUDED
-#define _XMMINTRIN_H_INCLUDED
-
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
-
-/* Define a four-value permute mask.  */
-#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
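-
-/* For example, _MM_SHUFFLE (3, 2, 1, 0) expands to
-   (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, the identity selector.  */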
-
-#include <altivec.h>
-
-/* Avoid collisions between altivec.h and strict adherence to C++ and
-   C11 standards.  This should eventually be done inside altivec.h itself,
-   but only after testing a full distro build.  */
-#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
-				 (defined(__STDC_VERSION__) &&	\
-				  __STDC_VERSION__ >= 201112L))
-#undef vector
-#undef pixel
-#undef bool
-#endif
-
-/* We need type definitions from the MMX header file.  */
-#include <mmintrin.h>
-
-/* Get _mm_malloc () and _mm_free ().  */
-#if __STDC_HOSTED__
-#include <mm_malloc.h>
-#endif
-
-/* The Intel API is flexible enough that we must allow aliasing with other
-   vector types, and their scalar components.  */
-typedef vector float __m128 __attribute__((__may_alias__));
-
-/* Unaligned version of the same type.  */
-typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
-
-/* Internal data types for implementing the intrinsics.  */
-typedef vector float __v4sf;
-
-/* Create an undefined vector.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_ps (void)
-{
-  __m128 __Y = __Y;
-  return __Y;
-}
-
-/* Create a vector of zeros.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_ps (void)
-{
-  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
-}
-
-/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps (float const *__P)
-{
-  return ((__m128)vec_ld(0, (__v4sf*)__P));
-}
-
-/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_ps (float const *__P)
-{
-  return (vec_vsx_ld(0, __P));
-}
-
-/* Load four SPFP values in reverse order.  The address must be aligned.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_ps (float const *__P)
-{
-  __v4sf   __tmp;
-  __m128 result;
-  static const __vector unsigned char permute_vector =
-    { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
-	0x17, 0x10, 0x11, 0x12, 0x13 };
-
-  __tmp = vec_ld (0, (__v4sf *) __P);
-  result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
-  return result;
-}
-
-/* Create a vector with all four elements equal to F.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_ps (float __F)
-{
-  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps1 (float __F)
-{
-  return _mm_set1_ps (__F);
-}
-
-/* Create the vector [Z Y X W].  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
-{
-  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
-}
-
-/* Create the vector [W X Y Z].  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_ps (float __Z, float __Y, float __X, float __W)
-{
-  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
-}
-
-/* Store four SPFP values.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps (float *__P, __m128 __A)
-{
-  vec_st((__v4sf)__A, 0, (__v4sf*)__P);
-}
-
-/* Store four SPFP values.  The address need not be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_ps (float *__P, __m128 __A)
-{
-  *(__m128_u *)__P = __A;
-}
-
-/* Store four SPFP values in reverse order.  The address must be aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_ps (float *__P, __m128 __A)
-{
-  __v4sf   __tmp;
-  static const __vector unsigned char permute_vector =
-    { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
-	0x17, 0x10, 0x11, 0x12, 0x13 };
-
-  __tmp = (__m128) vec_perm (__A, __A, permute_vector);
-
-  _mm_store_ps (__P, __tmp);
-}
-
-/* Store the lower SPFP value across four words.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_ps (float *__P, __m128 __A)
-{
-  __v4sf __va = vec_splat((__v4sf)__A, 0);
-  _mm_store_ps (__P, __va);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps1 (float *__P, __m128 __A)
-{
-  _mm_store1_ps (__P, __A);
-}
-
-/* Create a vector with element 0 as F and the rest zero.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ss (float __F)
-{
-  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
-}
-
-/* Sets the low SPFP value of A from the low value of B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-
-  return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
-}
-
-/* Create a vector with element 0 as *P and the rest zero.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ss (float const *__P)
-{
-  return _mm_set_ss (*__P);
-}
-
-/* Stores the lower SPFP value.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ss (float *__P, __m128 __A)
-{
-  *__P = ((__v4sf)__A)[0];
-}
-
-/* Perform the respective operation on the lower SPFP (single-precision
-   floating-point) values of A and B; the upper three SPFP values are
-   passed through from A.  */
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results.  So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a + b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] + __B[0];
-  return (__A);
-#endif
-}
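-
-/* Lane-behavior sketch for the _ss operations (values illustrative):
-
-     __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);  // lanes {1,2,3,4}
-     __m128 b = _mm_set_ps (8.0f, 7.0f, 6.0f, 5.0f);  // lanes {5,6,7,8}
-     __m128 c = _mm_add_ss (a, b);  // lanes {6,2,3,4}: only lane 0 changes
-*/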
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results.  So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a - b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] - __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results.  So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a * b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] * __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ss (__m128 __A, __m128 __B)
-{
-#ifdef _ARCH_PWR7
-  __m128 a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-     results.  So to ensure we don't generate spurious exceptions
-     (from the upper float values) we splat the lower float
-     before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = a / b;
-  /* Then we merge the lower float result with the original upper
-     float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-#else
-  __A[0] = __A[0] / __B[0];
-  return (__A);
-#endif
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = vec_sqrt (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-/* Perform the respective operation on the four SPFP values in A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A + (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A - (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A * (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) ((__v4sf)__A / (__v4sf)__B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ps (__m128 __A)
-{
-  return (vec_sqrt ((__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ps (__m128 __A)
-{
-  return (vec_re ((__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ps (__m128 __A)
-{
-  return (vec_rsqrte (__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = _mm_rcp_ps (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ss (__m128 __A)
-{
-  __m128 a, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation. */
-  a = vec_splat (__A, 0);
-  c = vec_rsqrte (a);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel (__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ss (__m128 __A, __m128 __B)
-{
-  __v4sf a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf)__A, 0);
-  b = vec_splat ((__v4sf)__B, 0);
-  c = vec_min (a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ss (__m128 __A, __m128 __B)
-{
-  __v4sf a, b, c;
-  static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
-  /* PowerISA VSX does not allow partial (for just the lower float)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper float values) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat (__A, 0);
-  b = vec_splat (__B, 0);
-  c = vec_max (a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return (vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ps (__m128 __A, __m128 __B)
-{
-  __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
-  return vec_sel (__B, __A, m);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ps (__m128 __A, __m128 __B)
-{
-  __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
-  return vec_sel (__B, __A, m);
-}
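-
-/* A note on NaN handling: vec_cmpgt yields an all-zero lane for unordered
-   operands, so vec_sel falls through to its first argument, __B.  As with
-   x86 MINPS/MAXPS, the second operand is therefore returned when either
-   input is NaN.  A hypothetical sketch (not part of the original header):  */
-static void __attribute__((__unused__))
-__demo_min_nan (void)
-{
-  __m128 a = _mm_set1_ps (__builtin_nanf (""));
-  __m128 b = _mm_set1_ps (1.0f);
-  __m128 r = _mm_min_ps (a, b);  /* Every lane of r equals 1.0f (== b).  */
-  (void) r;
-}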
-
-/* Perform logical bit-wise operations on 128-bit values.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
-//  return __builtin_ia32_andps (__A, __B);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
-}
-
-/* Perform a comparison on the four SPFP values of A and B.  For each
-   element, if the comparison is true, place a mask of all ones in the
-   result, otherwise a mask of zeros.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ps (__m128  __A, __m128  __B)
-{
-  __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
-  return ((__m128)vec_nor (temp, temp));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ps (__m128 __A, __m128 __B)
-{
-  return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
-}
-
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ps (__m128  __A, __m128  __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
-  d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
-  return ((__m128 ) vec_and (c, d));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ps (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
-  d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
-  return ((__m128 ) vec_or (c, d));
-}
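-
-/* How the exponent-mask trick works: after vec_abs, a lane is a NaN exactly
-   when its unsigned bit pattern exceeds 0x7f800000 (exponent all ones,
-   nonzero mantissa), so an unsigned compare detects NaNs without raising
-   exceptions.  Note that a lane equal to 0x7f800000 (+/-Inf) is also not
-   less than the mask, so the ordered test above appears to treat infinities
-   as unordered, unlike x86 CMPORDPS.  A scalar sketch of the NaN test (the
-   __demo_ helper is hypothetical):  */
-static int __attribute__((__unused__))
-__demo_is_nan_bits (unsigned int abs_bits)
-{
-  /* abs_bits is the float's bit pattern with the sign bit cleared.  */
-  return abs_bits > 0x7f800000u;
-}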
-
-/* Perform a comparison on the lower SPFP values of A and B.  If the
-   comparison is true, place a mask of all ones in the result, otherwise a
-   mask of zeros.  The upper three SPFP values are passed through from A.  */
-extern __inline  __m128  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ss (__m128  __A, __m128  __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpeq(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmplt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmple(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpgt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpge(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpeq(a, b);
-  c = vec_nor (c, c);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpge(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmpgt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmple(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ss (__m128 __A, __m128 __B)
-{
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-  __v4sf a, b, c;
-  /* PowerISA VMX does not allow partial (for just element 0)
-   * results.  So to ensure we don't generate spurious exceptions
-   * (from the upper elements) we splat the lower float
-   * before we do the operation.  */
-  a = vec_splat ((__v4sf) __A, 0);
-  b = vec_splat ((__v4sf) __B, 0);
-  c = (__v4sf) vec_cmplt(a, b);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ss (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
-  d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
-  c = vec_and (c, d);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ss (__m128 __A, __m128 __B)
-{
-  __vector unsigned int a, b;
-  __vector unsigned int c, d;
-  static const __vector unsigned int float_exp_mask =
-    { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
-  static const __vector unsigned int mask =
-    { 0xffffffff, 0, 0, 0 };
-
-  a = (__vector unsigned int) vec_abs ((__v4sf)__A);
-  b = (__vector unsigned int) vec_abs ((__v4sf)__B);
-  c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
-  d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
-  c = vec_or (c, d);
-  /* Then we merge the lower float result with the original upper
-   * float elements from __A.  */
-  return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-/* Compare the lower SPFP values of A and B and return 1 if true
-   and 0 if false.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-/* FIXME
- * The _mm_ucomi??_ss implementations below are exactly the same as
- * the _mm_comi??_ss ones because GCC for PowerPC only generates
- * unordered compares (scalar and vector).
- * Technically _mm_comieq_ss et al. should be using the ordered
- * compare and signal for QNaNs.
- * The _mm_ucomieq_ss et al. should be OK as is.
- */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] == __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] < __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] <= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] > __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] >= __B[0]);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_ss (__m128 __A, __m128 __B)
-{
-  return (__A[0] != __B[0]);
-}
-
-extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_f32 (__m128 __A)
-{
-  return ((__v4sf)__A)[0];
-}
-
-/* Convert the lower SPFP value to a 32-bit integer according to the current
-   rounding mode.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si32 (__m128 __A)
-{
-  __m64 res = 0;
-#ifdef _ARCH_PWR8
-  double dtmp;
-  __asm__(
-#ifdef __LITTLE_ENDIAN__
-      "xxsldwi %x0,%x0,%x0,3;\n"
-#endif
-      "xscvspdp %x2,%x0;\n"
-      "fctiw  %2,%2;\n"
-      "mfvsrd  %1,%x2;\n"
-      : "+wa" (__A),
-        "=r" (res),
-        "=f" (dtmp)
-      : );
-#else
-  res = __builtin_rint(__A[0]);
-#endif
-  return (res);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ss2si (__m128 __A)
-{
-  return _mm_cvtss_si32 (__A);
-}
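-
-/* Because the conversion honours the current rounding mode (round-to-
-   nearest-even by default), halfway cases round to the even integer rather
-   than truncating.  A hypothetical sketch (not part of the original header,
-   assuming _mm_set_ps from earlier in the header):  */
-static void __attribute__((__unused__))
-__demo_cvtss_si32 (void)
-{
-  __m128 v = _mm_set_ps (0.0f, 0.0f, 0.0f, 2.5f);
-  int r = _mm_cvtss_si32 (v);  /* r == 2 under the default rounding mode.  */
-  (void) r;
-}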
-
-/* Convert the lower SPFP value to a 32-bit integer according to the
-   current rounding mode.  */
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64 (__m128 __A)
-{
-  __m64 res = 0;
-#ifdef _ARCH_PWR8
-  double dtmp;
-  __asm__(
-#ifdef __LITTLE_ENDIAN__
-      "xxsldwi %x0,%x0,%x0,3;\n"
-#endif
-      "xscvspdp %x2,%x0;\n"
-      "fctid  %2,%2;\n"
-      "mfvsrd  %1,%x2;\n"
-      : "+wa" (__A),
-        "=r" (res),
-        "=f" (dtmp)
-      : );
-#else
-  res = __builtin_llrint(__A[0]);
-#endif
-  return (res);
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64x (__m128 __A)
-{
-  return _mm_cvtss_si64 ((__v4sf) __A);
-}
-
-/* Constants for use with _mm_prefetch.  */
-enum _mm_hint
-{
-  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
-  _MM_HINT_ET0 = 7,
-  _MM_HINT_ET1 = 6,
-  _MM_HINT_T0 = 3,
-  _MM_HINT_T1 = 2,
-  _MM_HINT_T2 = 1,
-  _MM_HINT_NTA = 0
-};
-
-/* Loads one cache line from address P to a location "closer" to the
-   processor.  The selector I specifies the type of prefetch operation.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_prefetch (const void *__P, enum _mm_hint __I)
-{
-  /* Current PowerPC hardware ignores the hint parameters.  */
-  __builtin_prefetch (__P);
-}
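-
-/* Typical use, as a hypothetical sketch (not part of the original header):
-   prefetch a few cache lines ahead of a streaming loop.  On this port every
-   hint behaves like __builtin_prefetch with default locality.  */
-static float __attribute__((__unused__))
-__demo_prefetch_sum (const float *p, int n)
-{
-  float s = 0.0f;
-  int i;
-  for (i = 0; i < n; i++)
-    {
-      _mm_prefetch ((const char *) (p + i + 16), _MM_HINT_T0);
-      s += p[i];
-    }
-  return s;
-}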
-
-/* Convert the two lower SPFP values to 32-bit integers according to the
-   current rounding mode.  Return the integers in packed form.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi32 (__m128 __A)
-{
-  __v4sf temp, rounded;
-  __vector unsigned long long result;
-
-  /* Splat two lower SPFP values to both halves.  */
-  temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
-  rounded = vec_rint(temp);
-  result = (__vector unsigned long long) vec_cts (rounded, 0);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ps2pi (__m128 __A)
-{
-  return _mm_cvtps_pi32 (__A);
-}
-
-/* Truncate the lower SPFP value to a 32-bit integer.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si32 (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 32-bit integer and return.  */
-  return temp;
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ss2si (__m128 __A)
-{
-  return _mm_cvttss_si32 (__A);
-}
-
-/* Intel intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64 (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 64-bit integer and return.  */
-  return temp;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64x (__m128 __A)
-{
-  /* Extract the lower float element.  */
-  float temp = __A[0];
-  /* Truncate to a 64-bit integer and return.  */
-  return temp;
-}
-
-/* Truncate the two lower SPFP values to 32-bit integers.  Return the
-   integers in packed form.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_pi32 (__m128 __A)
-{
-  __v4sf temp;
-  __vector unsigned long long result;
-
-  /* Splat two lower SPFP values to both halves.  */
-  temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
-  result = (__vector unsigned long long) vec_cts (temp, 0);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ps2pi (__m128 __A)
-{
-  return _mm_cvttps_pi32 (__A);
-}
-
-/* Convert B to a SPFP value and insert it as element zero in A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_ss (__m128 __A, int __B)
-{
-  float temp = __B;
-  __A[0] = temp;
-
-  return __A;
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_si2ss (__m128 __A, int __B)
-{
-  return _mm_cvtsi32_ss (__A, __B);
-}
-
-/* Convert B to a SPFP value and insert it as element zero in A.  */
-/* Intel intrinsic.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_ss (__m128 __A, long long __B)
-{
-  float temp = __B;
-  __A[0] = temp;
-
-  return __A;
-}
-
-/* Microsoft intrinsic.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_ss (__m128 __A, long long __B)
-{
-  return _mm_cvtsi64_ss (__A, __B);
-}
-
-/* Convert the two 32-bit values in B to SPFP form and insert them
-   as the two lower elements in A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_ps (__m128 __A, __m64 __B)
-{
-  __vector signed int vm1;
-  __vector float vf1;
-
-  vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
-  vf1 = (__vector float) vec_ctf (vm1, 0);
-
-  return ((__m128) (__vector unsigned long long)
-    { ((__vector unsigned long long)vf1) [0],
-	((__vector unsigned long long)__A) [1]});
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_pi2ps (__m128 __A, __m64 __B)
-{
-  return _mm_cvtpi32_ps (__A, __B);
-}
-
-/* Convert the four signed 16-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi16_ps (__m64 __A)
-{
-  __vector signed short vs8;
-  __vector signed int vi4;
-  __vector float vf1;
-
-  vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
-  vi4 = vec_vupklsh (vs8);
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the four unsigned 16-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu16_ps (__m64 __A)
-{
-  const __vector unsigned short zero =
-    { 0, 0, 0, 0, 0, 0, 0, 0 };
-  __vector unsigned short vs8;
-  __vector unsigned int vi4;
-  __vector float vf1;
-
-  vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
-  vi4 = (__vector unsigned int) vec_mergel
-#ifdef __LITTLE_ENDIAN__
-                                           (vs8, zero);
-#else
-                                           (zero, vs8);
-#endif
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the low four signed 8-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi8_ps (__m64 __A)
-{
-  __vector signed char vc16;
-  __vector signed short vs8;
-  __vector signed int vi4;
-  __vector float vf1;
-
-  vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
-  vs8 = vec_vupkhsb (vc16);
-  vi4 = vec_vupkhsh (vs8);
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu8_ps (__m64 __A)
-{
-  const __vector unsigned char zero =
-    { 0, 0, 0, 0, 0, 0, 0, 0 };
-  __vector unsigned char vc16;
-  __vector unsigned short vs8;
-  __vector unsigned int vi4;
-  __vector float vf1;
-
-  vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
-#ifdef __LITTLE_ENDIAN__
-  vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
-  vi4 = (__vector unsigned int) vec_mergeh (vs8,
-					    (__vector unsigned short) zero);
-#else
-  vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
-  vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
-                                            vs8);
-#endif
-  vf1 = (__vector float) vec_ctf (vi4, 0);
-
-  return (__m128) vf1;
-}
-
-/* Convert the four signed 32-bit values in A and B to SPFP form.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
-{
-  __vector signed int vi4;
-  __vector float vf4;
-
-  vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
-  vf4 = (__vector float) vec_ctf (vi4, 0);
-  return (__m128) vf4;
-}
-
-/* Convert the four SPFP values in A to four signed 16-bit integers.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16 (__m128 __A)
-{
-  __v4sf rounded;
-  __vector signed int temp;
-  __vector unsigned long long result;
-
-  rounded = vec_rint(__A);
-  temp = vec_cts (rounded, 0);
-  result = (__vector unsigned long long) vec_pack (temp, temp);
-
-  return (__m64) ((__vector long long) result)[0];
-}
-
-/* Convert the four SPFP values in A to four signed 8-bit integers.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8 (__m128 __A)
-{
-  __v4sf rounded;
-  __vector signed int tmp_i;
-  static const __vector signed int zero = {0, 0, 0, 0};
-  __vector signed short tmp_s;
-  __vector signed char res_v;
-
-  rounded = vec_rint(__A);
-  tmp_i = vec_cts (rounded, 0);
-  tmp_s = vec_pack (tmp_i, zero);
-  res_v = vec_pack (tmp_s, tmp_s);
-  return (__m64) ((__vector long long) res_v)[0];
-}
-
-/* Selects four specific SPFP values from A and B based on MASK.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
-{
-  unsigned long element_selector_10 = __mask & 0x03;
-  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
-  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
-  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
-  static const unsigned int permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
-#else
-      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
-#endif
-    };
-  __vector unsigned int t;
-
-  t[0] = permute_selectors[element_selector_10];
-  t[1] = permute_selectors[element_selector_32];
-  t[2] = permute_selectors[element_selector_54] + 0x10101010;
-  t[3] = permute_selectors[element_selector_76] + 0x10101010;
-  return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
-}
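-
-/* The 8-bit mask packs four 2-bit lane selectors: bits 1:0 and 3:2 pick
-   result lanes 0-1 from __A, bits 5:4 and 7:6 pick result lanes 2-3 from
-   __B.  A hypothetical sketch (not part of the original header): 0x1b ==
-   (0<<6)|(1<<4)|(2<<2)|3, i.e. _MM_SHUFFLE (0, 1, 2, 3) in the usual
-   encoding, which reverses the lanes when both sources are the same.  */
-static __m128 __attribute__((__unused__))
-__demo_reverse_ps (__m128 a)
-{
-  return _mm_shuffle_ps (a, a, 0x1b);  /* {a[3], a[2], a[1], a[0]}.  */
-}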
-
-/* Selects and interleaves the upper two SPFP values from A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
-}
-
-/* Selects and interleaves the lower two SPFP values from A and B.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
-}
-
-/* Sets the upper two SPFP values with 64-bits of data loaded from P;
-   the lower two values are passed through from A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pi (__m128 __A, __m64 const *__P)
-{
-  __vector unsigned long long __a = (__vector unsigned long long)__A;
-  __vector unsigned long long __p = vec_splats(*__P);
-  __a [1] = __p [1];
-
-  return (__m128)__a;
-}
-
-/* Stores the upper two SPFP values of A into P.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pi (__m64 *__P, __m128 __A)
-{
-  __vector unsigned long long __a = (__vector unsigned long long) __A;
-
-  *__P = __a[1];
-}
-
-/* Moves the upper two values of B into the lower two values of A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehl_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_mergel ((__vector unsigned long long)__B,
-			      (__vector unsigned long long)__A);
-}
-
-/* Moves the lower two values of B into the upper two values of A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movelh_ps (__m128 __A, __m128 __B)
-{
-  return (__m128) vec_mergeh ((__vector unsigned long long)__A,
-			      (__vector unsigned long long)__B);
-}
-
-/* Sets the lower two SPFP values with 64-bits of data loaded from P;
-   the upper two values are passed through from A.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pi (__m128 __A, __m64 const *__P)
-{
-  __vector unsigned long long __a = (__vector unsigned long long)__A;
-  __vector unsigned long long __p = vec_splats(*__P);
-  __a [0] = __p [0];
-
-  return (__m128)__a;
-}
-
-/* Stores the lower two SPFP values of A into P.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pi (__m64 *__P, __m128 __A)
-{
-  __vector unsigned long long __a = (__vector unsigned long long) __A;
-
-  *__P = __a[0];
-}
-
-#ifdef _ARCH_PWR8
-/* Intrinsic functions that require PowerISA 2.07 minimum.  */
-
-/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_ps (__m128  __A)
-{
-  __vector unsigned long long result;
-  static const __vector unsigned int perm_mask =
-    {
-#ifdef __LITTLE_ENDIAN__
-	0x00204060, 0x80808080, 0x80808080, 0x80808080
-#else
-      0x80808080, 0x80808080, 0x80808080, 0x00204060
-#endif
-    };
-
-  result = ((__vector unsigned long long)
-	    vec_vbpermq ((__vector unsigned char) __A,
-			 (__vector unsigned char) perm_mask));
-
-#ifdef __LITTLE_ENDIAN__
-  return result[1];
-#else
-  return result[0];
-#endif
-}
-#endif /* _ARCH_PWR8 */
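-
-/* How the permute mask above works: vbpermq gathers one result bit per
-   selector byte, and the selectors 0x00, 0x20, 0x40 and 0x60 appear to name
-   bit offsets 0, 32, 64 and 96, i.e. the sign bit of each 32-bit lane, while
-   0x80 bytes contribute zeros.  A scalar sketch of the same gather (the
-   __demo_ helper is hypothetical):  */
-static int __attribute__((__unused__))
-__demo_movemask_ps_scalar (__m128 a)
-{
-  __vector unsigned int u = (__vector unsigned int) a;
-  return ((u[0] >> 31) << 0) | ((u[1] >> 31) << 1)
-    | ((u[2] >> 31) << 2) | ((u[3] >> 31) << 3);
-}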
-
-/* Create a vector with all four elements equal to *P.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_ps (float const *__P)
-{
-  return _mm_set1_ps (*__P);
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps1 (float const *__P)
-{
-  return _mm_load1_ps (__P);
-}
-
-/* Extracts one of the four words of A.  The selector N must be immediate.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_pi16 (__m64 const __A, int const __N)
-{
-  unsigned int shiftr = __N & 3;
-#ifdef __BIG_ENDIAN__
-  shiftr = 3 - shiftr;
-#endif
-
-  return ((__A >> (shiftr * 16)) & 0xffff);
-}
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pextrw (__m64 const __A, int const __N)
-{
-  return _mm_extract_pi16 (__A, __N);
-}
-
-/* Inserts word D into one of four words of A.  The selector N must be
-   immediate.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
-{
-  const int shiftl = (__N & 3) * 16;
-  const __m64 shiftD = (const __m64) __D << shiftl;
-  const __m64 mask = 0xffffUL << shiftl;
-  __m64 result = (__A & (~mask)) | (shiftD & mask);
-
-  return (result);
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pinsrw (__m64 const __A, int const __D, int const __N)
-{
-  return _mm_insert_pi16 (__A, __D, __N);
-}
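-
-/* Illustrative round trip, as a hypothetical sketch (not part of the
-   original header): insert a halfword, then read the same slot back.  */
-static int __attribute__((__unused__))
-__demo_insert_extract (__m64 a)
-{
-  __m64 t = _mm_insert_pi16 (a, 0x1234, 2);
-  return _mm_extract_pi16 (t, 2);  /* Always 0x1234.  */
-}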
-
-/* Compute the element-wise maximum of signed 16-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pi16 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector signed short a, b, r;
-  __vector __bool short c;
-
-  a = (__vector signed short)vec_splats (__A);
-  b = (__vector signed short)vec_splats (__B);
-  c = (__vector __bool short)vec_cmpgt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  res.as_short[0] =
-      (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
-  res.as_short[1] =
-      (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
-  res.as_short[2] =
-      (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
-  res.as_short[3] =
-      (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxsw (__m64 __A, __m64 __B)
-{
-  return _mm_max_pi16 (__A, __B);
-}
-
-/* Compute the element-wise maximum of unsigned 8-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pu8 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector unsigned char a, b, r;
-  __vector __bool char c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = (__vector __bool char)vec_cmpgt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-  long i;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  for (i = 0; i < 8; i++)
-    res.as_char[i] =
-      ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
-      m1.as_char[i] : m2.as_char[i];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxub (__m64 __A, __m64 __B)
-{
-  return _mm_max_pu8 (__A, __B);
-}
-
-/* Compute the element-wise minimum of signed 16-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pi16 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector signed short a, b, r;
-  __vector __bool short c;
-
-  a = (__vector signed short)vec_splats (__A);
-  b = (__vector signed short)vec_splats (__B);
-  c = (__vector __bool short)vec_cmplt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  res.as_short[0] =
-      (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
-  res.as_short[1] =
-      (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
-  res.as_short[2] =
-      (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
-  res.as_short[3] =
-      (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminsw (__m64 __A, __m64 __B)
-{
-  return _mm_min_pi16 (__A, __B);
-}
-
-/* Compute the element-wise minimum of unsigned 8-bit values.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pu8 (__m64 __A, __m64 __B)
-{
-#if _ARCH_PWR8
-  __vector unsigned char a, b, r;
-  __vector __bool char c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = (__vector __bool char)vec_cmplt (a, b);
-  r = vec_sel (b, a, c);
-  return (__m64) ((__vector long long) r)[0];
-#else
-  __m64_union m1, m2, res;
-  long i;
-
-  m1.as_m64 = __A;
-  m2.as_m64 = __B;
-
-  for (i = 0; i < 8; i++)
-    res.as_char[i] =
-      ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
-      m1.as_char[i] : m2.as_char[i];
-
-  return (__m64) res.as_m64;
-#endif
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminub (__m64 __A, __m64 __B)
-{
-  return _mm_min_pu8 (__A, __B);
-}
-
-/* Create an 8-bit mask of the signs of 8-bit values.  */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pi8 (__m64 __A)
-{
-  unsigned long long p =
-#ifdef __LITTLE_ENDIAN__
-                         0x0008101820283038UL; // permute control for sign bits
-#else
-                         0x3830282018100800UL; // permute control for sign bits
-#endif
-  return __builtin_bpermd (p, __A);
-}
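-
-/* The constant above supplies eight bpermd selector bytes, one per result
-   bit; each selects the most significant (sign) bit of one byte of __A.  A
-   scalar sketch of the same gather (the __demo_ helper is hypothetical):  */
-static int __attribute__((__unused__))
-__demo_movemask_pi8_scalar (__m64 a)
-{
-  int i, r = 0;
-  for (i = 0; i < 8; i++)
-    r |= (int) (((unsigned long long) a >> (i * 8 + 7)) & 1) << i;
-  return r;
-}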
-
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmovmskb (__m64 __A)
-{
-  return _mm_movemask_pi8 (__A);
-}
-
-/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
-   in B and produce the high 16 bits of the 32-bit results.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_pu16 (__m64 __A, __m64 __B)
-{
-  __vector unsigned short a, b;
-  __vector unsigned short c;
-  __vector unsigned int w0, w1;
-  __vector unsigned char xform1 = {
-#ifdef __LITTLE_ENDIAN__
-      0x02, 0x03, 0x12, 0x13,  0x06, 0x07, 0x16, 0x17,
-      0x0A, 0x0B, 0x1A, 0x1B,  0x0E, 0x0F, 0x1E, 0x1F
-#else
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15,
-      0x00, 0x01, 0x10, 0x11,  0x04, 0x05, 0x14, 0x15
-#endif
-    };
-
-  a = (__vector unsigned short)vec_splats (__A);
-  b = (__vector unsigned short)vec_splats (__B);
-
-  w0 = vec_vmuleuh (a, b);
-  w1 = vec_vmulouh (a, b);
-  c = (__vector unsigned short)vec_perm (w0, w1, xform1);
-
-  return (__m64) ((__vector long long) c)[0];
-}
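-
-/* Why the even/odd split: VMX multiplies 16x16->32 in the even lanes
-   (vmuleuh) and the odd lanes (vmulouh) separately, and the vec_perm picks
-   the high halfword of each 32-bit product and re-interleaves them into
-   source order.  A scalar sketch of one lane (the __demo_ helper is
-   hypothetical):  */
-static unsigned short __attribute__((__unused__))
-__demo_mulhi_u16 (unsigned short a, unsigned short b)
-{
-  return (unsigned short) (((unsigned int) a * (unsigned int) b) >> 16);
-}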
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmulhuw (__m64 __A, __m64 __B)
-{
-  return _mm_mulhi_pu16 (__A, __B);
-}
-
-/* Return a combination of the four 16-bit values in A.  The selector
-   must be an immediate.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi16 (__m64 __A, int const __N)
-{
-  unsigned long element_selector_10 = __N & 0x03;
-  unsigned long element_selector_32 = (__N >> 2) & 0x03;
-  unsigned long element_selector_54 = (__N >> 4) & 0x03;
-  unsigned long element_selector_76 = (__N >> 6) & 0x03;
-  static const unsigned short permute_selectors[4] =
-    {
-#ifdef __LITTLE_ENDIAN__
-	      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
-#else
-	      0x0607, 0x0405, 0x0203, 0x0001
-#endif
-    };
-  __m64_union t;
-  __vector unsigned long long a, p, r;
-
-#ifdef __LITTLE_ENDIAN__
-  t.as_short[0] = permute_selectors[element_selector_10];
-  t.as_short[1] = permute_selectors[element_selector_32];
-  t.as_short[2] = permute_selectors[element_selector_54];
-  t.as_short[3] = permute_selectors[element_selector_76];
-#else
-  t.as_short[3] = permute_selectors[element_selector_10];
-  t.as_short[2] = permute_selectors[element_selector_32];
-  t.as_short[1] = permute_selectors[element_selector_54];
-  t.as_short[0] = permute_selectors[element_selector_76];
-#endif
-  p = vec_splats (t.as_m64);
-  a = vec_splats (__A);
-  r = vec_perm (a, a, (__vector unsigned char)p);
-  return (__m64) ((__vector long long) r)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pshufw (__m64 __A, int const __N)
-{
-  return _mm_shuffle_pi16 (__A, __N);
-}
-
-/* Conditionally store byte elements of A into P.  The high bit of each
-   byte in the selector N determines whether the corresponding byte from
-   A is stored.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
-{
-  __m64 hibit = 0x8080808080808080UL;
-  __m64 mask, tmp;
-  __m64 *p = (__m64*)__P;
-
-  tmp = *p;
-  mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
-  tmp = (tmp & (~mask)) | (__A & mask);
-  *p = tmp;
-}
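-
-/* Illustrative use, as a hypothetical sketch (not part of the original
-   header): store only the lowest and highest bytes of A, leaving the other
-   six destination bytes untouched.  Note the emulation above is a
-   non-atomic read-modify-write of all 8 bytes.  */
-static void __attribute__((__unused__))
-__demo_maskmove (char *dst)  /* dst must point to 8 writable bytes.  */
-{
-  __m64 a = (__m64) 0x1122334455667788ULL;
-  __m64 n = (__m64) 0x8000000000000080ULL;  /* High bit set in bytes 0, 7.  */
-  _mm_maskmove_si64 (a, n, dst);
-}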
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_maskmovq (__m64 __A, __m64 __N, char *__P)
-{
-  _mm_maskmove_si64 (__A, __N, __P);
-}
-
-/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu8 (__m64 __A, __m64 __B)
-{
-  __vector unsigned char a, b, c;
-
-  a = (__vector unsigned char)vec_splats (__A);
-  b = (__vector unsigned char)vec_splats (__B);
-  c = vec_avg (a, b);
-  return (__m64) ((__vector long long) c)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgb (__m64 __A, __m64 __B)
-{
-  return _mm_avg_pu8 (__A, __B);
-}
-
-/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu16 (__m64 __A, __m64 __B)
-{
-  __vector unsigned short a, b, c;
-
-  a = (__vector unsigned short)vec_splats (__A);
-  b = (__vector unsigned short)vec_splats (__B);
-  c = vec_avg (a, b);
-  return (__m64) ((__vector long long) c)[0];
-}
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgw (__m64 __A, __m64 __B)
-{
-  return _mm_avg_pu16 (__A, __B);
-}
-
-/* Compute the sum of the absolute differences of the unsigned 8-bit
-   values in A and B.  Return the value in the lower 16-bit word; the
-   upper words are cleared.  */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_pu8 (__m64 __A, __m64 __B)
-{
-  __vector unsigned char a, b;
-  __vector unsigned char vmin, vmax, vabsdiff;
-  __vector signed int vsum;
-  const __vector unsigned int zero =
-    { 0, 0, 0, 0 };
-  __m64_union result = {0};
-
-  a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
-  b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
-  vmin = vec_min (a, b);
-  vmax = vec_max (a, b);
-  vabsdiff = vec_sub (vmax, vmin);
-  /* Sum four groups of bytes into integers.  */
-  vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
-  /* Sum across four integers with integer result.  */
-  vsum = vec_sums (vsum, (__vector signed int) zero);
-  /* The sum is in the right most 32-bits of the vector result.
-     Transfer to a GPR and truncate to 16 bits.  */
-  result.as_short[0] = vsum[3];
-  return result.as_m64;
-}
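-
-/* The min/max trick: VMX has no unsigned absolute-difference instruction,
-   but |a - b| == max(a,b) - min(a,b) for unsigned values, which is what the
-   vec_min/vec_max/vec_sub sequence above computes before summing.  A scalar
-   sketch of one byte (the __demo_ helper is hypothetical):  */
-static unsigned int __attribute__((__unused__))
-__demo_absdiff_u8 (unsigned char a, unsigned char b)
-{
-  return (a > b) ? (unsigned int) (a - b) : (unsigned int) (b - a);
-}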
-
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_psadbw (__m64 __A, __m64 __B)
-{
-  return _mm_sad_pu8 (__A, __B);
-}
-
-/* Stores the data in A to the address P without polluting the caches.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pi (__m64 *__P, __m64 __A)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__P)
-    : "memory"
-  );
-  *__P = __A;
-}
-
-/* Likewise.  The address must be 16-byte aligned.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_ps (float *__P, __m128 __A)
-{
-  /* Use the data cache block touch for store transient.  */
-  __asm__ (
-    "	dcbtstt	0,%0"
-    :
-    : "b" (__P)
-    : "memory"
-  );
-  _mm_store_ps (__P, __A);
-}
-
-/* Guarantees that every preceding store is globally visible before
-   any subsequent store.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sfence (void)
-{
-  /* Generate a light weight sync.  */
-  __atomic_thread_fence (__ATOMIC_RELEASE);
-}
-
-/* The execution of the next instruction is delayed by an implementation
-   specific amount of time.  The instruction does not modify the
-   architectural state.  This is after the pop_options pragma because
-   it does not require SSE support in the processor; the encoding is a
-   nop on processors that do not support it.  */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_pause (void)
-{
-  /* There is no exact match with this construct, but the following is
-     close to the desired effect.  */
-#if _ARCH_PWR8
-  /* On power8 and later processors we can depend on Program Priority
-     (PRI) and the associated "very low" PRI setting.  Since we don't
-     know what PRI this thread is running at we: 1) save the current
-     PRI from the PPR SPR into a local GPR, 2) set the PRI to "very
-     low" via the special or 31,31,31 encoding, and 3) issue an "isync"
-     to ensure the PRI change takes effect before we execute any more
-     instructions.
-     Now we can execute a lwsync (release barrier) while we execute
-     this thread at "very low" PRI.  Finally we restore the original
-     PRI and continue execution.  */
-  unsigned long __PPR;
-
-  __asm__ volatile (
-    "	mfppr	%0;"
-    "   or 31,31,31;"
-    "   isync;"
-    "   lwsync;"
-    "   isync;"
-    "   mtppr	%0;"
-    : "=r" (__PPR)
-    :
-    : "memory"
-  );
-#else
-  /* For older processor where we may not even have Program Priority
-     controls we can only depend on Heavy Weight Sync.  */
-  __atomic_thread_fence (__ATOMIC_SEQ_CST);
-#endif
-}
-
-/* Transpose the 4x4 matrix composed of row[0-3].  */
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
-do {									\
-  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
-  __v4sf __t0 = vec_vmrghw (__r0, __r1);			\
-  __v4sf __t1 = vec_vmrghw (__r2, __r3);			\
-  __v4sf __t2 = vec_vmrglw (__r0, __r1);			\
-  __v4sf __t3 = vec_vmrglw (__r2, __r3);			\
-  (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, 	\
-			       (__vector long long)__t1);	\
-  (row1) = (__v4sf)vec_mergel ((__vector long long)__t0,	\
-			       (__vector long long)__t1);	\
-  (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2,	\
-			       (__vector long long)__t3);	\
-  (row3) = (__v4sf)vec_mergel ((__vector long long)__t2,	\
-			       (__vector long long)__t3);	\
-} while (0)
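-
-/* Usage, as a hypothetical sketch (not part of the original header): the
-   macro is two rounds of merges; the 32-bit merges build 2x2 tiles and the
-   64-bit merges then swap the off-diagonal tiles, transposing in place.  */
-static void __attribute__((__unused__))
-__demo_transpose (__m128 *m)  /* m must point to four row vectors.  */
-{
-  _MM_TRANSPOSE4_PS (m[0], m[1], m[2], m[3]);
-}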
-
-/* For backward source compatibility.  */
-//# include <emmintrin.h>
-
-#else
-#include_next <xmmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
-
-#endif /* _XMMINTRIN_H_INCLUDED */
diff --git a/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
deleted file mode 100644
index 008b8dd..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ /dev/null
@@ -1,896 +0,0 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-/*
- * This is the main file that defines all the data structures, signatures, and
- * constant literals that are shared across the profiling runtime library,
- * compiler (instrumentation), and host tools (reader/writer). The entities
- * defined in this file affect the profile runtime ABI, the raw profile format,
- * or both.
- *
- * The file has two identical copies. The primary copy lives in LLVM and
- * the other one sits in the compiler-rt/lib/profile directory. To make changes
- * in this file, first modify the primary copy and copy it over to compiler-rt.
- * Testing of any change in this file can start only after the two copies are
- * synced up.
- *
- * The first part of the file includes macros that define types, names, and
- * initializers for the member fields of the core data structures. The field
- * declarations for one structure are enabled by defining the field activation
- * macro associated with that structure. Only one field activation record
- * can be defined at a time, and the remaining definitions will be filtered
- * out by the preprocessor.
- *
- * Examples of how the template is used to instantiate structure definitions:
- * 1. To declare a structure:
- *
- * struct ProfData {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *    Type Name;
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 2. To construct LLVM type arrays for the struct type:
- *
- * Type *DataTypes[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   LLVMType,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 3. To construct a constant array for the initializers:
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   Initializer,
- * Constant *ConstantVals[] = {
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- *
- * The second part of the file includes definitions of all other entities that
- * are related to the runtime ABI and format. When no field activation macro is
- * defined, this file can be included to introduce the definitions.
- *
-\*===----------------------------------------------------------------------===*/
-
-/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
- * the compiler runtime. */
-#ifndef INSTR_PROF_VISIBILITY
-#define INSTR_PROF_VISIBILITY
-#endif
-
-/* INSTR_PROF_DATA start. */
-/* Definition of member fields of the per-function control structure. */
-#ifndef INSTR_PROF_DATA
-#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                Inc->getHash()->getZExtValue()))
-INSTR_PROF_DATA(const IntPtrT, IntPtrTy, CounterPtr, RelativeCounterPtr)
-/* This is used to map function pointers for the indirect call targets to
- * function name hashes during the conversion from raw to merged profile
- * data.
- */
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
-                FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
-                ValuesPtrExpr)
-INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
-                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
-INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
-                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
-#undef INSTR_PROF_DATA
-/* INSTR_PROF_DATA end. */
-
-
-/* This is an internal data structure used by the value profiler.  It
- * is defined here to allow the serialization code to be shared by LLVM
- * and used in unit tests.
- *
- * typedef struct ValueProfNode {
- *   // InstrProfValueData VData;
- *   uint64_t Value;
- *   uint64_t Count;
- *   struct ValueProfNode *Next;
- * } ValueProfNode;
- */
-/* INSTR_PROF_VALUE_NODE start. */
-#ifndef INSTR_PROF_VALUE_NODE
-#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
-                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
-#undef INSTR_PROF_VALUE_NODE
-/* INSTR_PROF_VALUE_NODE end. */
-
-/* INSTR_PROF_RAW_HEADER  start */
-/* Definition of member fields of the raw profile header data structure. */
-#ifndef INSTR_PROF_RAW_HEADER
-#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
-INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
-INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta,
-                      (uintptr_t)CountersBegin - (uintptr_t)DataBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
-#undef INSTR_PROF_RAW_HEADER
-/* INSTR_PROF_RAW_HEADER  end */
-
-/* VALUE_PROF_FUNC_PARAM start */
-/* Definition of parameter types of the runtime API used to do value profiling
- * for a given value site.
- */
-#ifndef VALUE_PROF_FUNC_PARAM
-#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
-#define INSTR_PROF_COMMA
-#else
-#define INSTR_PROF_DATA_DEFINED
-#define INSTR_PROF_COMMA ,
-#endif
-VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
-#undef VALUE_PROF_FUNC_PARAM
-#undef INSTR_PROF_COMMA
-/* VALUE_PROF_FUNC_PARAM end */
-
-/* VALUE_PROF_KIND start */
-#ifndef VALUE_PROF_KIND
-#define VALUE_PROF_KIND(Enumerator, Value, Descr)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-/* For indirect function call value profiling, the addresses of the target
- * functions are profiled by the instrumented code. The target addresses are
- * written in the raw profile data and converted to target function name's MD5
- * hash by the profile reader during deserialization.  Typically, this happens
- * when the raw profile data is read during profile merging.
- *
- * For this remapping the ProfData is used.  ProfData contains both the function
- * name hash and the function address.
- */
-VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
-/* For size profiling of memory intrinsic functions. */
-VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
-/* These two kinds must be the last to be
- * declared. This is to make sure the string
- * array created with the template can be
- * indexed with the kind value.
- */
-VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
-VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
-
-#undef VALUE_PROF_KIND
-/* VALUE_PROF_KIND end */
-
-#undef COVMAP_V2_OR_V3
-#ifdef COVMAP_V2
-#define COVMAP_V2_OR_V3
-#endif
-#ifdef COVMAP_V3
-#define COVMAP_V2_OR_V3
-#endif
-
-/* COVMAP_FUNC_RECORD start */
-/* Definition of member fields of the function record structure in coverage
- * map.
- */
-#ifndef COVMAP_FUNC_RECORD
-#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
-                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
-                   llvm::Type::getInt8PtrTy(Ctx)))
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
-                   NameValue.size()))
-#endif
-#ifdef COVMAP_V2_OR_V3
-COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), NameHash))
-#endif
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FuncHash))
-#ifdef COVMAP_V3
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
-COVMAP_FUNC_RECORD(const char, \
-                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
-                                        CoverageMapping.size()), \
-                   CoverageMapping,
-                   llvm::ConstantDataArray::getRaw( \
-                     CoverageMapping, CoverageMapping.size(), \
-                     llvm::Type::getInt8Ty(Ctx)))
-#endif
-#undef COVMAP_FUNC_RECORD
-/* COVMAP_FUNC_RECORD end.  */
-
-/* COVMAP_HEADER start */
-/* Definition of member fields of coverage map header.
- */
-#ifndef COVMAP_HEADER
-#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty, NRecords))
-COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
-              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
-COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
-              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
-COVMAP_HEADER(uint32_t, Int32Ty, Version, \
-              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
-#undef COVMAP_HEADER
-/* COVMAP_HEADER end.  */
-
-
-#ifdef INSTR_PROF_SECT_ENTRY
-#define INSTR_PROF_DATA_DEFINED
-INSTR_PROF_SECT_ENTRY(IPSK_data, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
-                      INSTR_PROF_DATA_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
-                      INSTR_PROF_CNTS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_name, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
-                      INSTR_PROF_NAME_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vals, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
-                      INSTR_PROF_VALS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
-                      INSTR_PROF_VNODES_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
-                      INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
-                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
-
-#undef INSTR_PROF_SECT_ENTRY
-#endif
-
-
-#ifdef INSTR_PROF_VALUE_PROF_DATA
-#define INSTR_PROF_DATA_DEFINED
-
-#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
-/*!
- * This is the header of the data structure that defines the on-disk
- * layout of the value profile data of a particular kind for one function.
- */
-typedef struct ValueProfRecord {
-  /* The kind of the value profile record. */
-  uint32_t Kind;
-  /*
-   * The number of value profile sites. It is guaranteed to be non-zero;
-   * otherwise the record for this kind won't be emitted.
-   */
-  uint32_t NumValueSites;
-  /*
-   * The first element of the array that stores the number of profiled
-   * values for each value site. The size of the array is NumValueSites.
-   * Since NumValueSites is greater than zero, there is at least one
-   * element in the array.
-   */
-  uint8_t SiteCountArray[1];
-
-  /*
-   * The fake declaration below is for documentation purposes only. It
-   * aligns the start of the next field to an 8-byte boundary.
-  uint8_t Padding[X];
-   */
-
-  /* The array of value profile data. The size of the array is the sum
-   * of all elements in SiteCountArray[].
-  InstrProfValueData ValueData[];
-   */
-
-#ifdef __cplusplus
-  /*!
-   * Return the number of value sites.
-   */
-  uint32_t getNumValueSites() const { return NumValueSites; }
-  /*!
-   * Read data from this record and save it to Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  /*
-   * In-place byte swap:
-   * Do byte swap for this instance. \c Old is the original order before
-   * the swap, and \c New is the new byte order.
-   */
-  void swapBytes(support::endianness Old, support::endianness New);
-#endif
-} ValueProfRecord;
-
-/*!
- * Per-function header/control data structure for value profiling
- * data in indexed format.
- */
-typedef struct ValueProfData {
-  /*
-   * Total size in bytes including this field. It must be a multiple
-   * of sizeof(uint64_t).
-   */
-  uint32_t TotalSize;
-  /*
-   * The number of value profile kinds that have value profile data.
-   * In this implementation, a value profile kind is considered to
-   * have profile data if the number of value profile sites for the
-   * kind is not zero. More aggressively, the implementation can
-   * choose to check the actual data value: if none of the value sites
-   * has any profiled values, the kind can be skipped.
-   */
-  uint32_t NumValueKinds;
-
-  /*
-   * What follows is a sequence of variable-length records. The
-   * prefix/header of each record is defined by the ValueProfRecord type.
-   * The number of records is NumValueKinds.
-   * ValueProfRecord Record_1;
-   * ValueProfRecord Record_N;
-   */
-
-#if __cplusplus
-  /*!
-   * Return the total size in bytes of the on-disk value profile data
-   * given the data stored in Record.
-   */
-  static uint32_t getSize(const InstrProfRecord &Record);
-  /*!
- * Return a pointer to a \c ValueProfData instance ready to be streamed.
-   */
-  static std::unique_ptr<ValueProfData>
-  serializeFrom(const InstrProfRecord &Record);
-  /*!
-   * Check the integrity of the record.
-   */
-  Error checkIntegrity();
-  /*!
- * Return a pointer to a \c ValueProfData instance ready to be read.
-   * All data in the instance are properly byte swapped. The input
-   * data is assumed to be in little endian order.
-   */
-  static Expected<std::unique_ptr<ValueProfData>>
-  getValueProfData(const unsigned char *SrcBuffer,
-                   const unsigned char *const SrcBufferEnd,
-                   support::endianness SrcDataEndianness);
-  /*!
-   * Swap byte order from \c Endianness order to host byte order.
-   */
-  void swapBytesToHost(support::endianness Endianness);
-  /*!
-   * Swap byte order from host byte order to \c Endianness order.
-   */
-  void swapBytesFromHost(support::endianness Endianness);
-  /*!
- * Return the total size of \c ValueProfData.
-   */
-  uint32_t getSize() const { return TotalSize; }
-  /*!
-   * Read data from this data and save it to \c Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  void operator delete(void *ptr) { ::operator delete(ptr); }
-#endif
-} ValueProfData;
-
-/*
- * The closure is designed to abstract away two types of value profile data:
- * - InstrProfRecord which is the primary data structure used to
- *   represent profile data in host tools (reader, writer, and profile-use)
- * - value profile runtime data structure suitable to be used by C
- *   runtime library.
- *
- * Both sources of data need to serialize to disk/memory-buffer in a common
- * format: ValueProfData. The abstraction allows compiler-rt's raw profiler
- * writer to share the same format and code with indexed profile writer.
- *
- * For documentation of the member methods below, refer to corresponding methods
- * in class InstrProfRecord.
- */
-typedef struct ValueProfRecordClosure {
-  const void *Record;
-  uint32_t (*GetNumValueKinds)(const void *Record);
-  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
-
-  /*
-   * After extracting the value profile data from the value profile record,
-   * this method is used to map the in-memory value to on-disk value. If
-   * the method is null, value will be written out untranslated.
-   */
-  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
-  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
-                          uint32_t S);
-  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
-} ValueProfRecordClosure;
-
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getFirstValueProfRecord(ValueProfData *VPD);
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getValueProfRecordNext(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY InstrProfValueData *
-getValueProfRecordValueData(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfRecordHeaderSize(uint32_t NumValueSites);
-
-#undef INSTR_PROF_VALUE_PROF_DATA
-#endif  /* INSTR_PROF_VALUE_PROF_DATA */
-
-
-#ifdef INSTR_PROF_COMMON_API_IMPL
-#define INSTR_PROF_DATA_DEFINED
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#define INSTR_PROF_NULLPTR nullptr
-#else
-#define INSTR_PROF_INLINE
-#define INSTR_PROF_NULLPTR NULL
-#endif
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-/*!
- * Return the \c ValueProfRecord header size including the
- * padding bytes.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
-  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
-                  sizeof(uint8_t) * NumValueSites;
-  /* Round the size to multiple of 8 bytes. */
-  Size = (Size + 7) & ~7;
-  return Size;
-}
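-
-/* For example, with NumValueSites == 5: offsetof(ValueProfRecord,
- * SiteCountArray) is 8 (Kind plus NumValueSites), the unpadded size is
- * 8 + 5 = 13 bytes, and (13 + 7) & ~7 rounds that up to 16. */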
-
-/*!
- * Return the total size of the value profile record including the
- * header and the value data.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordSize(uint32_t NumValueSites,
-                                uint32_t NumValueData) {
-  return getValueProfRecordHeaderSize(NumValueSites) +
-         sizeof(InstrProfValueData) * NumValueData;
-}
-
-/*!
- * Return the pointer to the start of value data array.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
-  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
-                                                   This->NumValueSites));
-}
-
-/*!
- * Return the total number of value data for \c This record.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
-  uint32_t NumValueData = 0;
-  uint32_t I;
-  for (I = 0; I < This->NumValueSites; I++)
-    NumValueData += This->SiteCountArray[I];
-  return NumValueData;
-}
-
-/*!
- * Return the \c ValueProfRecord that follows \c This.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
-  uint32_t NumValueData = getValueProfRecordNumValueData(This);
-  return (ValueProfRecord *)((char *)This +
-                             getValueProfRecordSize(This->NumValueSites,
-                                                    NumValueData));
-}
-
-/*!
- * Return the first \c ValueProfRecord instance.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
-  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
-}
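-
-/* A typical traversal chains the accessors above; a sketch, assuming VPD
- * points at a well-formed, host-endian ValueProfData buffer:
- *
- *   uint32_t K, I, Total = 0;
- *   ValueProfRecord *VR = getFirstValueProfRecord(VPD);
- *   for (K = 0; K < VPD->NumValueKinds; K++) {
- *     uint32_t N = getValueProfRecordNumValueData(VR);
- *     InstrProfValueData *VD = getValueProfRecordValueData(VR);
- *     for (I = 0; I < N; I++)
- *       Total += VD[I].Count;
- *     VR = getValueProfRecordNext(VR);
- *   }
- */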
-
-/* Closure based interfaces.  */
-
-/*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfDataSize(ValueProfRecordClosure *Closure) {
-  uint32_t Kind;
-  uint32_t TotalSize = sizeof(ValueProfData);
-  const void *Record = Closure->Record;
-
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
-    if (!NumValueSites)
-      continue;
-    TotalSize += getValueProfRecordSize(NumValueSites,
-                                        Closure->GetNumValueData(Record, Kind));
-  }
-  return TotalSize;
-}
-
-/*!
- * Extract value profile data of a function for the profile kind \c ValueKind
- * from the \c Closure and serialize the data into \c This record instance.
- */
-INSTR_PROF_VISIBILITY void
-serializeValueProfRecordFrom(ValueProfRecord *This,
-                             ValueProfRecordClosure *Closure,
-                             uint32_t ValueKind, uint32_t NumValueSites) {
-  uint32_t S;
-  const void *Record = Closure->Record;
-  This->Kind = ValueKind;
-  This->NumValueSites = NumValueSites;
-  InstrProfValueData *DstVD = getValueProfRecordValueData(This);
-
-  for (S = 0; S < NumValueSites; S++) {
-    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
-    This->SiteCountArray[S] = ND;
-    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
-    DstVD += ND;
-  }
-}
-
-/*!
- * Extract value profile data of a function from the \c Closure and
- * serialize the data into \c DstData if it is not NULL, or otherwise into
- * heap memory allocated by the \c Closure's allocator method. If \c
- * DstData is not null, the caller is expected to have set its TotalSize
- * before the call.
- */
-INSTR_PROF_VISIBILITY ValueProfData *
-serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
-                           ValueProfData *DstData) {
-  uint32_t Kind;
-  uint32_t TotalSize =
-      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
-
-  ValueProfData *VPD =
-      DstData ? DstData : Closure->AllocValueProfData(TotalSize);
-
-  VPD->TotalSize = TotalSize;
-  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
-  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
-    if (!NumValueSites)
-      continue;
-    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
-    VR = getValueProfRecordNext(VR);
-  }
-  return VPD;
-}
-
-#undef INSTR_PROF_COMMON_API_IMPL
-#endif /* INSTR_PROF_COMMON_API_IMPL */
-
-/*============================================================================*/
-
-#ifndef INSTR_PROF_DATA_DEFINED
-
-#ifndef INSTR_PROF_DATA_INC
-#define INSTR_PROF_DATA_INC
-
-/* Helper macros.  */
-#define INSTR_PROF_SIMPLE_QUOTE(x) #x
-#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
-#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
-#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
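-
-/* For instance, INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON) expands to the
- * string literal "__llvm_prf_data", and INSTR_PROF_CONCAT(__start_, foo)
- * to the single token __start_foo. */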
-
-/* Magic number to detect file format and endianness.
- * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
- * so that utilities, like strings, don't grab it as a string.  129 is also
- * invalid UTF-8, and high enough to be interesting.
- * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
- * for 32-bit platforms.
- */
-#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
-#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
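-
-/* Read most-significant byte first, the 64-bit magic is the byte sequence
- * 0xff 'l' 'p' 'r' 'o' 'f' 'r' 0x81 (with 'R' in place of the final 'r'
- * for the 32-bit variant); a reader that sees these bytes reversed knows
- * the profile was written on a host of the opposite endianness. */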
-
-/* Raw profile format version (starts from 1). */
-#define INSTR_PROF_RAW_VERSION 8
-/* Indexed profile format version (starts from 1). */
-#define INSTR_PROF_INDEX_VERSION 7
-/* Coverage mapping format version (starts from 0). */
-#define INSTR_PROF_COVMAP_VERSION 5
-
-/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
- * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate that this is an IR-level
- * instrumentation generated profile, and 0 if this is a Clang FE generated
- * profile. A 1 in bit 57 indicates there are context-sensitive records in
- * the profile.
- */
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
-#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
-#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
-#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
-#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
-#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
-#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
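-
-/* For example, a version word of (VARIANT_MASK_IR_PROF | 8), i.e.
- * 0x0100000000000008ULL, denotes raw version 8 of an IR-instrumented
- * profile: GET_VERSION() masks off the variant bits and yields 8. */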
-
-/* The variable that holds the name of the profile data
- * specified via command line. */
-#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
-
-/* section name strings common to all targets other
-   than WIN32 */
-#define INSTR_PROF_DATA_COMMON __llvm_prf_data
-#define INSTR_PROF_NAME_COMMON __llvm_prf_names
-#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
-#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
-#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
-#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
-#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
-/* Windows section names. Because these section names contain dollar characters,
- * they must be quoted.
- */
-#define INSTR_PROF_DATA_COFF ".lprfd$M"
-#define INSTR_PROF_NAME_COFF ".lprfn$M"
-#define INSTR_PROF_CNTS_COFF ".lprfc$M"
-#define INSTR_PROF_VALS_COFF ".lprfv$M"
-#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
-#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
-#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
-#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
-
-#ifdef _WIN32
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
-#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
-#else
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
-/* Order file instrumentation. */
-#define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
-#endif
-
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR                               \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
-
-/* Macros to define start/stop section symbol for a given
- * section on Linux. For instance
- * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
- * expand to __start___llvm_prof_data
- */
-#define INSTR_PROF_SECT_START(Sect) \
-        INSTR_PROF_CONCAT(__start_,Sect)
-#define INSTR_PROF_SECT_STOP(Sect) \
-        INSTR_PROF_CONCAT(__stop_,Sect)
-
-/* Value Profiling API linkage name.  */
-#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
-#define INSTR_PROF_VALUE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
-#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop
-#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC)
-
-/* InstrProfile per-function control data alignment.  */
-#define INSTR_PROF_DATA_ALIGNMENT 8
-
-/* The data structure that represents a tracked value by the
- * value profiler.
- */
-typedef struct InstrProfValueData {
-  /* Profiled value. */
-  uint64_t Value;
-  /* Number of times the value appears in the training run. */
-  uint64_t Count;
-} InstrProfValueData;
-
-#endif /* INSTR_PROF_DATA_INC */
-
-#ifndef INSTR_ORDER_FILE_INC
-/* The maximum number of functions: 128*1024 (the buffer size will be 128*4 KB). */
-#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
-#define INSTR_ORDER_FILE_BUFFER_BITS 17
-#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
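-/* The three values are consistent: 131072 == 1 << 17 and the mask is
- * 131071, so an index wraps with (Idx & INSTR_ORDER_FILE_BUFFER_MASK). */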
-#endif /* INSTR_ORDER_FILE_INC */
-#else
-#undef INSTR_PROF_DATA_DEFINED
-#endif
-
-#undef COVMAP_V2_OR_V3
-
-#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API
-
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#else
-#define INSTR_PROF_INLINE
-#endif
-
-/* The value range buckets (22 buckets) for the memop size value profiling look
- * like:
- *
- *   [0, 0]
- *   [1, 1]
- *   [2, 2]
- *   [3, 3]
- *   [4, 4]
- *   [5, 5]
- *   [6, 6]
- *   [7, 7]
- *   [8, 8]
- *   [9, 15]
- *   [16, 16]
- *   [17, 31]
- *   [32, 32]
- *   [33, 63]
- *   [64, 64]
- *   [65, 127]
- *   [128, 128]
- *   [129, 255]
- *   [256, 256]
- *   [257, 511]
- *   [512, 512]
- *   [513, UINT64_MAX]
- *
- * Each range has a 'representative value', the lower-end value of the range,
- * which is what gets stored in the runtime profile data records and the VP
- * metadata. For example, it's 2 for [2, 2] and 65 for [65, 127].
- */
-#define INSTR_PROF_NUM_BUCKETS 22
-
-/*
- * Clz and Popcount. This code was copied from
- * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and
- * llvm/include/llvm/Support/MathExtras.h.
- */
-#if defined(_MSC_VER) && !defined(__clang__)
-
-#include <intrin.h>
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfClzll(unsigned long long X) {
-  unsigned long LeadZeroIdx = 0;
-#if !defined(_M_ARM64) && !defined(_M_X64)
-  // Scan the high 32 bits.
-  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32)))
-    return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset
-                                           // from the MSB.
-  // Scan the low 32 bits.
-  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X)))
-    return (int)(63 - LeadZeroIdx);
-#else
-  if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
-#endif
-  return 64;
-}
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfPopcountll(unsigned long long X) {
-  // This code originates from https://reviews.llvm.org/rG30626254510f.
-  unsigned long long v = X;
-  v = v - ((v >> 1) & 0x5555555555555555ULL);
-  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
-  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
-  return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56);
-}
-
-#else
-
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); }
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); }
-
-#endif  /* defined(_MSC_VER) && !defined(__clang__) */
-
-/* Map an (observed) memop size value to the representative value of its range.
- * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t
-InstrProfGetRangeRepValue(uint64_t Value) {
-  if (Value <= 8)
-    // The first ranges are individually tracked. Use the value as is.
-    return Value;
-  else if (Value >= 513)
-    // The last range is mapped to its lowest value.
-    return 513;
-  else if (InstProfPopcountll(Value) == 1)
-    // If it's a power of two, use it as is.
-    return Value;
-  else
-    // Otherwise, map the value to the previous power of two + 1.
-    return (UINT64_C(1) << (64 - InstProfClzll(Value) - 1)) + 1;
-}
-
-/* Return true if the range that an (observed) memop size value belongs to has
- * only a single value in the range.  For example, 0 -> true, 8 -> true, 10 ->
- * false, 64 -> true, 100 -> false, 513 -> false. */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned
-InstrProfIsSingleValRange(uint64_t Value) {
-  if (Value <= 8)
-    // The first ranges are individually tracked.
-    return 1;
-  else if (InstProfPopcountll(Value) == 1)
-    // If it's a power of two, there's only one value.
-    return 1;
-  else
-    // Otherwise, there's more than one value in the range.
-    return 0;
-}
-
-#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */
diff --git a/linux-x86/lib64/clang/14.0.2/include/rdseedintrin.h b/linux-x86/lib64/clang/14.0.2/include/rdseedintrin.h
deleted file mode 100644
index ccb3d2d..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/rdseedintrin.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
-#endif
-
-#ifndef __RDSEEDINTRIN_H
-#define __RDSEEDINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed16_step(unsigned short *__p)
-{
-  return __builtin_ia32_rdseed16_step(__p);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed32_step(unsigned int *__p)
-{
-  return __builtin_ia32_rdseed32_step(__p);
-}
-
-#ifdef __x86_64__
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed64_step(unsigned long long *__p)
-{
-  return __builtin_ia32_rdseed64_step(__p);
-}
-#endif
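-
-/* Each step function returns 1 on success and 0 when the hardware entropy
- * source is not ready, so callers typically retry in a loop (a sketch):
- *
- *   unsigned int seed;
- *   while (!_rdseed32_step(&seed))
- *     ;
- */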
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __RDSEEDINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
deleted file mode 100644
index 692b8f7..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
+++ /dev/null
@@ -1,357 +0,0 @@
-//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Common part of the public sanitizer interface.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H
-#define SANITIZER_COMMON_INTERFACE_DEFS_H
-
-#include <stddef.h>
-#include <stdint.h>
-
-// GCC does not understand __has_feature.
-#if !defined(__has_feature)
-#define __has_feature(x) 0
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-// Arguments for __sanitizer_sandbox_on_notify() below.
-typedef struct {
-  // Enable sandbox support in sanitizer coverage.
-  int coverage_sandboxed;
-  // File descriptor to write coverage data to. If -1 is passed, a file will
-  // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
-  // effect if coverage_sandboxed == 0.
-  intptr_t coverage_fd;
-  // If non-zero, split the coverage data into well-formed blocks. This is
-  // useful when coverage_fd is a socket descriptor. Each block will contain
-  // a header, allowing data from multiple processes to be sent over the same
-  // socket.
-  unsigned int coverage_max_block_size;
-} __sanitizer_sandbox_arguments;
-
-// Tell the tools to write their reports to "path.<pid>" instead of stderr.
-void __sanitizer_set_report_path(const char *path);
-// Tell the tools to write their reports to the provided file descriptor
-// (cast to void *).
-void __sanitizer_set_report_fd(void *fd);
-// Get the current full report file path, if a path was specified by
-// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
-const char *__sanitizer_get_report_path();
-
-// Notify the tools that the sandbox is going to be turned on. The reserved
-// parameter will be used in the future to hold a structure with functions
-// that the tools may call to bypass the sandbox.
-void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
-
-// This function is called by the tool when it has just finished reporting
-// an error. 'error_summary' is a one-line string that summarizes
-// the error message. This function can be overridden by the client.
-void __sanitizer_report_error_summary(const char *error_summary);
-
-// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen
-// in unaligned loads/stores. To find such bugs reliably, you need to replace
-// plain unaligned loads/stores with these calls.
-
-/// Loads a 16-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint16_t __sanitizer_unaligned_load16(const void *p);
-
-/// Loads a 32-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint32_t __sanitizer_unaligned_load32(const void *p);
-
-/// Loads a 64-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint64_t __sanitizer_unaligned_load64(const void *p);
-
-/// Stores a 16-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 16-bit value to store.
-void __sanitizer_unaligned_store16(void *p, uint16_t x);
-
-/// Stores a 32-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 32-bit value to store.
-void __sanitizer_unaligned_store32(void *p, uint32_t x);
-
-/// Stores a 64-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 64-bit value to store.
-void __sanitizer_unaligned_store64(void *p, uint64_t x);
-
-// Returns 1 on the first call, then returns 0 thereafter.  Called by the tool
-// to ensure only one report is printed when multiple errors occur
-// simultaneously.
-int __sanitizer_acquire_crash_state();
-
-/// Annotates the current state of a contiguous container, such as
-/// <c>std::vector</c>, <c>std::string</c>, or similar.
-///
-/// A contiguous container is a container that keeps all of its elements
-/// in a contiguous region of memory. The container owns the region of memory
-/// <c>[beg, end)</c>; the memory <c>[beg, mid)</c> is used to store the
-/// current elements, and the memory <c>[mid, end)</c> is reserved for future
-/// elements (<c>beg <= mid <= end</c>). For example, in
-/// <c>std::vector<> v</c>:
-///
-/// \code
-///   beg = &v[0];
-///   end = beg + v.capacity() * sizeof(v[0]);
-///   mid = beg + v.size()     * sizeof(v[0]);
-/// \endcode
-///
-/// This annotation tells the Sanitizer tool about the current state of the
-/// container so that the tool can report errors when memory from
-/// <c>[mid, end)</c> is accessed. Insert this annotation into methods like
-/// <c>push_back()</c> or <c>pop_back()</c>. Supply the old and new values of
-/// <c>mid</c>(<c><i>old_mid</i></c> and <c><i>new_mid</i></c>). In the initial
-/// state <c>mid == end</c>, so that should be the final state when the
-/// container is destroyed or when the container reallocates the storage.
-///
-/// For ASan, <c><i>beg</i></c> should be 8-aligned and <c><i>end</i></c>
-/// should be either 8-aligned or it should point to the end of a separate
-/// heap-, stack-, or global-allocated buffer. So the following example will
-/// not work:
-///
-/// \code
-///   int64_t x[2]; // 16 bytes, 8-aligned
-///   char *beg = (char *)&x[0];
-///   char *end = beg + 12; // Not 8-aligned, not the end of the buffer
-/// \endcode
-///
-/// The following, however, will work:
-/// \code
-///   int32_t x[3]; // 12 bytes, but 8-aligned under ASan.
-///   char *beg = (char*)&x[0];
-///   char *end = beg + 12; // Not 8-aligned, but is the end of the buffer
-/// \endcode
-///
-/// \note  Use this function with caution and do not use for anything other
-/// than vector-like classes.
-///
-/// \param beg Beginning of memory region.
-/// \param end End of memory region.
-/// \param old_mid Old middle of memory region.
-/// \param new_mid New middle of memory region.
-void __sanitizer_annotate_contiguous_container(const void *beg,
-                                               const void *end,
-                                               const void *old_mid,
-                                               const void *new_mid);
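-
-/// For instance, a vector-like class would annotate growth in
-/// <c>push_back()</c> roughly as follows (a sketch; the variable names are
-/// illustrative):
-/// \code
-///   // After constructing one new element of type T at old_mid:
-///   __sanitizer_annotate_contiguous_container(beg, end, old_mid,
-///                                             old_mid + sizeof(T));
-/// \endcode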
-
-/// Returns true if the contiguous container <c>[beg, end)</c> is properly
-/// poisoned.
-///
-/// Proper poisoning could occur, for example, with
-/// <c>__sanitizer_annotate_contiguous_container()</c>, that is, if
-/// <c>[beg, mid)</c> is addressable and <c>[mid, end)</c> is unaddressable.
-/// Full verification requires O (<c>end - beg</c>) time; this function tries
-/// to avoid such complexity by touching only parts of the container around
-/// <c><i>beg</i></c>, <c><i>mid</i></c>, and <c><i>end</i></c>.
-///
-/// \param beg Beginning of memory region.
-/// \param mid Middle of memory region.
-/// \param end Old end of memory region.
-///
-/// \returns True if the contiguous container <c>[beg, end)</c> is properly
-///  poisoned.
-int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
-                                            const void *end);
-
-/// Similar to <c>__sanitizer_verify_contiguous_container()</c> but also
-/// returns the address of the first improperly poisoned byte.
-///
-/// Returns NULL if the area is poisoned properly.
-///
-/// \param beg Beginning of memory region.
-/// \param mid Middle of memory region.
-/// \param end Old end of memory region.
-///
-/// \returns The bad address or NULL.
-const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
-                                                              const void *mid,
-                                                              const void *end);
-
-/// Prints the stack trace leading to this call (useful for calling from the
-/// debugger).
-void __sanitizer_print_stack_trace(void);
-
-// Symbolizes the supplied 'pc' using the format string 'fmt'.
-// Outputs at most 'out_buf_size' bytes into 'out_buf'.
-// If 'out_buf' is not empty then the output is zero or more non-empty C
-// strings followed by a single empty C string. Multiple strings can be
-// returned if the PC corresponds to an inlined function. Inlined frames are
-// printed in order from "most-inlined" to "least-inlined", so the last frame
-// should be the non-inlined function.
-// Inlined frames can be removed with 'symbolize_inline_frames=0'.
-// The format syntax is described in
-// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
-void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
-                              size_t out_buf_size);
-// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
-void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
-                                  char *out_buf, size_t out_buf_size);
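-
-// A sketch of symbolizing a PC; the "%p %F %L" directives (print the PC,
-// the function, and file:line) follow the printer header referenced above:
-//   char buf[256];
-//   __sanitizer_symbolize_pc(pc, "%p %F %L", buf, sizeof(buf));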
-
-/// Sets the callback to be called immediately before death on error.
-///
-/// Passing 0 will unset the callback.
-///
-/// \param callback User-provided callback.
-void __sanitizer_set_death_callback(void (*callback)(void));
-
-
-// Interceptor hooks.
-// Whenever a libc function interceptor is called, it checks if the
-// corresponding weak hook is defined, and calls it if it is indeed defined.
-// The primary use-case is data-flow-guided fuzzing, where the fuzzer needs
-// to know what is being passed to libc functions (for example memcmp).
-// FIXME: implement more hooks.
-
-/// Interceptor hook for <c>memcmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
-                                  const void *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strncmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
-                                  const char *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strncasecmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
-                                       const char *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strcmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
-                                  const char *s2, int result);
-
-/// Interceptor hook for <c>strcasecmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
-                                      const char *s2, int result);
-
-/// Interceptor hook for <c>strstr()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
-                                  const char *s2, char *result);
-
-void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
-                                      const char *s2, char *result);
-
-void __sanitizer_weak_hook_memmem(void *called_pc,
-                                  const void *s1, size_t len1,
-                                  const void *s2, size_t len2, void *result);
-
-// Prints stack traces for all live heap allocations ordered by total
-// allocation size until top_percent of total live heap is shown. top_percent
-// should be between 1 and 100. At most max_number_of_contexts contexts
-// (stack traces) are printed.
-// Experimental feature currently available only with ASan on Linux/x86_64.
-void __sanitizer_print_memory_profile(size_t top_percent,
-                                      size_t max_number_of_contexts);
-
-/// Notify ASan that a fiber switch has started (required only if implementing
-/// your own fiber library).
-///
-/// Before switching to a different stack, you must call
-/// <c>__sanitizer_start_switch_fiber()</c> with a pointer to the bottom of the
-/// destination stack and with its size. When code starts running on the new
-/// stack, it must call <c>__sanitizer_finish_switch_fiber()</c> to finalize
-/// the switch. The <c>__sanitizer_start_switch_fiber()</c> function takes a
-/// <c>void**</c> pointer argument to store the current fake stack if there is
-/// one (it is necessary when the runtime option
-/// <c>detect_stack_use_after_return</c> is enabled).
-///
-/// When restoring a stack, this <c>void**</c> pointer must be given to the
-/// <c>__sanitizer_finish_switch_fiber()</c> function. In most cases, this
-/// pointer can be stored on the stack immediately before switching. When
-/// leaving a fiber permanently, NULL must be passed as the first argument to
-/// the <c>__sanitizer_start_switch_fiber()</c> function so that the fake stack
-/// is destroyed. If your program does not need stack use-after-return
-/// detection, you can always pass NULL to these two functions.
-///
-/// \note The fake stack mechanism is disabled during fiber switch, so if a
-/// signal callback runs during the switch, it will not benefit from stack
-/// use-after-return detection.
-///
-/// \param[out] fake_stack_save Fake stack save location.
-/// \param bottom Bottom address of stack.
-/// \param size Size of stack in bytes.
-void __sanitizer_start_switch_fiber(void **fake_stack_save,
-                                    const void *bottom, size_t size);
-
-/// Notify ASan that a fiber switch has completed (required only if
-/// implementing your own fiber library).
-///
-/// When code starts running on the new stack, it must call
-/// <c>__sanitizer_finish_switch_fiber()</c> to finalize
-/// the switch. For usage details, see the description of
-/// <c>__sanitizer_start_switch_fiber()</c>.
-///
-/// \param fake_stack_save Fake stack save location.
-/// \param[out] bottom_old Bottom address of old stack.
-/// \param[out] size_old Size of old stack in bytes.
-void __sanitizer_finish_switch_fiber(void *fake_stack_save,
-                                     const void **bottom_old,
-                                     size_t *size_old);
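-
-/// A sketch of a complete switch using the pair of calls above (the
-/// context-switch primitive itself is hypothetical):
-/// \code
-///   void *fake_stack_save = NULL;
-///   __sanitizer_start_switch_fiber(&fake_stack_save, new_bottom, new_size);
-///   switch_to_fiber();  // execution resumes here when switched back
-///   __sanitizer_finish_switch_fiber(fake_stack_save,
-///                                   &old_bottom, &old_size);
-/// \endcode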
-
-// Get full module name and calculate pc offset within it.
-// Returns 1 if pc belongs to some module, 0 if module was not found.
-int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
-                                             size_t module_path_len,
-                                             void **pc_offset);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // SANITIZER_COMMON_INTERFACE_DEFS_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
deleted file mode 100644
index d6209a3..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ /dev/null
@@ -1,159 +0,0 @@
-//===-- dfsan_interface.h -------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of DataFlowSanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef DFSAN_INTERFACE_H
-#define DFSAN_INTERFACE_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint8_t dfsan_label;
-typedef uint32_t dfsan_origin;
-
-/// Signature of the callback argument to dfsan_set_write_callback().
-typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
-
-/// Computes the union of \c l1 and \c l2, resulting in a union label.
-dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
-
-/// Sets the label for each address in [addr,addr+size) to \c label.
-void dfsan_set_label(dfsan_label label, void *addr, size_t size);
-
-/// Sets the label for each address in [addr,addr+size) to the union of the
-/// current label for that address and \c label.
-void dfsan_add_label(dfsan_label label, void *addr, size_t size);
-
-/// Retrieves the label associated with the given data.
-///
-/// The type of 'data' is arbitrary.  The function accepts a value of any type,
-/// which can be truncated or extended (implicitly or explicitly) as necessary.
-/// The truncation/extension operations will preserve the label of the original
-/// value.
-dfsan_label dfsan_get_label(long data);
-
-/// Retrieves the immediate origin associated with the given data. The returned
-/// origin may point to another origin.
-///
-/// The type of 'data' is arbitrary.
-dfsan_origin dfsan_get_origin(long data);
-
-/// Retrieves the label associated with the data at the given address.
-dfsan_label dfsan_read_label(const void *addr, size_t size);
-
-/// Returns whether the given label \c label contains the label \c elem.
-int dfsan_has_label(dfsan_label label, dfsan_label elem);
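-
-/// For example, a label attached to an input propagates into values
-/// computed from it (a sketch):
-/// \code
-///   int i = 100;
-///   dfsan_label i_label = 1;
-///   dfsan_set_label(i_label, &i, sizeof(i));
-///   int j = i + 1;
-///   assert(dfsan_has_label(dfsan_get_label(j), i_label));
-/// \endcode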
-
-/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory.  Use this call to start over the taint tracking
-/// within the same process.
-///
-/// Note: If another thread is working with tainted data during the flush, that
-/// taint could still be written to shadow after the flush.
-void dfsan_flush(void);
-
-/// Sets a callback to be invoked on calls to write().  The callback is invoked
-/// before the write is done.  The write is not guaranteed to succeed when the
-/// callback executes.  Pass in NULL to remove any callback.
-void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
-
-/// Interceptor hooks.
-/// Whenever one of dfsan's custom functions is called, the corresponding
-/// hook is called if it is non-zero. The hooks should be defined by the user.
-/// The primary use case is taint-guided fuzzing, where the fuzzer
-/// needs to see the parameters of the function and the labels.
-/// FIXME: implement more hooks.
-void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
-                            size_t n, dfsan_label s1_label,
-                            dfsan_label s2_label, dfsan_label n_label);
-void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
-                             size_t n, dfsan_label s1_label,
-                             dfsan_label s2_label, dfsan_label n_label);
-
-/// Prints the origin trace of the label at the address addr to stderr. It also
-/// prints the description at the beginning of the trace. If origin tracking is not
-/// on, or the address is not labeled, it prints nothing.
-void dfsan_print_origin_trace(const void *addr, const char *description);
-
-/// Prints the origin trace of the label at the address \p addr to a
-/// pre-allocated output buffer. If origin tracking is not on, or the address is
-/// not labeled, it prints nothing.
-///
-/// Typical usage:
-/// \code
-///   char kDescription[] = "...";
-///   char buf[1024];
-///   dfsan_sprint_origin_trace(&tainted_var, kDescription, buf, sizeof(buf));
-/// \endcode
-///
-/// Typical usage that handles truncation:
-/// \code
-///   char buf[1024];
-///   int len = dfsan_sprint_origin_trace(&var, nullptr, buf, sizeof(buf));
-///
-///   if (len < sizeof(buf)) {
-///     ProcessOriginTrace(buf);
-///   } else {
-///     char *tmpbuf = new char[len + 1];
-///     dfsan_sprint_origin_trace(&var, nullptr, tmpbuf, len + 1);
-///     ProcessOriginTrace(tmpbuf);
-///     delete[] tmpbuf;
-///   }
-/// \endcode
-///
-/// \param addr The tainted memory address whose origin we are printing.
-/// \param description A description printed at the beginning of the trace.
-/// \param [out] out_buf The output buffer to write the results to.
-/// \param out_buf_size The size of \p out_buf.
-///
-/// \returns The number of symbols that should have been written to \p out_buf
-/// (not including trailing null byte '\0'). Thus, the string is truncated iff
-/// return value is not less than \p out_buf_size.
-size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
-                                 char *out_buf, size_t out_buf_size);
-
-/// Prints the stack trace leading to this call to a pre-allocated output
-/// buffer.
-///
-/// For usage examples, see dfsan_sprint_origin_trace.
-///
-/// \param [out] out_buf The output buffer to write the results to.
-/// \param out_buf_size The size of \p out_buf.
-///
-/// \returns The number of symbols that should have been written to \p out_buf
-/// (not including trailing null byte '\0'). Thus, the string is truncated iff
-/// return value is not less than \p out_buf_size.
-size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
-
-/// Retrieves the very first origin associated with the data at the given
-/// address.
-dfsan_origin dfsan_get_init_origin(const void *addr);
-
-/// Returns the value of -dfsan-track-origins.
-/// * 0: do not track origins.
-/// * 1: track origins at memory store operations.
-/// * 2: track origins at memory load and store operations.
-int dfsan_get_track_origins(void);
-#ifdef __cplusplus
-}  // extern "C"
-
-template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
-  dfsan_set_label(label, (void *)&data, sizeof(T));
-}
-
-#endif
-
-#endif  // DFSAN_INTERFACE_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
deleted file mode 100644
index eeb39fb..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
+++ /dev/null
@@ -1,124 +0,0 @@
-//===-- msan_interface.h --------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef MSAN_INTERFACE_H
-#define MSAN_INTERFACE_H
-
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-  /* Set raw origin for the memory range. */
-  void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
-
-  /* Get raw origin for an address. */
-  uint32_t __msan_get_origin(const volatile void *a);
-
-  /* Test that this_id is a descendant of prev_id (or they are simply equal).
-   * "descendant" here means they are part of the same chain, created with
-   * __msan_chain_origin. */
-  int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
-
-  /* Returns non-zero if tracking origins. */
-  int __msan_get_track_origins(void);
-
-  /* Returns the origin id of the latest UMR in the calling thread. */
-  uint32_t __msan_get_umr_origin(void);
-
-  /* Make memory region fully initialized (without changing its contents). */
-  void __msan_unpoison(const volatile void *a, size_t size);
-
-  /* Make a null-terminated string fully initialized (without changing its
-     contents). */
-  void __msan_unpoison_string(const volatile char *a);
-
-  /* Make first n parameters of the next function call fully initialized. */
-  void __msan_unpoison_param(size_t n);
-
-  /* Make memory region fully uninitialized (without changing its contents).
-     This is a legacy interface that does not update origin information. Use
-     __msan_allocated_memory() instead. */
-  void __msan_poison(const volatile void *a, size_t size);
-
-  /* Make memory region partially uninitialized (without changing its contents).
-   */
-  void __msan_partial_poison(const volatile void *data, void *shadow,
-                             size_t size);
-
-  /* Returns the offset of the first (at least partially) poisoned byte in the
-     memory range, or -1 if the whole range is good. */
-  intptr_t __msan_test_shadow(const volatile void *x, size_t size);
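-
-  /* A sketch combining the calls above and below in a custom-allocator
-     test:
-       char buf[8];
-       __msan_allocated_memory(buf, sizeof(buf));
-       // __msan_test_shadow returns 0: the very first byte is poisoned.
-       __msan_unpoison(buf, sizeof(buf));
-       // __msan_test_shadow returns -1: the range is fully initialized.
-   */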
-
-  /* Checks that memory range is fully initialized, and reports an error if it
-   * is not. */
-  void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
-
-  /* For testing:
-     __msan_set_expect_umr(1);
-     ... some buggy code ...
-     __msan_set_expect_umr(0);
-     The last line will verify that a UMR happened. */
-  void __msan_set_expect_umr(int expect_umr);
-
-  /* Change the value of keep_going flag. Non-zero value means don't terminate
-     program execution when an error is detected. This will not affect errors in
-     modules that were compiled without the corresponding compiler flag. */
-  void __msan_set_keep_going(int keep_going);
-
-  /* Print shadow and origin for the memory range to stderr in a human-readable
-     format. */
-  void __msan_print_shadow(const volatile void *x, size_t size);
-
-  /* Print shadow for the memory range to stderr in a minimalistic
-     human-readable format. */
-  void __msan_dump_shadow(const volatile void *x, size_t size);
-
-  /* Returns true if running under a dynamic tool (DynamoRio-based). */
-  int  __msan_has_dynamic_component(void);
-
-  /* Tell MSan about newly allocated memory (ex.: custom allocator).
-     Memory will be marked uninitialized, with origin at the call site. */
-  void __msan_allocated_memory(const volatile void* data, size_t size);
-
-  /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
-  void __sanitizer_dtor_callback(const volatile void* data, size_t size);
-
-  /* This function may be optionally provided by user and should return
-     a string containing Msan runtime options. See msan_flags.h for details. */
-  const char* __msan_default_options(void);
-
-  /* Deprecated. Call __sanitizer_set_death_callback instead. */
-  void __msan_set_death_callback(void (*callback)(void));
-
-  /* Update shadow for the application copy of size bytes from src to dst.
-     Src and dst are application addresses. This function does not copy the
-     actual application memory, it only updates shadow and origin for such
-     copy. Source and destination regions can overlap. */
-  void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
-                          size_t size);
-
-  /* Disables uninitialized memory checks in interceptors. */
-  void __msan_scoped_disable_interceptor_checks(void);
-
-  /* Re-enables uninitialized memory checks in interceptors after a previous
-     call to __msan_scoped_disable_interceptor_checks. */
-  void __msan_scoped_enable_interceptor_checks(void);
-
-  void __msan_start_switch_fiber(const void *bottom, size_t size);
-  void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/smmintrin.h b/linux-x86/lib64/clang/14.0.2/include/smmintrin.h
deleted file mode 100644
index 710e55a..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/smmintrin.h
+++ /dev/null
@@ -1,2351 +0,0 @@
-/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __SMMINTRIN_H
-#define __SMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architecture"
-#endif
-
-#include <tmmintrin.h>
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
-
-/* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT    0x00
-#define _MM_FROUND_TO_NEG_INF        0x01
-#define _MM_FROUND_TO_POS_INF        0x02
-#define _MM_FROUND_TO_ZERO           0x03
-#define _MM_FROUND_CUR_DIRECTION     0x04
-
-#define _MM_FROUND_RAISE_EXC         0x00
-#define _MM_FROUND_NO_EXC            0x08
-
-#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
-#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
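-
-/* For example, _MM_FROUND_FLOOR expands to
-   (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF) == 0x01: round toward
-   negative infinity with the default precision-exception behavior. */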
-
-/// Rounds up each element of the 128-bit vector of [4 x float] to an
-///    integer and returns the rounded values in a 128-bit vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_ceil_ps(__m128 X);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float] values to be rounded up.
-/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X)       _mm_round_ps((X), _MM_FROUND_CEIL)
-
-/// Rounds up each element of the 128-bit vector of [2 x double] to an
-///    integer and returns the rounded values in a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_ceil_pd(__m128d X);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double] values to be rounded up.
-/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X)       _mm_round_pd((X), _MM_FROUND_CEIL)
-
-/// Copies three upper elements of the first 128-bit vector operand to
-///    the corresponding three upper elements of the 128-bit result vector of
-///    [4 x float]. Rounds up the lowest element of the second 128-bit vector
-///    operand to an integer and copies it to the lowest element of the 128-bit
-///    result vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_ceil_ss(__m128 X, __m128 Y);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [4 x float]. The value stored in bits [31:0] is
-///    rounded up to the nearest integer and copied to the corresponding bits
-///    of the result.
-/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
-///    values.
-#define _mm_ceil_ss(X, Y)    _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
-
-/// Copies the upper element of the first 128-bit vector operand to the
-///    corresponding upper element of the 128-bit result vector of [2 x double].
-///    Rounds up the lower element of the second 128-bit vector operand to an
-///    integer and copies it to the lower element of the 128-bit result vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_ceil_sd(__m128d X, __m128d Y);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [2 x double]. The value stored in bits [63:0] is
-///    rounded up to the nearest integer and copied to the corresponding bits
-///    of the result.
-/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
-///    values.
-#define _mm_ceil_sd(X, Y)    _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
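-
-/* Usage sketch (illustrative) for the ceiling family, assuming SSE4.1 is
-   enabled (e.g. -msse4.1):
-
-     __m128 x  = _mm_set_ps(3.0f, 2.5f, -1.25f, 1.25f);
-     __m128 up = _mm_ceil_ps(x);   // lanes 0..3: 2.0f, -1.0f, 3.0f, 3.0f
-     float out[4];
-     _mm_storeu_ps(out, up);
-*/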
-
-/// Rounds down each element of the 128-bit vector of [4 x float] to an
-///    integer and returns the rounded values in a 128-bit vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_floor_ps(__m128 X);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float] values to be rounded down.
-/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X)      _mm_round_ps((X), _MM_FROUND_FLOOR)
-
-/// Rounds down each element of the 128-bit vector of [2 x double] to an
-///    integer and returns the rounded values in a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_floor_pd(__m128d X);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X)      _mm_round_pd((X), _MM_FROUND_FLOOR)
-
-/// Copies three upper elements of the first 128-bit vector operand to
-///    the corresponding three upper elements of the 128-bit result vector of
-///    [4 x float]. Rounds down the lowest element of the second 128-bit vector
-///    operand to an integer and copies it to the lowest element of the 128-bit
-///    result vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_floor_ss(__m128 X, __m128 Y);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [4 x float]. The value stored in bits [31:0] is
-///    rounded down to the nearest integer and copied to the corresponding bits
-///    of the result.
-/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
-///    values.
-#define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
-
-/// Copies the upper element of the first 128-bit vector operand to the
-///    corresponding upper element of the 128-bit result vector of [2 x double].
-///    Rounds down the lower element of the second 128-bit vector operand to an
-///    integer and copies it to the lower element of the 128-bit result vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_floor_sd(__m128d X, __m128d Y);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [2 x double]. The value stored in bits [63:0] is
-///    rounded down to the nearest integer and copied to the corresponding bits
-///    of the result.
-/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
-///    values.
-#define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
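-
-/* Correspondingly, _mm_floor_ps on lanes {1.25f, -1.25f, 2.5f, 3.0f}
-   yields {1.0f, -2.0f, 2.0f, 3.0f}. */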
-
-/// Rounds each element of the 128-bit vector of [4 x float] to an
-///    integer value according to the rounding control specified by the second
-///    argument and returns the rounded values in a 128-bit vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_round_ps(__m128 X, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float].
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used \n
-///      1: The PE field is not updated \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M \n
-///      1: Use the current MXCSR setting \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest \n
-///      01: Downward (toward negative infinity) \n
-///      10: Upward (toward positive infinity) \n
-///      11: Truncated
-/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
-  ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
-
-/// Copies three upper elements of the first 128-bit vector operand to
-///    the corresponding three upper elements of the 128-bit result vector of
-///    [4 x float]. Rounds the lowest element of the second 128-bit vector
-///    operand to an integer value according to the rounding control specified
-///    by the third argument and copies it to the lowest element of the 128-bit
-///    result vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_round_ss(__m128 X, __m128 Y, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [4 x float]. The value stored in bits [31:0] is
-///    rounded to the nearest integer using the specified rounding control and
-///    copied to the corresponding bits of the result.
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used \n
-///      1: The PE field is not updated \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M \n
-///      1: Use the current MXCSR setting \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest \n
-///      01: Downward (toward negative infinity) \n
-///      10: Upward (toward positive infinity) \n
-///      11: Truncated
-/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
-///    values.
-#define _mm_round_ss(X, Y, M) \
-  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
-                                  (__v4sf)(__m128)(Y), (M)))
-
-/// Rounds each element of the 128-bit vector of [2 x double] to an
-///    integer value according to the rounding control specified by the second
-///    argument and returns the rounded values in a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_round_pd(__m128d X, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double].
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used \n
-///      1: The PE field is not updated \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M \n
-///      1: Use the current MXCSR setting \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest \n
-///      01: Downward (toward negative infinity) \n
-///      10: Upward (toward positive infinity) \n
-///      11: Truncated
-/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
-  ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
-
-/// Copies the upper element of the first 128-bit vector operand to the
-///    corresponding upper element of the 128-bit result vector of [2 x double].
-///    Rounds the lower element of the second 128-bit vector operand to an
-///    integer value according to the rounding control specified by the third
-///    argument and copies it to the lower element of the 128-bit result vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_round_sd(__m128d X, __m128d Y, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
-///    copied to the corresponding bits of the result.
-/// \param Y
-///    A 128-bit vector of [2 x double]. The value stored in bits [63:0] is
-///    rounded to the nearest integer using the specified rounding control and
-///    copied to the corresponding bits of the result.
-/// \param M
-///    An integer value that specifies the rounding operation. \n
-///    Bits [7:4] are reserved. \n
-///    Bit [3] is a precision exception value: \n
-///      0: A normal PE exception is used \n
-///      1: The PE field is not updated \n
-///    Bit [2] is the rounding control source: \n
-///      0: Use bits [1:0] of \a M \n
-///      1: Use the current MXCSR setting \n
-///    Bits [1:0] contain the rounding control definition: \n
-///      00: Nearest \n
-///      01: Downward (toward negative infinity) \n
-///      10: Upward (toward positive infinity) \n
-///      11: Truncated
-/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
-///    values.
-#define _mm_round_sd(X, Y, M) \
-  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
-                                   (__v2df)(__m128d)(Y), (M)))
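-
-/* Usage sketch (illustrative): round to nearest, ties to even, without
-   raising a precision exception:
-
-     __m128 v = _mm_set_ps(2.5f, 1.5f, 0.5f, -0.5f);
-     __m128 r = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
-     // lanes 0..3: -0.0f, 0.0f, 2.0f, 2.0f
-*/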
-
-/* SSE4 Packed Blending Intrinsics.  */
-/// Returns a 128-bit vector of [2 x double] where the values are
-///    selected from either the first or second operand as specified by the
-///    third operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_blend_pd(__m128d V1, __m128d V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
-///
-/// \param V1
-///    A 128-bit vector of [2 x double].
-/// \param V2
-///    A 128-bit vector of [2 x double].
-/// \param M
-///    An immediate integer operand, with mask bits [1:0] specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    index of a copied value. When a mask bit is 0, the corresponding 64-bit
-///    element in operand \a V1 is copied to the same position in the result.
-///    When a mask bit is 1, the corresponding 64-bit element in operand \a V2
-///    is copied to the same position in the result.
-/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
-  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
-                                     (__v2df)(__m128d)(V2), (int)(M)))
-
-/// Returns a 128-bit vector of [4 x float] where the values are selected
-///    from either the first or second operand as specified by the third
-///    operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_blend_ps(__m128 V1, __m128 V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VBLENDPS / BLENDPS </c> instruction.
-///
-/// \param V1
-///    A 128-bit vector of [4 x float].
-/// \param V2
-///    A 128-bit vector of [4 x float].
-/// \param M
-///    An immediate integer operand, with mask bits [3:0] specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    index of a copied value. When a mask bit is 0, the corresponding 32-bit
-///    element in operand \a V1 is copied to the same position in the result.
-///    When a mask bit is 1, the corresponding 32-bit element in operand \a V2
-///    is copied to the same position in the result.
-/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
-  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
-                                    (__v4sf)(__m128)(V2), (int)(M)))
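-
-/* For example, _mm_blend_ps(V1, V2, 0x5) (mask 0101) takes elements 0 and 2
-   from V2 and elements 1 and 3 from V1. */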
-
-/// Returns a 128-bit vector of [2 x double] where the values are
-///    selected from either the first or second operand as specified by the
-///    third operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDVPD / BLENDVPD </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [2 x double].
-/// \param __V2
-///    A 128-bit vector of [2 x double].
-/// \param __M
-///    A 128-bit vector operand, with mask bits 127 and 63 specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    most significant bit of a copied value. When a mask bit is 0, the
-///    corresponding 64-bit element in operand \a __V1 is copied to the same
-///    position in the result. When a mask bit is 1, the corresponding 64-bit
-///    element in operand \a __V2 is copied to the same position in the result.
-/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
-  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
-                                            (__v2df)__M);
-}
-
-/// Returns a 128-bit vector of [4 x float] where the values are
-///    selected from either the first or second operand as specified by the
-///    third operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDVPS / BLENDVPS </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x float].
-/// \param __V2
-///    A 128-bit vector of [4 x float].
-/// \param __M
-///    A 128-bit vector operand, with mask bits 127, 95, 63, and 31 specifying
-///    how the values are to be copied. The position of the mask bit corresponds
-///    to the most significant bit of a copied value. When a mask bit is 0, the
-///    corresponding 32-bit element in operand \a __V1 is copied to the same
-///    position in the result. When a mask bit is 1, the corresponding 32-bit
-///    element in operand \a __V2 is copied to the same position in the result.
-/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
-  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
-                                           (__v4sf)__M);
-}
-
-/// Returns a 128-bit vector of [16 x i8] where the values are selected
-///    from either of the first or second operand as specified by the third
-///    operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPBLENDVB / PBLENDVB </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [16 x i8].
-/// \param __V2
-///    A 128-bit vector of [16 x i8].
-/// \param __M
-///    A 128-bit vector operand, with mask bits 127, 119, 111...7 specifying
-///    how the values are to be copied. The position of the mask bit corresponds
-///    to the most significant bit of a copied value. When a mask bit is 0, the
-///    corresponding 8-bit element in operand \a __V1 is copied to the same
-///    position in the result. When a mask bit is 1, the corresponding 8-bit
-///    element in operand \a __V2 is copied to the same position in the result.
-/// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
-  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
-                                               (__v16qi)__M);
-}
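-
-/* Usage sketch (illustrative): because blending selects on the mask's most
-   significant bit per element, a comparison result makes a branch-free
-   select, e.g. a per-byte signed maximum:
-
-     __m128i mask = _mm_cmpgt_epi8(a, b);         // 0xFF where a > b
-     __m128i vmax = _mm_blendv_epi8(b, a, mask);  // a where a > b, else b
-*/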
-
-/// Returns a 128-bit vector of [8 x i16] where the values are selected
-///    from either of the first or second operand as specified by the third
-///    operand, the control mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_blend_epi16(__m128i V1, __m128i V2, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPBLENDW / PBLENDW </c> instruction.
-///
-/// \param V1
-///    A 128-bit vector of [8 x i16].
-/// \param V2
-///    A 128-bit vector of [8 x i16].
-/// \param M
-///    An immediate integer operand, with mask bits [7:0] specifying how the
-///    values are to be copied. The position of the mask bit corresponds to the
-///    index of a copied value. When a mask bit is 0, the corresponding 16-bit
-///    element in operand \a V1 is copied to the same position in the result.
-///    When a mask bit is 1, the corresponding 16-bit element in operand \a V2
-///    is copied to the same position in the result.
-/// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
-  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
-                                        (__v8hi)(__m128i)(V2), (int)(M)))
-
-/* SSE4 Dword Multiply Instructions.  */
-/// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
-///    and returns the lower 32 bits of each product in a 128-bit vector of
-///    [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULLD / PMULLD </c> instruction.
-///
-/// \param __V1
-///    A 128-bit integer vector.
-/// \param __V2
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
-}
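-
-/* Note that only the low 32 bits of each product are kept: for example, if
-   every lane of both inputs is 0x00010000, every lane of the result is 0. */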
-
-/// Multiplies corresponding even-indexed elements of two 128-bit
-///    vectors of [4 x i32] and returns a 128-bit vector of [2 x i64]
-///    containing the products.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULDQ / PMULDQ </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x i32].
-/// \param __V2
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [2 x i64] containing the products of both
-///    operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
-}
-
-/* SSE4 Floating Point Dot Product Instructions.  */
-/// Computes the dot product of the two 128-bit vectors of [4 x float]
-///    and returns it in the elements of the 128-bit result vector of
-///    [4 x float].
-///
-///    The immediate integer operand controls which input elements
-///    will contribute to the dot product, and where the final results are
-///    returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_dp_ps(__m128 X, __m128 Y, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VDPPS / DPPS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float].
-/// \param Y
-///    A 128-bit vector of [4 x float].
-/// \param M
-///    An immediate integer operand. Mask bits [7:4] determine which elements
-///    of the input vectors are used, with bit [4] corresponding to the lowest
-///    element and bit [7] corresponding to the highest element of each [4 x
-///    float] vector. If a bit is set, the corresponding elements from the two
-///    input vectors are used as an input for dot product; otherwise that input
-///    is treated as zero. Bits [3:0] determine which elements of the result
-///    will receive a copy of the final dot product, with bit [0] corresponding
-///    to the lowest element and bit [3] corresponding to the highest element of
-///    each [4 x float] subvector. If a bit is set, the dot product is returned
-///    in the corresponding element; otherwise that element is set to zero.
-/// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
-  ((__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
-                                (__v4sf)(__m128)(Y), (M)))
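-
-/* Usage sketch (illustrative): a full four-element dot product broadcast to
-   every lane uses mask 0xFF (all inputs enabled, all outputs written):
-
-     __m128 d   = _mm_dp_ps(x, y, 0xFF);  // every lane holds dot(x, y)
-     float  dot = _mm_cvtss_f32(d);
-*/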
-
-/// Computes the dot product of the two 128-bit vectors of [2 x double]
-///    and returns it in the elements of the 128-bit result vector of
-///    [2 x double].
-///
-///    The immediate integer operand controls which input
-///    elements will contribute to the dot product, and where the final results
-///    are returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_dp_pd(__m128d X, __m128d Y, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VDPPD / DPPD </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [2 x double].
-/// \param Y
-///    A 128-bit vector of [2 x double].
-/// \param M
-///    An immediate integer operand. Mask bits [5:4] determine which elements
-///    of the input vectors are used, with bit [4] corresponding to the lowest
-///    element and bit [5] corresponding to the highest element of each [2 x
-///    double] vector. If a bit is set, the corresponding elements from the two
-///    input vectors are used as an input for dot product; otherwise that input
-///    is treated as zero. Bits [1:0] determine which elements of the result
-///    will receive a copy of the final dot product, with bit [0] corresponding
-///    to the lowest element and bit [1] corresponding to the highest element of
-///    each [2 x double] vector. If a bit is set, the dot product is returned in
-///    the corresponding element; otherwise that element is set to zero.
-/// \returns A 128-bit vector of [2 x double] containing the dot product.
-#define _mm_dp_pd(X, Y, M) \
-  ((__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
-                                 (__v2df)(__m128d)(Y), (M)))
-
-/* SSE4 Streaming Load Hint Instruction.  */
-/// Loads integer values from a 128-bit aligned memory location to a
-///    128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQA / MOVNTDQA </c> instruction.
-///
-/// \param __V
-///    A pointer to a 128-bit aligned memory location that contains the integer
-///    values.
-/// \returns A 128-bit integer vector containing the data stored at the
-///    specified memory location.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
-  return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
-}
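-
-/* Note: the pointer must be 16-byte aligned; the non-temporal hint is mainly
-   beneficial when reading from write-combining memory regions. */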
-
-/* SSE4 Packed Integer Min/Max Instructions.  */
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [16 x i8] and returns a 128-bit vector of [16 x i8] containing the lesser
-///    of the two values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSB / PMINSB </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [16 x i8].
-/// \param __V2
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [16 x i8] and returns a 128-bit vector of [16 x i8] containing the
-///    greater value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSB / PMAXSB </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [16 x i8].
-/// \param __V2
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [8 x u16] and returns a 128-bit vector of [8 x u16] containing the lesser
-///    value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUW / PMINUW </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [8 x u16].
-/// \param __V2
-///    A 128-bit vector of [8 x u16].
-/// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [8 x u16] and returns a 128-bit vector of [8 x u16] containing the
-///    greater value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUW / PMAXUW </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [8 x u16].
-/// \param __V2
-///    A 128-bit vector of [8 x u16].
-/// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser
-///    value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSD / PMINSD </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x i32].
-/// \param __V2
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [4 x i32] and returns a 128-bit vector of [4 x i32] containing the
-///    greater value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSD / PMAXSD </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x i32].
-/// \param __V2
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser
-///    value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUD / PMINUD </c>  instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x u32].
-/// \param __V2
-///    A 128-bit vector of [4 x u32].
-/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
-}
-
-/// Compares the corresponding elements of two 128-bit vectors of
-///    [4 x u32] and returns a 128-bit vector of [4 x u32] containing the
-///    greater value of the two.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUD / PMAXUD </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x u32].
-/// \param __V2
-///    A 128-bit vector of [4 x u32].
-/// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
-}
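-
-/* Signedness matters here: with lanes 0xFFFFFFFF and 0x00000001,
-   _mm_min_epi32 picks 0xFFFFFFFF (-1 as signed) while _mm_min_epu32
-   picks 0x00000001. */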
-
-/* SSE4 Insertion and Extraction from XMM Register Instructions.  */
-/// Takes the first argument \a X and inserts an element from the second
-///    argument \a Y as selected by the third argument \a N. That result then
-///    has elements zeroed out also as selected by the third argument \a N. The
-///    resulting 128-bit vector of [4 x float] is then returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_insert_ps(__m128 X, __m128 Y, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VINSERTPS </c> instruction.
-///
-/// \param X
-///    A 128-bit vector source operand of [4 x float]. With the exception of
-///    those bits in the result copied from parameter \a Y and zeroed by bits
-///    [3:0] of \a N, all bits from this parameter are copied to the result.
-/// \param Y
-///    A 128-bit vector source operand of [4 x float]. One single-precision
-///    floating-point element from this source, as determined by the immediate
-///    parameter, is copied to the result.
-/// \param N
-///    Specifies which bits from operand \a Y will be copied, which bits in the
-///    result they will be copied to, and which bits in the result will be
-///    cleared. The following assignments are made: \n
-///    Bits [7:6] specify the bits to copy from operand \a Y: \n
-///      00: Selects bits [31:0] from operand \a Y. \n
-///      01: Selects bits [63:32] from operand \a Y. \n
-///      10: Selects bits [95:64] from operand \a Y. \n
-///      11: Selects bits [127:96] from operand \a Y. \n
-///    Bits [5:4] specify the bits in the result to which the selected bits
-///    from operand \a Y are copied: \n
-///      00: Copies the selected bits from \a Y to result bits [31:0]. \n
-///      01: Copies the selected bits from \a Y to result bits [63:32]. \n
-///      10: Copies the selected bits from \a Y to result bits [95:64]. \n
-///      11: Copies the selected bits from \a Y to result bits [127:96]. \n
-///    Bits [3:0]: If any of these bits are set, the corresponding result
-///    element is cleared.
-/// \returns A 128-bit vector of [4 x float] containing the copied
-///    single-precision floating-point elements from the operands.
-#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
-
-/// Extracts a 32-bit integer from a 128-bit vector of [4 x float] and
-///    returns it, using the immediate value parameter \a N as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_extract_ps(__m128 X, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VEXTRACTPS / EXTRACTPS </c>
-/// instruction.
-///
-/// \param X
-///    A 128-bit vector of [4 x float].
-/// \param N
-///    An immediate value. Bits [1:0] determines which bits from the argument
-///    \a X are extracted and returned: \n
-///    00: Bits [31:0] of parameter \a X are returned. \n
-///    01: Bits [63:32] of parameter \a X are returned. \n
-///    10: Bits [95:64] of parameter \a X are returned. \n
-///    11: Bits [127:96] of parameter \a X are returned.
-/// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) \
-  __builtin_bit_cast(int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
-
-/* Miscellaneous insert and extract macros.  */
-/* Extract a single-precision float from X at index N into D.  */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
-  do { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } while (0)
-
-/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
-   an index suitable for _mm_insert_ps.  */
-#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
-
-/* Extract a float from X at index N into the first index of the return.  */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
-                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
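-
-/* For example, _MM_MK_INSERTPS_NDX(2, 0, 0) == 0x80: copy element 2 of the
-   source into element 0 of the result, clearing no elements. */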
-
-/* Insert int into packed integer array at index.  */
-/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
-///    the 128-bit integer vector parameter, and then inserting the lower 8 bits
-///    of an integer parameter \a I into an offset specified by the immediate
-///    value parameter \a N.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_insert_epi8(__m128i X, int I, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPINSRB / PINSRB </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector of [16 x i8]. This vector is copied to the
-///    result and then one of the sixteen elements in the result vector is
-///    replaced by the lower 8 bits of \a I.
-/// \param I
-///    An integer. The lower 8 bits of this operand are written to the result
-///    beginning at the offset specified by \a N.
-/// \param N
-///    An immediate value. Bits [3:0] specify the bit offset in the result at
-///    which the lower 8 bits of \a I are written. \n
-///    0000: Bits [7:0] of the result are used for insertion. \n
-///    0001: Bits [15:8] of the result are used for insertion. \n
-///    0010: Bits [23:16] of the result are used for insertion. \n
-///    0011: Bits [31:24] of the result are used for insertion. \n
-///    0100: Bits [39:32] of the result are used for insertion. \n
-///    0101: Bits [47:40] of the result are used for insertion. \n
-///    0110: Bits [55:48] of the result are used for insertion. \n
-///    0111: Bits [63:56] of the result are used for insertion. \n
-///    1000: Bits [71:64] of the result are used for insertion. \n
-///    1001: Bits [79:72] of the result are used for insertion. \n
-///    1010: Bits [87:80] of the result are used for insertion. \n
-///    1011: Bits [95:88] of the result are used for insertion. \n
-///    1100: Bits [103:96] of the result are used for insertion. \n
-///    1101: Bits [111:104] of the result are used for insertion. \n
-///    1110: Bits [119:112] of the result are used for insertion. \n
-///    1111: Bits [127:120] of the result are used for insertion.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
-                                         (int)(I), (int)(N)))
-
-/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
-///    the 128-bit integer vector parameter, and then inserting the 32-bit
-///    integer parameter \a I at the offset specified by the immediate value
-///    parameter \a N.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_insert_epi32(__m128i X, int I, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPINSRD / PINSRD </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector of [4 x i32]. This vector is copied to the
-///    result and then one of the four elements in the result vector is
-///    replaced by \a I.
-/// \param I
-///    A 32-bit integer that is written to the result beginning at the offset
-///    specified by \a N.
-/// \param N
-///    An immediate value. Bits [1:0] specify the bit offset in the result at
-///    which the integer \a I is written. \n
-///    00: Bits [31:0] of the result are used for insertion. \n
-///    01: Bits [63:32] of the result are used for insertion. \n
-///    10: Bits [95:64] of the result are used for insertion. \n
-///    11: Bits [127:96] of the result are used for insertion.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
-                                        (int)(I), (int)(N)))
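-
-/* Usage sketch (illustrative): replace one lane of a [4 x i32] vector.
-
-     __m128i v = _mm_set_epi32(3, 2, 1, 0);    // lanes 0..3: 0, 1, 2, 3
-     __m128i w = _mm_insert_epi32(v, 42, 2);   // lanes 0..3: 0, 1, 42, 3
-*/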
-
-#ifdef __x86_64__
-/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
-///    the 128-bit integer vector parameter, and then inserting the 64-bit
-///    integer parameter \a I, using the immediate value parameter \a N as an
-///    insertion location selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_insert_epi64(__m128i X, long long I, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPINSRQ / PINSRQ </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector of [2 x i64]. This vector is copied to the
-///    result and then one of the two elements in the result vector is replaced
-///    by \a I.
-/// \param I
-///    A 64-bit integer that is written to the result beginning at the offset
-///    specified by \a N.
-/// \param N
-///    An immediate value. Bit [0] specifies the bit offset in the result at
-///    which the integer \a I is written. \n
-///    0: Bits [63:0] of the result are used for insertion. \n
-///    1: Bits [127:64] of the result are used for insertion. \n
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
-                                        (long long)(I), (int)(N)))
-#endif /* __x86_64__ */
-
-/* Extract int from packed integer array at index.  This returns the element
- * as a zero-extended value, so it is unsigned.
- */
-/// Extracts an 8-bit element from the 128-bit integer vector of
-///    [16 x i8], using the immediate value parameter \a N as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_extract_epi8(__m128i X, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPEXTRB / PEXTRB </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector.
-/// \param N
-///    An immediate value. Bits [3:0] specify which 8-bit vector element from
-///    the argument \a X to extract and copy to the result. \n
-///    0000: Bits [7:0] of the parameter \a X are extracted. \n
-///    0001: Bits [15:8] of the parameter \a X are extracted. \n
-///    0010: Bits [23:16] of the parameter \a X are extracted. \n
-///    0011: Bits [31:24] of the parameter \a X are extracted. \n
-///    0100: Bits [39:32] of the parameter \a X are extracted. \n
-///    0101: Bits [47:40] of the parameter \a X are extracted. \n
-///    0110: Bits [55:48] of the parameter \a X are extracted. \n
-///    0111: Bits [63:56] of the parameter \a X are extracted. \n
-///    1000: Bits [71:64] of the parameter \a X are extracted. \n
-///    1001: Bits [79:72] of the parameter \a X are extracted. \n
-///    1010: Bits [87:80] of the parameter \a X are extracted. \n
-///    1011: Bits [95:88] of the parameter \a X are extracted. \n
-///    1100: Bits [103:96] of the parameter \a X are extracted. \n
-///    1101: Bits [111:104] of the parameter \a X are extracted. \n
-///    1110: Bits [119:112] of the parameter \a X are extracted. \n
-///    1111: Bits [127:120] of the parameter \a X are extracted.
-/// \returns  An unsigned integer, whose lower 8 bits are selected from the
-///    128-bit integer vector parameter and the remaining bits are assigned
-///    zeros.
-#define _mm_extract_epi8(X, N) \
-  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
-                                                    (int)(N)))
-
-/// Extracts a 32-bit element from the 128-bit integer vector of
-///    [4 x i32], using the immediate value parameter \a N as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_extract_epi32(__m128i X, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPEXTRD / PEXTRD </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector.
-/// \param N
-///    An immediate value. Bits [1:0] specify which 32-bit vector element from
-///    the argument \a X to extract and copy to the result. \n
-///    00: Bits [31:0] of the parameter \a X are extracted. \n
-///    01: Bits [63:32] of the parameter \a X are extracted. \n
-///    10: Bits [95:64] of the parameter \a X are extracted. \n
-///    11: Bits [127:96] of the parameter \a X are extracted.
-/// \returns  An integer, whose lower 32 bits are selected from the 128-bit
-///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
-  ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
-
-#ifdef __x86_64__
-/// Extracts a 64-bit element from the 128-bit integer vector of
-///    [2 x i64], using the immediate value parameter \a N as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// long long _mm_extract_epi64(__m128i X, const int N);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
-///
-/// \param X
-///    A 128-bit integer vector.
-/// \param N
-///    An immediate value. Bit [0] specifies which 64-bit vector element from
-///    the argument \a X to return. \n
-///    0: Bits [63:0] are returned. \n
-///    1: Bits [127:64] are returned. \n
-/// \returns  A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
-  ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
-#endif /* __x86_64 */
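-
-/* Usage sketch (illustrative): extraction zero-extends, so a byte of 0x80
-   comes back as the int 128, not -128:
-
-     __m128i v = _mm_set1_epi8((char)0x80);
-     int     b = _mm_extract_epi8(v, 0);   // 128
-*/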
-
-/* SSE4 128-bit Packed Integer Comparisons.  */
-/// Tests whether the specified bits in a 128-bit integer vector are all
-///    zeros.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param __M
-///    A 128-bit integer vector containing the bits to be tested.
-/// \param __V
-///    A 128-bit integer vector selecting which bits to test in operand \a __M.
-/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
-  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
-}
-
-/// Tests whether the specified bits in a 128-bit integer vector are all
-///    ones.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param __M
-///    A 128-bit integer vector containing the bits to be tested.
-/// \param __V
-///    A 128-bit integer vector selecting which bits to test in operand \a __M.
-/// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
-  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
-}
-
-/// Tests whether the specified bits in a 128-bit integer vector are
-///    neither all zeros nor all ones.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param __M
-///    A 128-bit integer vector containing the bits to be tested.
-/// \param __V
-///    A 128-bit integer vector selecting which bits to test in operand \a __M.
-/// \returns TRUE if the specified bits are neither all zeros nor all ones;
-///    FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
-  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
-}
-
-/// Tests whether the specified bits in a 128-bit integer vector are all
-///    ones.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_test_all_ones(__m128i V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param V
-///    A 128-bit integer vector containing the bits to be tested.
-/// \returns TRUE if the bits specified in the operand are all set to 1; FALSE
-///    otherwise.
-#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
-
-/// Tests whether the specified bits in a 128-bit integer vector are
-///    neither all zeros nor all ones.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_test_mix_ones_zeros(__m128i M, __m128i V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param M
-///    A 128-bit integer vector containing the bits to be tested.
-/// \param V
-///    A 128-bit integer vector selecting which bits to test in operand \a M.
-/// \returns TRUE if the specified bits are neither all zeros nor all ones;
-///    FALSE otherwise.
-#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-
-/// Tests whether the specified bits in a 128-bit integer vector are all
-///    zeros.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_test_all_zeros(__m128i M, __m128i V);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
-///
-/// \param M
-///    A 128-bit integer vector containing the bits to be tested.
-/// \param V
-///    A 128-bit integer vector selecting which bits to test in operand \a M.
-/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
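-
-/* Usage sketch (illustrative): a branchless 128-bit equality test.
-
-     __m128i eq = _mm_cmpeq_epi8(a, b);   // 0xFF in each equal byte
-     int same = _mm_test_all_ones(eq);    // 1 iff a and b are bitwise equal
-*/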
-
-/* SSE4 64-bit Packed Integer Comparisons.  */
-/// Compares each of the corresponding 64-bit values of the 128-bit
-///    integer vectors for equality.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQQ / PCMPEQQ </c> instruction.
-///
-/// \param __V1
-///    A 128-bit integer vector.
-/// \param __V2
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
-  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
-}
-
-/* SSE4 Packed Integer Sign-Extension.  */
-/// Sign-extends each of the lower eight 8-bit integer elements of a
-///    128-bit vector of [16 x i8] to 16-bit values and returns them in a
-///    128-bit vector of [8 x i16]. The upper eight elements of the input vector
-///    are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-///    extended to 16-bit values.
-/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
-}
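-
-/* For contrast with the zero-extension family below: _mm_cvtepi8_epi16 maps
-   the byte 0xFF to 0xFFFF (-1 sign-extended), while _mm_cvtepu8_epi16 maps
-   it to 0x00FF (255 zero-extended). */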
-
-/// Sign-extends each of the lower four 8-bit integer elements of a
-///    128-bit vector of [16 x i8] to 32-bit values and returns them in a
-///    128-bit vector of [4 x i32]. The upper twelve elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXBD / PMOVSXBD </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
-///    sign-extended to 32-bit values.
-/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
-}
-
-/// Sign-extends each of the lower two 8-bit integer elements of a
-///    128-bit integer vector of [16 x i8] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper fourteen elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXBQ / PMOVSXBQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
-///    sign-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
-}
-
-/// Sign-extends each of the lower four 16-bit integer elements of a
-///    128-bit integer vector of [8 x i16] to 32-bit values and returns them in
-///    a 128-bit vector of [4 x i32]. The upper four elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXWD / PMOVSXWD </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
-///    sign-extended to 32-bit values.
-/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
-}
-
-/// Sign-extends each of the lower two 16-bit integer elements of a
-///    128-bit integer vector of [8 x i16] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper six elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXWQ / PMOVSXWQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
-///    sign-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
-}
-
-/// Sign-extends each of the lower two 32-bit integer elements of a
-///    128-bit integer vector of [4 x i32] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper two elements of the input vector
-///    are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVSXDQ / PMOVSXDQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
-///    sign-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
-}
-
-/* SSE4 Packed Integer Zero-Extension.  */
-/// Zero-extends each of the lower eight 8-bit integer elements of a
-///    128-bit vector of [16 x i8] to 16-bit values and returns them in a
-///    128-bit vector of [8 x i16]. The upper eight elements of the input vector
-///    are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXBW / PMOVZXBW </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
-///    zero-extended to 16-bit values.
-/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
-}
-
-/// Zero-extends each of the lower four 8-bit integer elements of a
-///    128-bit vector of [16 x i8] to 32-bit values and returns them in a
-///    128-bit vector of [4 x i32]. The upper twelve elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXBD / PMOVZXBD </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
-///    zero-extended to 32-bit values.
-/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
-}
-
-/// Zero-extends each of the lower two 8-bit integer elements of a
-///    128-bit integer vector of [16 x i8] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper fourteen elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXBQ / PMOVZXBQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
-///    zero-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
-}
-
-/// Zero-extends each of the lower four 16-bit integer elements of a
-///    128-bit integer vector of [8 x i16] to 32-bit values and returns them in
-///    a 128-bit vector of [4 x i32]. The upper four elements of the input
-///    vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXWD / PMOVZXWD </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
-///    zero-extended to 32-bit values.
-/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
-}
-
-/// Zero-extends each of the lower two 16-bit integer elements of a
-///    128-bit integer vector of [8 x i16] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper six elements of the input vector
-///    are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXWQ / PMOVZXWQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
-///    zero-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
-}
-
-/// Zero-extends each of the lower two 32-bit integer elements of a
-///    128-bit integer vector of [4 x i32] to 64-bit values and returns them in
-///    a 128-bit vector of [2 x i64]. The upper two elements of the input vector
-///    are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVZXDQ / PMOVZXDQ </c> instruction.
-///
-/// \param __V
-///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
-///    zero-extended to 64-bit values.
-/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
-}
-
-/* SSE4 Pack with Unsigned Saturation.  */
-/// Converts 32-bit signed integers from both 128-bit integer vector
-///    operands into 16-bit unsigned integers, and returns the packed result.
-///    Values greater than 0xFFFF are saturated to 0xFFFF. Values less than
-///    0x0000 are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKUSDW / PACKUSDW </c> instruction.
-///
-/// \param __V1
-///    A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-///    signed integer and is converted to a 16-bit unsigned integer with
-///    saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-///    less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-///    are written to the lower 64 bits of the result.
-/// \param __V2
-///    A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-///    signed integer and is converted to a 16-bit unsigned integer with
-///    saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-///    less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-///    are written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
-}
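-
-/* Usage sketch (assumes SSE4.1): negative inputs clamp to 0x0000 and
-   inputs above 65535 clamp to 0xFFFF:
-
-     __m128i lo = _mm_setr_epi32(-5, 0, 70000, 65535);
-     __m128i hi = _mm_setr_epi32(1, 2, 3, 4);
-     __m128i r  = _mm_packus_epi32(lo, hi);
-     // r = { 0, 0, 0xFFFF, 0xFFFF, 1, 2, 3, 4 } as [8 x u16]
-*/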
-
-/* SSE4 Multiple Packed Sums of Absolute Difference.  */
-/// Subtracts 8-bit unsigned integer values and computes the absolute
-///    values of the differences. Sums of four consecutive absolute
-///    differences are then returned, with the offsets of the compared
-///    blocks selected by bit fields in the immediate operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_mpsadbw_epu8(__m128i X, __m128i Y, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VMPSADBW / MPSADBW </c> instruction.
-///
-/// \param X
-///    A 128-bit vector of [16 x i8].
-/// \param Y
-///    A 128-bit vector of [16 x i8].
-/// \param M
-///    An 8-bit immediate operand specifying how the absolute differences are to
-///    be calculated, according to the following algorithm:
-///    \code
-///    // M2 represents bit 2 of the immediate operand
-///    // M10 represents bits [1:0] of the immediate operand
-///    i = M2 * 4;
-///    j = M10 * 4;
-///    for (k = 0; k < 8; k = k + 1) {
-///      d0 = abs(X[i + k + 0] - Y[j + 0]);
-///      d1 = abs(X[i + k + 1] - Y[j + 1]);
-///      d2 = abs(X[i + k + 2] - Y[j + 2]);
-///      d3 = abs(X[i + k + 3] - Y[j + 3]);
-///      r[k] = d0 + d1 + d2 + d3;
-///    }
-///    \endcode
-/// \returns A 128-bit integer vector containing the sums of the sets of
-///    absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
-  ((__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
-                                       (__v16qi)(__m128i)(Y), (M)))
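-
-/* Usage sketch (assumes SSE4.1): with \a M = 0 (so i = j = 0), eight
-   sums are formed between the sliding 4-byte windows of \a X at offsets
-   0..7 and the fixed 4-byte block of \a Y at offset 0 (x and y are
-   placeholder __m128i values):
-
-     __m128i sads = _mm_mpsadbw_epu8(x, y, 0);
-     // sads holds eight 16-bit sums of absolute differences.
-*/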
-
-/// Finds the minimum unsigned 16-bit element in the input 128-bit
-///    vector of [8 x u16] and returns it along with its index.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPHMINPOSUW / PHMINPOSUW </c>
-/// instruction.
-///
-/// \param __V
-///    A 128-bit vector of [8 x u16].
-/// \returns A 128-bit value where bits [15:0] contain the minimum value found
-///    in parameter \a __V, bits [18:16] contain the index of the minimum value
-///    and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
-  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
-}
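-
-/* Usage sketch (assumes SSE4.1): unpacking the packed result into the
-   minimum value and its index (v is a placeholder input):
-
-     __m128i r  = _mm_minpos_epu16(v);
-     int minval = _mm_extract_epi16(r, 0);        // bits [15:0]
-     int minidx = _mm_extract_epi16(r, 1) & 0x7;  // bits [18:16]
-*/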
-
-/* Handle the sse4.2 definitions here. */
-
-/* These definitions are normally in nmmintrin.h, but gcc puts them in here
-   so we'll do the same.  */
-
-#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-
-/* These specify the type of data that we're comparing.  */
-#define _SIDD_UBYTE_OPS                 0x00
-#define _SIDD_UWORD_OPS                 0x01
-#define _SIDD_SBYTE_OPS                 0x02
-#define _SIDD_SWORD_OPS                 0x03
-
-/* These specify the type of comparison operation.  */
-#define _SIDD_CMP_EQUAL_ANY             0x00
-#define _SIDD_CMP_RANGES                0x04
-#define _SIDD_CMP_EQUAL_EACH            0x08
-#define _SIDD_CMP_EQUAL_ORDERED         0x0c
-
-/* These macros specify the polarity of the operation.  */
-#define _SIDD_POSITIVE_POLARITY         0x00
-#define _SIDD_NEGATIVE_POLARITY         0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30
-
-/* These macros are used in _mm_cmpXstri() to specify the return.  */
-#define _SIDD_LEAST_SIGNIFICANT         0x00
-#define _SIDD_MOST_SIGNIFICANT          0x40
-
-/* These macros are used in _mm_cmpXstrm() to specify the return.  */
-#define _SIDD_BIT_MASK                  0x00
-#define _SIDD_UNIT_MASK                 0x40
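-
-/* A control byte is formed by OR-ing together one value from each group
-   above, for example (a sketch) an unsigned-byte substring search that
-   returns the index of the first match:
-
-     _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT
-
-   Note that _SIDD_MOST_SIGNIFICANT and _SIDD_UNIT_MASK share the value
-   0x40; which meaning applies depends on whether the flag is passed to
-   an index-returning or a mask-returning intrinsic. */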
-
-/* SSE4.2 Packed Comparison Intrinsics.  */
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns a 128-bit integer vector representing the result
-///    mask of the comparison.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_cmpistrm(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRM / PCMPISTRM </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words, the type of comparison to perform, and the format of the return
-///    value. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-///    Bit [6]: Determines whether the result is zero-extended or expanded to 16
-///             bytes. \n
-///      0: The result is zero-extended to 16 bytes. \n
-///      1: The result is expanded to 16 bytes (this expansion is performed by
-///         repeating each bit 8 or 16 times).
-/// \returns Returns a 128-bit integer vector representing the result mask of
-///    the comparison.
-#define _mm_cmpistrm(A, B, M) \
-  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
-                                        (__v16qi)(__m128i)(B), (int)(M)))
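-
-/* Usage sketch (assumes SSE4.2): a byte mask of the decimal digits in a
-   16-byte block (text is a placeholder __m128i; bytes at and after a
-   0 byte in text are treated as past the end of the string), using the
-   ranges comparison with the bounds pair ('0', '9') as the first
-   operand:
-
-     __m128i range = _mm_setr_epi8('0', '9', 0, 0, 0, 0, 0, 0,
-                                   0, 0, 0, 0, 0, 0, 0, 0);
-     __m128i mask  = _mm_cmpistrm(range, text,
-         _SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK);
-     // mask has 0xFF in each byte position of text holding '0'..'9'.
-*/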
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns an integer representing the result index of the
-///    comparison.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistri(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words, the type of comparison to perform, and the format of the return
-///    value. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-///    Bit [6]: Determines whether the index of the lowest set bit or the
-///             highest set bit is returned. \n
-///      0: The index of the least significant set bit. \n
-///      1: The index of the most significant set bit. \n
-/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
-  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
-                                    (__v16qi)(__m128i)(B), (int)(M)))
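-
-/* Usage sketch (assumes SSE4.2): index of the first differing byte of
-   two 16-byte blocks a and b (placeholders, assumed to contain no zero
-   bytes, since the lengths are implicit); 16 means no difference was
-   found:
-
-     int idx = _mm_cmpistri(a, b,
-         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
-         _SIDD_NEGATIVE_POLARITY);
-*/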
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns a 128-bit integer vector representing the result
-///    mask of the comparison.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_cmpestrm(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRM / PCMPESTRM </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words, the type of comparison to perform, and the format of the return
-///    value. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-///    Bit [6]: Determines whether the result is zero-extended or expanded to 16
-///             bytes. \n
-///      0: The result is zero-extended to 16 bytes. \n
-///      1: The result is expanded to 16 bytes (this expansion is performed by
-///         repeating each bit 8 or 16 times). \n
-/// \returns Returns a 128-bit integer vector representing the result mask of
-///    the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
-  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
-                                        (__v16qi)(__m128i)(B), (int)(LB), \
-                                        (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns an integer representing the result index of the
-///    comparison.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestri(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words, the type of comparison to perform, and the format of the return
-///    value. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-///    Bit [6]: Determines whether the index of the lowest set bit or the
-///             highest set bit is returned. \n
-///      0: The index of the least significant set bit. \n
-///      1: The index of the most significant set bit. \n
-/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
-                                    (__v16qi)(__m128i)(B), (int)(LB), \
-                                    (int)(M)))
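-
-/* Usage sketch (assumes SSE4.2): explicit lengths allow data containing
-   zero bytes to be searched, e.g. the offset in a 16-byte haystack
-   block at which a short needle begins to match (needle, needle_len,
-   and hay are placeholders):
-
-     int off = _mm_cmpestri(needle, needle_len, hay, 16,
-         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED);
-     // off == 16 when no match begins inside the block.
-*/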
-
-/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading.  */
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the bit mask is zero and the length of the
-///    string in \a B is the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistra(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-/// \returns Returns 1 if the bit mask is zero and the length of the string in
-///    \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
-  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
-                                     (__v16qi)(__m128i)(B), (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the bit mask is non-zero, otherwise, returns
-///    0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistrc(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B.
-/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
-  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
-                                     (__v16qi)(__m128i)(B), (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns bit 0 of the resulting bit mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistro(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
-  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
-                                     (__v16qi)(__m128i)(B), (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the length of the string in \a A is less than
-///    the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistrs(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-/// \returns Returns 1 if the length of the string in \a A is less than the
-///    maximum, otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
-  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
-                                     (__v16qi)(__m128i)(B), (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with implicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the length of the string in \a B is less than
-///    the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpistrz(__m128i A, __m128i B, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B.
-/// \returns Returns 1 if the length of the string in \a B is less than the
-///    maximum, otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
-  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
-                                     (__v16qi)(__m128i)(B), (int)(M)))
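-
-/* Usage sketch (assumes SSE4.2): _mm_cmpistrz reports whether the block
-   in \a B contains the implicit terminator, the usual exit test when
-   scanning a C string 16 bytes at a time (this sketch assumes the
-   string's buffer may be read in whole 16-byte steps; s is a
-   placeholder string):
-
-     const char *p = s;
-     for (;;) {
-       __m128i chunk = _mm_loadu_si128((const __m128i *)p);
-       if (_mm_cmpistrz(chunk, chunk,
-                        _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH))
-         break;              // the terminating 0 byte is in this chunk
-       p += 16;
-     }
-*/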
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the bit mask is zero and the length of the
-///    string in \a B is the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestra(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B.
-/// \returns Returns 1 if the bit mask is zero and the length of the string in
-///    \a B is the maximum, otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
-                                     (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the resulting mask is non-zero, otherwise,
-///    returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestrc(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
-                                     (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns bit 0 of the resulting bit mask.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestro(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B.
-/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
-                                     (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the length of the string in \a A is less than
-///    the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestrs(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B. \n
-/// \returns Returns 1 if the length of the string in \a A is less than the
-///    maximum, otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
-                                     (int)(M)))
-
-/// Uses the immediate operand \a M to perform a comparison of string
-///    data with explicitly defined lengths that is contained in source operands
-///    \a A and \a B. Returns 1 if the length of the string in \a B is less than
-///    the maximum, otherwise, returns 0.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_cmpestrz(__m128i A, int LA, __m128i B, int LB, const int M);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
-/// instruction.
-///
-/// \param A
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LA
-///    An integer that specifies the length of the string in \a A.
-/// \param B
-///    A 128-bit integer vector containing one of the source operands to be
-///    compared.
-/// \param LB
-///    An integer that specifies the length of the string in \a B.
-/// \param M
-///    An 8-bit immediate operand specifying whether the characters are bytes or
-///    words and the type of comparison to perform. \n
-///    Bits [1:0]: Determine source data format. \n
-///      00: 16 unsigned bytes \n
-///      01: 8 unsigned words \n
-///      10: 16 signed bytes \n
-///      11: 8 signed words \n
-///    Bits [3:2]: Determine comparison type and aggregation method. \n
-///      00: Subset: Each character in \a B is compared for equality with all
-///          the characters in \a A. \n
-///      01: Ranges: Each character in \a B is compared to \a A. The comparison
-///          basis is greater than or equal for even-indexed elements in \a A,
-///          and less than or equal for odd-indexed elements in \a A. \n
-///      10: Match: Compare each pair of corresponding characters in \a A and
-///          \a B for equality. \n
-///      11: Substring: Search \a B for substring matches of \a A. \n
-///    Bits [5:4]: Determine whether to perform a one's complement on the bit
-///                mask of the comparison results. \n
-///      00: No effect. \n
-///      01: Negate the bit mask. \n
-///      10: No effect. \n
-///      11: Negate the bit mask only for bits with an index less than or equal
-///          to the size of \a A or \a B.
-/// \returns Returns 1 if the length of the string in \a B is less than the
-///    maximum, otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
-                                     (int)(M)))
-
-/* SSE4.2 Compare Packed Data -- Greater Than.  */
-/// Compares each of the corresponding 64-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are
-///    greater than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTQ / PCMPGTQ </c> instruction.
-///
-/// \param __V1
-///    A 128-bit integer vector.
-/// \param __V2
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
-  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
-}
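-
-/* Usage sketch (assumes SSE4.2): each 64-bit lane of the result is all
-   ones where the signed comparison holds and all zeros elsewhere:
-
-     __m128i a = _mm_set_epi64x(10, -3);   // lanes: low -3, high 10
-     __m128i b = _mm_set_epi64x(-7, -3);   // lanes: low -3, high -7
-     __m128i m = _mm_cmpgt_epi64(a, b);    // low lane 0, high lane ~0
-*/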
-
-#undef __DEFAULT_FN_ATTRS
-
-#include <popcntintrin.h>
-
-#include <crc32intrin.h>
-
-#endif /* __SMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdbool.h b/linux-x86/lib64/clang/14.0.2/include/stdbool.h
deleted file mode 100644
index 2525363..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/stdbool.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*===---- stdbool.h - Standard header for booleans -------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __STDBOOL_H
-#define __STDBOOL_H
-
-/* Don't define bool, true, and false in C++, except as a GNU extension. */
-#ifndef __cplusplus
-#define bool _Bool
-#define true 1
-#define false 0
-#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
-/* Define _Bool as a GNU extension. */
-#define _Bool bool
-#if __cplusplus < 201103L
-/* For C++98, define bool, false, true as a GNU extension. */
-#define bool  bool
-#define false false
-#define true  true
-#endif
-#endif
-
-#define __bool_true_false_are_defined 1
-
-#endif /* __STDBOOL_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdint.h b/linux-x86/lib64/clang/14.0.2/include/stdint.h
deleted file mode 100644
index 192f653..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/stdint.h
+++ /dev/null
@@ -1,693 +0,0 @@
-/*===---- stdint.h - Standard header for sized integer types --------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __CLANG_STDINT_H
-// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T
-// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
-// case the header guard macro is defined.
-#if !defined(_AIX) || !defined(_STD_TYPES_T) || !defined(__STDC_HOSTED__)
-#define __CLANG_STDINT_H
-#endif
-
-/* If we're hosted, fall back to the system's stdint.h, which might have
- * additional definitions.
- */
-#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
-
-// C99 7.18.3 Limits of other integer types
-//
-//  Footnote 219, 220: C++ implementations should define these macros only when
-//  __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
-//
-//  Footnote 222: C++ implementations should define these macros only when
-//  __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
-//
-// C++11 [cstdint.syn]p2:
-//
-//  The macros defined by <cstdint> are provided unconditionally. In particular,
-//  the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
-//  footnotes 219, 220, and 222 in the C standard) play no role in C++.
-//
-// C11 removed the problematic footnotes.
-//
-// Work around this inconsistency by always defining those macros in C++ mode,
-// so that a C library implementation which follows the C99 standard can be
-// used in C++.
-# ifdef __cplusplus
-#  if !defined(__STDC_LIMIT_MACROS)
-#   define __STDC_LIMIT_MACROS
-#   define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-#  endif
-#  if !defined(__STDC_CONSTANT_MACROS)
-#   define __STDC_CONSTANT_MACROS
-#   define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-#  endif
-# endif
-
-# include_next <stdint.h>
-
-# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-#  undef __STDC_LIMIT_MACROS
-#  undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-# endif
-# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-#  undef __STDC_CONSTANT_MACROS
-#  undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-# endif
-
-#else
-
-/* C99 7.18.1.1 Exact-width integer types.
- * C99 7.18.1.2 Minimum-width integer types.
- * C99 7.18.1.3 Fastest minimum-width integer types.
- *
- * The standard requires that exact-width types be defined for 8-, 16-, 32-,
- * and 64-bit types if they are implemented. Other exact-width types are
- * optional. This implementation defines an exact-width type for every integer
- * width that is represented in the standard integer types.
- *
- * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
- * and 64-bit widths regardless of whether there are corresponding exact-width
- * types.
- *
- * To accommodate targets that are missing types that are exactly 8, 16, 32, or
- * 64 bits wide, this implementation takes an approach of cascading
- * redefinitions, redefining __int_leastN_t to successively smaller exact-width
- * types. It is therefore important that the types are defined in order of
- * descending widths.
- *
- * We currently assume that the minimum-width types and the fastest
- * minimum-width types are the same. This is allowed by the standard, but is
- * suboptimal.
- *
- * In violation of the standard, some targets do not implement a type that is
- * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
- * To accommodate these targets, a required minimum-width type is only
- * defined if there exists an exact-width type of equal or greater width.
- */
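-
-/* Cascade sketch: on a hypothetical target that provides __INT40_TYPE__
- * but no __INT32_TYPE__, the 40-bit block below leaves
- *
- *   #define __int_least32_t int40_t
- *
- * in effect; no 32-bit block runs to redefine it, so the later
- * "#ifdef __int_least32_t" section still emits int_least32_t and
- * int_fast32_t, backed by the 40-bit type. */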
-
-#ifdef __INT64_TYPE__
-# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/
-typedef __INT64_TYPE__ int64_t;
-# endif /* __int8_t_defined */
-typedef __UINT64_TYPE__ uint64_t;
-# define __int_least64_t int64_t
-# define __uint_least64_t uint64_t
-# define __int_least32_t int64_t
-# define __uint_least32_t uint64_t
-# define __int_least16_t int64_t
-# define __uint_least16_t uint64_t
-# define __int_least8_t int64_t
-# define __uint_least8_t uint64_t
-#endif /* __INT64_TYPE__ */
-
-#ifdef __int_least64_t
-typedef __int_least64_t int_least64_t;
-typedef __uint_least64_t uint_least64_t;
-typedef __int_least64_t int_fast64_t;
-typedef __uint_least64_t uint_fast64_t;
-#endif /* __int_least64_t */
-
-#ifdef __INT56_TYPE__
-typedef __INT56_TYPE__ int56_t;
-typedef __UINT56_TYPE__ uint56_t;
-typedef int56_t int_least56_t;
-typedef uint56_t uint_least56_t;
-typedef int56_t int_fast56_t;
-typedef uint56_t uint_fast56_t;
-# define __int_least32_t int56_t
-# define __uint_least32_t uint56_t
-# define __int_least16_t int56_t
-# define __uint_least16_t uint56_t
-# define __int_least8_t int56_t
-# define __uint_least8_t uint56_t
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-typedef __INT48_TYPE__ int48_t;
-typedef __UINT48_TYPE__ uint48_t;
-typedef int48_t int_least48_t;
-typedef uint48_t uint_least48_t;
-typedef int48_t int_fast48_t;
-typedef uint48_t uint_fast48_t;
-# define __int_least32_t int48_t
-# define __uint_least32_t uint48_t
-# define __int_least16_t int48_t
-# define __uint_least16_t uint48_t
-# define __int_least8_t int48_t
-# define __uint_least8_t uint48_t
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-typedef __INT40_TYPE__ int40_t;
-typedef __UINT40_TYPE__ uint40_t;
-typedef int40_t int_least40_t;
-typedef uint40_t uint_least40_t;
-typedef int40_t int_fast40_t;
-typedef uint40_t uint_fast40_t;
-# define __int_least32_t int40_t
-# define __uint_least32_t uint40_t
-# define __int_least16_t int40_t
-# define __uint_least16_t uint40_t
-# define __int_least8_t int40_t
-# define __uint_least8_t uint40_t
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-
-# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/
-typedef __INT32_TYPE__ int32_t;
-# endif /* __int8_t_defined */
-
-# ifndef __uint32_t_defined  /* more glibc compatibility */
-# define __uint32_t_defined
-typedef __UINT32_TYPE__ uint32_t;
-# endif /* __uint32_t_defined */
-
-# define __int_least32_t int32_t
-# define __uint_least32_t uint32_t
-# define __int_least16_t int32_t
-# define __uint_least16_t uint32_t
-# define __int_least8_t int32_t
-# define __uint_least8_t uint32_t
-#endif /* __INT32_TYPE__ */
-
-#ifdef __int_least32_t
-typedef __int_least32_t int_least32_t;
-typedef __uint_least32_t uint_least32_t;
-typedef __int_least32_t int_fast32_t;
-typedef __uint_least32_t uint_fast32_t;
-#endif /* __int_least32_t */
-
-#ifdef __INT24_TYPE__
-typedef __INT24_TYPE__ int24_t;
-typedef __UINT24_TYPE__ uint24_t;
-typedef int24_t int_least24_t;
-typedef uint24_t uint_least24_t;
-typedef int24_t int_fast24_t;
-typedef uint24_t uint_fast24_t;
-# define __int_least16_t int24_t
-# define __uint_least16_t uint24_t
-# define __int_least8_t int24_t
-# define __uint_least8_t uint24_t
-#endif /* __INT24_TYPE__ */
-
-#ifdef __INT16_TYPE__
-#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/
-typedef __INT16_TYPE__ int16_t;
-#endif /* __int8_t_defined */
-typedef __UINT16_TYPE__ uint16_t;
-# define __int_least16_t int16_t
-# define __uint_least16_t uint16_t
-# define __int_least8_t int16_t
-# define __uint_least8_t uint16_t
-#endif /* __INT16_TYPE__ */
-
-#ifdef __int_least16_t
-typedef __int_least16_t int_least16_t;
-typedef __uint_least16_t uint_least16_t;
-typedef __int_least16_t int_fast16_t;
-typedef __uint_least16_t uint_fast16_t;
-#endif /* __int_least16_t */
-
-
-#ifdef __INT8_TYPE__
-#ifndef __int8_t_defined  /* glibc sys/types.h also defines int8_t*/
-typedef __INT8_TYPE__ int8_t;
-#endif /* __int8_t_defined */
-typedef __UINT8_TYPE__ uint8_t;
-# define __int_least8_t int8_t
-# define __uint_least8_t uint8_t
-#endif /* __INT8_TYPE__ */
-
-#ifdef __int_least8_t
-typedef __int_least8_t int_least8_t;
-typedef __uint_least8_t uint_least8_t;
-typedef __int_least8_t int_fast8_t;
-typedef __uint_least8_t uint_fast8_t;
-#endif /* __int_least8_t */
-
-/* prevent glibc sys/types.h from defining conflicting types */
-#ifndef __int8_t_defined
-# define __int8_t_defined
-#endif /* __int8_t_defined */
-
-/* C99 7.18.1.4 Integer types capable of holding object pointers.
- */
-#define __stdint_join3(a,b,c) a ## b ## c
-
-#ifndef _INTPTR_T
-#ifndef __intptr_t_defined
-typedef __INTPTR_TYPE__ intptr_t;
-#define __intptr_t_defined
-#define _INTPTR_T
-#endif
-#endif
-
-#ifndef _UINTPTR_T
-typedef __UINTPTR_TYPE__ uintptr_t;
-#define _UINTPTR_T
-#endif
-
-/* C99 7.18.1.5 Greatest-width integer types.
- */
-typedef __INTMAX_TYPE__  intmax_t;
-typedef __UINTMAX_TYPE__ uintmax_t;
-
-/* C99 7.18.4 Macros for minimum-width integer constants.
- *
- * The standard requires that integer constant macros be defined for all the
- * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
- * types are required, the corresponding integer constant macros are defined
- * here. This implementation also defines minimum-width types for every other
- * integer width that the target implements, so corresponding macros are
- * defined below, too.
- *
- * These macros are defined using the same successive-shrinking approach as
- * the type definitions above. It is likewise important that macros are defined
- * in order of descending width.
- *
- * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
- * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
- */
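-
-/* For example, on a typical LP64 target where __INT64_C_SUFFIX__ is L
- * and __INT32_C_SUFFIX__ is empty, the blocks below make INT64_C(1)
- * expand to 1L and INT32_C(1) expand to plain 1. */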
-
-#define __int_c_join(a, b) a ## b
-#define __int_c(v, suffix) __int_c_join(v, suffix)
-#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
-
-
-#ifdef __INT64_TYPE__
-# ifdef __INT64_C_SUFFIX__
-#  define __int64_c_suffix __INT64_C_SUFFIX__
-#  define __int32_c_suffix __INT64_C_SUFFIX__
-#  define __int16_c_suffix __INT64_C_SUFFIX__
-#  define  __int8_c_suffix __INT64_C_SUFFIX__
-# else
-#  undef __int64_c_suffix
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT64_C_SUFFIX__ */
-#endif /* __INT64_TYPE__ */
-
-#ifdef __int_least64_t
-# ifdef __int64_c_suffix
-#  define INT64_C(v) __int_c(v, __int64_c_suffix)
-#  define UINT64_C(v) __uint_c(v, __int64_c_suffix)
-# else
-#  define INT64_C(v) v
-#  define UINT64_C(v) v ## U
-# endif /* __int64_c_suffix */
-#endif /* __int_least64_t */
-
-
-#ifdef __INT56_TYPE__
-# ifdef __INT56_C_SUFFIX__
-#  define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
-#  define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
-#  define __int32_c_suffix __INT56_C_SUFFIX__
-#  define __int16_c_suffix __INT56_C_SUFFIX__
-#  define __int8_c_suffix  __INT56_C_SUFFIX__
-# else
-#  define INT56_C(v) v
-#  define UINT56_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT56_C_SUFFIX__ */
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-# ifdef __INT48_C_SUFFIX__
-#  define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
-#  define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
-#  define __int32_c_suffix __INT48_C_SUFFIX__
-#  define __int16_c_suffix __INT48_C_SUFFIX__
-#  define __int8_c_suffix  __INT48_C_SUFFIX__
-# else
-#  define INT48_C(v) v
-#  define UINT48_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT48_C_SUFFIX__ */
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-# ifdef __INT40_C_SUFFIX__
-#  define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
-#  define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
-#  define __int32_c_suffix __INT40_C_SUFFIX__
-#  define __int16_c_suffix __INT40_C_SUFFIX__
-#  define __int8_c_suffix  __INT40_C_SUFFIX__
-# else
-#  define INT40_C(v) v
-#  define UINT40_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT40_C_SUFFIX__ */
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-# ifdef __INT32_C_SUFFIX__
-#  define __int32_c_suffix __INT32_C_SUFFIX__
-#  define __int16_c_suffix __INT32_C_SUFFIX__
-#  define __int8_c_suffix  __INT32_C_SUFFIX__
-# else
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT32_C_SUFFIX__ */
-#endif /* __INT32_TYPE__ */
-
-#ifdef __int_least32_t
-# ifdef __int32_c_suffix
-#  define INT32_C(v) __int_c(v, __int32_c_suffix)
-#  define UINT32_C(v) __uint_c(v, __int32_c_suffix)
-# else
-#  define INT32_C(v) v
-#  define UINT32_C(v) v ## U
-# endif /* __int32_c_suffix */
-#endif /* __int_least32_t */
-
-
-#ifdef __INT24_TYPE__
-# ifdef __INT24_C_SUFFIX__
-#  define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
-#  define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
-#  define __int16_c_suffix __INT24_C_SUFFIX__
-#  define __int8_c_suffix  __INT24_C_SUFFIX__
-# else
-#  define INT24_C(v) v
-#  define UINT24_C(v) v ## U
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT24_C_SUFFIX__ */
-#endif /* __INT24_TYPE__ */
-
-
-#ifdef __INT16_TYPE__
-# ifdef __INT16_C_SUFFIX__
-#  define __int16_c_suffix __INT16_C_SUFFIX__
-#  define __int8_c_suffix  __INT16_C_SUFFIX__
-# else
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT16_C_SUFFIX__ */
-#endif /* __INT16_TYPE__ */
-
-#ifdef __int_least16_t
-# ifdef __int16_c_suffix
-#  define INT16_C(v) __int_c(v, __int16_c_suffix)
-#  define UINT16_C(v) __uint_c(v, __int16_c_suffix)
-# else
-#  define INT16_C(v) v
-#  define UINT16_C(v) v ## U
-# endif /* __int16_c_suffix */
-#endif /* __int_least16_t */
-
-
-#ifdef __INT8_TYPE__
-# ifdef __INT8_C_SUFFIX__
-#  define __int8_c_suffix __INT8_C_SUFFIX__
-# else
-#  undef  __int8_c_suffix
-# endif /* __INT8_C_SUFFIX__ */
-#endif /* __INT8_TYPE__ */
-
-#ifdef __int_least8_t
-# ifdef __int8_c_suffix
-#  define INT8_C(v) __int_c(v, __int8_c_suffix)
-#  define UINT8_C(v) __uint_c(v, __int8_c_suffix)
-# else
-#  define INT8_C(v) v
-#  define UINT8_C(v) v ## U
-# endif /* __int8_c_suffix */
-#endif /* __int_least8_t */
-
-
-/* C99 7.18.2.1 Limits of exact-width integer types.
- * C99 7.18.2.2 Limits of minimum-width integer types.
- * C99 7.18.2.3 Limits of fastest minimum-width integer types.
- *
- * The presence of limit macros is completely optional in C99.  This
- * implementation defines limits for all of the types (exact- and
- * minimum-width) that it defines above, using the limits of the minimum-width
- * type for any types that do not have exact-width representations.
- *
- * As in the type definitions, this section takes an approach of
- * successive-shrinking to determine which limits to use for the standard (8,
- * 16, 32, 64) bit widths when they don't have exact representations. It is
- * therefore important that the definitions be kept in order of descending
- * widths.
- *
- * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
- * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
- */
-
-#ifdef __INT64_TYPE__
-# define INT64_MAX           INT64_C( 9223372036854775807)
-# define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
-# define UINT64_MAX         UINT64_C(18446744073709551615)
-# define __INT_LEAST64_MIN   INT64_MIN
-# define __INT_LEAST64_MAX   INT64_MAX
-# define __UINT_LEAST64_MAX UINT64_MAX
-# define __INT_LEAST32_MIN   INT64_MIN
-# define __INT_LEAST32_MAX   INT64_MAX
-# define __UINT_LEAST32_MAX UINT64_MAX
-# define __INT_LEAST16_MIN   INT64_MIN
-# define __INT_LEAST16_MAX   INT64_MAX
-# define __UINT_LEAST16_MAX UINT64_MAX
-# define __INT_LEAST8_MIN    INT64_MIN
-# define __INT_LEAST8_MAX    INT64_MAX
-# define __UINT_LEAST8_MAX  UINT64_MAX
-#endif /* __INT64_TYPE__ */
-
-#ifdef __INT_LEAST64_MIN
-# define INT_LEAST64_MIN   __INT_LEAST64_MIN
-# define INT_LEAST64_MAX   __INT_LEAST64_MAX
-# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
-# define INT_FAST64_MIN    __INT_LEAST64_MIN
-# define INT_FAST64_MAX    __INT_LEAST64_MAX
-# define UINT_FAST64_MAX  __UINT_LEAST64_MAX
-#endif /* __INT_LEAST64_MIN */
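/* [Editorial sketch -- not part of the upstream header] The block above
 * deliberately aliases the "fast" macros to the "least" limits instead of
 * giving them independent values, so whenever __INT64_TYPE__ exists the
 * following holds by construction: */
_Static_assert(INT_FAST64_MAX == INT64_MAX && INT_LEAST64_MAX == INT64_MAX,
               "fast/least 64-bit limits collapse to the exact-width limits");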
-
-
-#ifdef __INT56_TYPE__
-# define INT56_MAX           INT56_C(36028797018963967)
-# define INT56_MIN         (-INT56_C(36028797018963967)-1)
-# define UINT56_MAX         UINT56_C(72057594037927935)
-# define INT_LEAST56_MIN     INT56_MIN
-# define INT_LEAST56_MAX     INT56_MAX
-# define UINT_LEAST56_MAX   UINT56_MAX
-# define INT_FAST56_MIN      INT56_MIN
-# define INT_FAST56_MAX      INT56_MAX
-# define UINT_FAST56_MAX    UINT56_MAX
-# define __INT_LEAST32_MIN   INT56_MIN
-# define __INT_LEAST32_MAX   INT56_MAX
-# define __UINT_LEAST32_MAX UINT56_MAX
-# define __INT_LEAST16_MIN   INT56_MIN
-# define __INT_LEAST16_MAX   INT56_MAX
-# define __UINT_LEAST16_MAX UINT56_MAX
-# define __INT_LEAST8_MIN    INT56_MIN
-# define __INT_LEAST8_MAX    INT56_MAX
-# define __UINT_LEAST8_MAX  UINT56_MAX
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-# define INT48_MAX           INT48_C(140737488355327)
-# define INT48_MIN         (-INT48_C(140737488355327)-1)
-# define UINT48_MAX         UINT48_C(281474976710655)
-# define INT_LEAST48_MIN     INT48_MIN
-# define INT_LEAST48_MAX     INT48_MAX
-# define UINT_LEAST48_MAX   UINT48_MAX
-# define INT_FAST48_MIN      INT48_MIN
-# define INT_FAST48_MAX      INT48_MAX
-# define UINT_FAST48_MAX    UINT48_MAX
-# define __INT_LEAST32_MIN   INT48_MIN
-# define __INT_LEAST32_MAX   INT48_MAX
-# define __UINT_LEAST32_MAX UINT48_MAX
-# define __INT_LEAST16_MIN   INT48_MIN
-# define __INT_LEAST16_MAX   INT48_MAX
-# define __UINT_LEAST16_MAX UINT48_MAX
-# define __INT_LEAST8_MIN    INT48_MIN
-# define __INT_LEAST8_MAX    INT48_MAX
-# define __UINT_LEAST8_MAX  UINT48_MAX
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-# define INT40_MAX           INT40_C(549755813887)
-# define INT40_MIN         (-INT40_C(549755813887)-1)
-# define UINT40_MAX         UINT40_C(1099511627775)
-# define INT_LEAST40_MIN     INT40_MIN
-# define INT_LEAST40_MAX     INT40_MAX
-# define UINT_LEAST40_MAX   UINT40_MAX
-# define INT_FAST40_MIN      INT40_MIN
-# define INT_FAST40_MAX      INT40_MAX
-# define UINT_FAST40_MAX    UINT40_MAX
-# define __INT_LEAST32_MIN   INT40_MIN
-# define __INT_LEAST32_MAX   INT40_MAX
-# define __UINT_LEAST32_MAX UINT40_MAX
-# define __INT_LEAST16_MIN   INT40_MIN
-# define __INT_LEAST16_MAX   INT40_MAX
-# define __UINT_LEAST16_MAX UINT40_MAX
-# define __INT_LEAST8_MIN    INT40_MIN
-# define __INT_LEAST8_MAX    INT40_MAX
-# define __UINT_LEAST8_MAX  UINT40_MAX
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-# define INT32_MAX           INT32_C(2147483647)
-# define INT32_MIN         (-INT32_C(2147483647)-1)
-# define UINT32_MAX         UINT32_C(4294967295)
-# define __INT_LEAST32_MIN   INT32_MIN
-# define __INT_LEAST32_MAX   INT32_MAX
-# define __UINT_LEAST32_MAX UINT32_MAX
-# define __INT_LEAST16_MIN   INT32_MIN
-# define __INT_LEAST16_MAX   INT32_MAX
-# define __UINT_LEAST16_MAX UINT32_MAX
-# define __INT_LEAST8_MIN    INT32_MIN
-# define __INT_LEAST8_MAX    INT32_MAX
-# define __UINT_LEAST8_MAX  UINT32_MAX
-#endif /* __INT32_TYPE__ */
-
-#ifdef __INT_LEAST32_MIN
-# define INT_LEAST32_MIN   __INT_LEAST32_MIN
-# define INT_LEAST32_MAX   __INT_LEAST32_MAX
-# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
-# define INT_FAST32_MIN    __INT_LEAST32_MIN
-# define INT_FAST32_MAX    __INT_LEAST32_MAX
-# define UINT_FAST32_MAX  __UINT_LEAST32_MAX
-#endif /* __INT_LEAST32_MIN */
-
-
-#ifdef __INT24_TYPE__
-# define INT24_MAX           INT24_C(8388607)
-# define INT24_MIN         (-INT24_C(8388607)-1)
-# define UINT24_MAX         UINT24_C(16777215)
-# define INT_LEAST24_MIN     INT24_MIN
-# define INT_LEAST24_MAX     INT24_MAX
-# define UINT_LEAST24_MAX   UINT24_MAX
-# define INT_FAST24_MIN      INT24_MIN
-# define INT_FAST24_MAX      INT24_MAX
-# define UINT_FAST24_MAX    UINT24_MAX
-# define __INT_LEAST16_MIN   INT24_MIN
-# define __INT_LEAST16_MAX   INT24_MAX
-# define __UINT_LEAST16_MAX UINT24_MAX
-# define __INT_LEAST8_MIN    INT24_MIN
-# define __INT_LEAST8_MAX    INT24_MAX
-# define __UINT_LEAST8_MAX  UINT24_MAX
-#endif /* __INT24_TYPE__ */
-
-
-#ifdef __INT16_TYPE__
-# define INT16_MAX           INT16_C(32767)
-# define INT16_MIN         (-INT16_C(32767)-1)
-# define UINT16_MAX         UINT16_C(65535)
-# define __INT_LEAST16_MIN   INT16_MIN
-# define __INT_LEAST16_MAX   INT16_MAX
-# define __UINT_LEAST16_MAX UINT16_MAX
-# define __INT_LEAST8_MIN    INT16_MIN
-# define __INT_LEAST8_MAX    INT16_MAX
-# define __UINT_LEAST8_MAX  UINT16_MAX
-#endif /* __INT16_TYPE__ */
-
-#ifdef __INT_LEAST16_MIN
-# define INT_LEAST16_MIN   __INT_LEAST16_MIN
-# define INT_LEAST16_MAX   __INT_LEAST16_MAX
-# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
-# define INT_FAST16_MIN    __INT_LEAST16_MIN
-# define INT_FAST16_MAX    __INT_LEAST16_MAX
-# define UINT_FAST16_MAX  __UINT_LEAST16_MAX
-#endif /* __INT_LEAST16_MIN */
-
-
-#ifdef __INT8_TYPE__
-# define INT8_MAX            INT8_C(127)
-# define INT8_MIN          (-INT8_C(127)-1)
-# define UINT8_MAX          UINT8_C(255)
-# define __INT_LEAST8_MIN    INT8_MIN
-# define __INT_LEAST8_MAX    INT8_MAX
-# define __UINT_LEAST8_MAX  UINT8_MAX
-#endif /* __INT8_TYPE__ */
-
-#ifdef __INT_LEAST8_MIN
-# define INT_LEAST8_MIN   __INT_LEAST8_MIN
-# define INT_LEAST8_MAX   __INT_LEAST8_MAX
-# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
-# define INT_FAST8_MIN    __INT_LEAST8_MIN
-# define INT_FAST8_MAX    __INT_LEAST8_MAX
-# define UINT_FAST8_MAX  __UINT_LEAST8_MAX
-#endif /* __INT_LEAST8_MIN */
-
-/* Some utility macros */
-#define  __INTN_MIN(n)  __stdint_join3( INT, n, _MIN)
-#define  __INTN_MAX(n)  __stdint_join3( INT, n, _MAX)
-#define __UINTN_MAX(n)  __stdint_join3(UINT, n, _MAX)
-#define  __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
-#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
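/* [Editorial sketch -- not part of the upstream header] How the join macros
 * compose: the width argument is macro-expanded first, then __stdint_join3
 * pastes the pieces into the name of a constant macro, which is re-scanned
 * and expanded in turn. Assuming __WINT_WIDTH__ is 32:
 *
 *   __UINTN_C(__WINT_WIDTH__, 0)
 *     -> __stdint_join3(UINT, 32, _C(0))
 *     -> UINT32_C(0)
 *     -> 0U with the suffix selected for the 32-bit minimum-width type
 */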
-
-/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
-/* C99 7.18.3 Limits of other integer types. */
-
-#define  INTPTR_MIN  (-__INTPTR_MAX__-1)
-#define  INTPTR_MAX    __INTPTR_MAX__
-#define UINTPTR_MAX   __UINTPTR_MAX__
-#define PTRDIFF_MIN (-__PTRDIFF_MAX__-1)
-#define PTRDIFF_MAX   __PTRDIFF_MAX__
-#define    SIZE_MAX      __SIZE_MAX__
-
-/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
- * is enabled. */
-#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
-#define   RSIZE_MAX            (SIZE_MAX >> 1)
-#endif
-
-/* C99 7.18.2.5 Limits of greatest-width integer types. */
-#define  INTMAX_MIN (-__INTMAX_MAX__-1)
-#define  INTMAX_MAX   __INTMAX_MAX__
-#define UINTMAX_MAX  __UINTMAX_MAX__
-
-/* C99 7.18.3 Limits of other integer types. */
-#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
-#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
-#ifdef __WINT_UNSIGNED__
-# define WINT_MIN       __UINTN_C(__WINT_WIDTH__, 0)
-# define WINT_MAX       __UINTN_MAX(__WINT_WIDTH__)
-#else
-# define WINT_MIN       __INTN_MIN(__WINT_WIDTH__)
-# define WINT_MAX       __INTN_MAX(__WINT_WIDTH__)
-#endif
-
-#ifndef WCHAR_MAX
-# define WCHAR_MAX __WCHAR_MAX__
-#endif
-#ifndef WCHAR_MIN
-# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
-#  define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
-# else
-#  define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
-# endif
-#endif
-
-/* 7.18.4.2 Macros for greatest-width integer constants. */
-#define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
-#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
-
-#endif /* __STDC_HOSTED__ */
-#endif /* __CLANG_STDINT_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdnoreturn.h b/linux-x86/lib64/clang/14.0.2/include/stdnoreturn.h
deleted file mode 100644
index e83cd81..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/stdnoreturn.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __STDNORETURN_H
-#define __STDNORETURN_H
-
-#define noreturn _Noreturn
-#define __noreturn_is_defined 1
-
-#endif /* __STDNORETURN_H */
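/* [Editorial sketch -- not part of the upstream header] Typical use: after
 * including this header, `noreturn` is simply a spelling of the C11 keyword
 * _Noreturn (the function name below is illustrative):
 *
 *   #include <stdnoreturn.h>
 *   noreturn void fatal(const char *msg);
 */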
diff --git a/linux-x86/lib64/clang/14.0.2/include/unwind.h b/linux-x86/lib64/clang/14.0.2/include/unwind.h
deleted file mode 100644
index 029524b..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/unwind.h
+++ /dev/null
@@ -1,327 +0,0 @@
-/*===---- unwind.h - Stack unwinding ----------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
-
-#ifndef __CLANG_UNWIND_H
-#define __CLANG_UNWIND_H
-
-#if defined(__APPLE__) && __has_include_next(<unwind.h>)
-/* Darwin (from 11.x on) provides an unwind.h. If that's available,
- * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
- * so define that around the include.*/
-# ifndef _GNU_SOURCE
-#  define _SHOULD_UNDEFINE_GNU_SOURCE
-#  define _GNU_SOURCE
-# endif
-// libunwind's unwind.h reflects the current visibility.  However, Mozilla
-// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
-// visibility to default and export its contents.  gcc also allows users to
-// override its override by #defining HIDE_EXPORTS (but note, this only obeys
-// the user's -fvisibility setting; it doesn't hide any exports on its own).  We
-// imitate gcc's header here:
-# ifdef HIDE_EXPORTS
-#  include_next <unwind.h>
-# else
-#  pragma GCC visibility push(default)
-#  include_next <unwind.h>
-#  pragma GCC visibility pop
-# endif
-# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
-#  undef _GNU_SOURCE
-#  undef _SHOULD_UNDEFINE_GNU_SOURCE
-# endif
-#else
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* It is a bit strange for a header to play with the visibility of the
-   symbols it declares, but this matches gcc's behavior and some programs
-   depend on it */
-#ifndef HIDE_EXPORTS
-#pragma GCC visibility push(default)
-#endif
-
-typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__)));
-typedef intptr_t _Unwind_Sword __attribute__((__mode__(__unwind_word__)));
-typedef uintptr_t _Unwind_Ptr;
-typedef uintptr_t _Unwind_Internal_Ptr;
-typedef uint64_t _Unwind_Exception_Class;
-
-typedef intptr_t _sleb128_t;
-typedef uintptr_t _uleb128_t;
-
-struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-struct _Unwind_Control_Block;
-typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
-#else
-struct _Unwind_Exception;
-typedef struct _Unwind_Exception _Unwind_Exception;
-#endif
-typedef enum {
-  _URC_NO_REASON = 0,
-#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
-  _URC_OK = 0, /* used by ARM EHABI */
-#endif
-  _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
-
-  _URC_FATAL_PHASE2_ERROR = 2,
-  _URC_FATAL_PHASE1_ERROR = 3,
-  _URC_NORMAL_STOP = 4,
-
-  _URC_END_OF_STACK = 5,
-  _URC_HANDLER_FOUND = 6,
-  _URC_INSTALL_CONTEXT = 7,
-  _URC_CONTINUE_UNWIND = 8,
-#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
-  _URC_FAILURE = 9 /* used by ARM EHABI */
-#endif
-} _Unwind_Reason_Code;
-
-typedef enum {
-  _UA_SEARCH_PHASE = 1,
-  _UA_CLEANUP_PHASE = 2,
-
-  _UA_HANDLER_FRAME = 4,
-  _UA_FORCE_UNWIND = 8,
-  _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
-} _Unwind_Action;
-
-typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
-                                             _Unwind_Exception *);
-
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-typedef struct _Unwind_Control_Block _Unwind_Control_Block;
-typedef uint32_t _Unwind_EHT_Header;
-
-struct _Unwind_Control_Block {
-  uint64_t exception_class;
-  void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
-  /* unwinder cache (private fields for the unwinder's use) */
-  struct {
-    uint32_t reserved1; /* forced unwind stop function, 0 if not forced */
-    uint32_t reserved2; /* personality routine */
-    uint32_t reserved3; /* callsite */
-    uint32_t reserved4; /* forced unwind stop argument */
-    uint32_t reserved5;
-  } unwinder_cache;
-  /* propagation barrier cache (valid after phase 1) */
-  struct {
-    uint32_t sp;
-    uint32_t bitpattern[5];
-  } barrier_cache;
-  /* cleanup cache (preserved over cleanup) */
-  struct {
-    uint32_t bitpattern[4];
-  } cleanup_cache;
-  /* personality cache (for personality's benefit) */
-  struct {
-    uint32_t fnstart;         /* function start address */
-    _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
-    uint32_t additional;      /* additional data */
-    uint32_t reserved1;
-  } pr_cache;
-  long long int : 0; /* force alignment of next item to 8-byte boundary */
-} __attribute__((__aligned__(8)));
-#else
-struct _Unwind_Exception {
-  _Unwind_Exception_Class exception_class;
-  _Unwind_Exception_Cleanup_Fn exception_cleanup;
-#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__)
-  _Unwind_Word private_[6];
-#else
-  _Unwind_Word private_1;
-  _Unwind_Word private_2;
-#endif
-  /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
-   * aligned".  GCC has interpreted this to mean "use the maximum useful
-   * alignment for the target"; so do we. */
-} __attribute__((__aligned__));
-#endif
-
-typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
-                                               _Unwind_Exception_Class,
-                                               _Unwind_Exception *,
-                                               struct _Unwind_Context *,
-                                               void *);
-
-typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(int, _Unwind_Action,
-                                                      _Unwind_Exception_Class,
-                                                      _Unwind_Exception *,
-                                                      struct _Unwind_Context *);
-typedef _Unwind_Personality_Fn __personality_routine;
-
-typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
-                                                void *);
-
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-typedef enum {
-  _UVRSC_CORE = 0,        /* integer register */
-  _UVRSC_VFP = 1,         /* vfp */
-  _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
-} _Unwind_VRS_RegClass;
-
-typedef enum {
-  _UVRSD_UINT32 = 0,
-  _UVRSD_VFPX = 1,
-  _UVRSD_UINT64 = 3,
-  _UVRSD_FLOAT = 4,
-  _UVRSD_DOUBLE = 5
-} _Unwind_VRS_DataRepresentation;
-
-typedef enum {
-  _UVRSR_OK = 0,
-  _UVRSR_NOT_IMPLEMENTED = 1,
-  _UVRSR_FAILED = 2
-} _Unwind_VRS_Result;
-
-typedef uint32_t _Unwind_State;
-#define _US_VIRTUAL_UNWIND_FRAME  ((_Unwind_State)0)
-#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)
-#define _US_UNWIND_FRAME_RESUME   ((_Unwind_State)2)
-#define _US_ACTION_MASK           ((_Unwind_State)3)
-#define _US_FORCE_UNWIND          ((_Unwind_State)8)
-
-_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
-  _Unwind_VRS_RegClass __regclass,
-  uint32_t __regno,
-  _Unwind_VRS_DataRepresentation __representation,
-  void *__valuep);
-
-_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
-  _Unwind_VRS_RegClass __regclass,
-  uint32_t __regno,
-  _Unwind_VRS_DataRepresentation __representation,
-  void *__valuep);
-
-static __inline__
-_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
-  _Unwind_Word __value;
-  _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
-  return __value;
-}
-
-static __inline__
-void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
-                   _Unwind_Word __value) {
-  _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
-}
-
-static __inline__
-_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
-  _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
-  return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
-}
-
-static __inline__
-void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
-  _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
-  _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
-}
-#else
-_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
-void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
-
-_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
-void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
-#endif
-
-
-_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
-
-_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
-
-_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
-
-void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
-
-_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
-
-/* DWARF EH functions; currently not available on Darwin/ARM */
-#if !defined(__APPLE__) || !defined(__arm__)
-_Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_ForcedUnwind(_Unwind_Exception *, _Unwind_Stop_Fn,
-                                         void *);
-void _Unwind_DeleteException(_Unwind_Exception *);
-void _Unwind_Resume(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(_Unwind_Exception *);
-
-#endif
-
-_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
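/* [Editorial sketch -- not part of the upstream header] A minimal stack walk
 * built only on declarations from this header; the function names here are
 * illustrative, not part of any API. */
#include <stdio.h>

static _Unwind_Reason_Code __print_frame(struct _Unwind_Context *__ctx,
                                         void *__arg) {
  int __ip_before_insn = 0;
  /* _Unwind_GetIPInfo also reports whether the IP points before the call
   * instruction of the frame. */
  _Unwind_Word __ip = _Unwind_GetIPInfo(__ctx, &__ip_before_insn);
  fprintf(stderr, "frame ip=%p\n", (void *)__ip);
  (void)__arg;
  return _URC_NO_REASON; /* keep walking toward the outermost frame */
}

static void __dump_stack(void) { _Unwind_Backtrace(__print_frame, (void *)0); }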
-
-/* setjmp(3)/longjmp(3) stuff */
-typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
-
-void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
-void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
-_Unwind_Reason_Code _Unwind_SjLj_RaiseException(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(_Unwind_Exception *,
-                                              _Unwind_Stop_Fn, void *);
-void _Unwind_SjLj_Resume(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(_Unwind_Exception *);
-
-void *_Unwind_FindEnclosingFunction(void *);
-
-#ifdef __APPLE__
-
-_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
-    __attribute__((__unavailable__));
-_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
-    __attribute__((__unavailable__));
-
-/* Darwin-specific functions */
-void __register_frame(const void *);
-void __deregister_frame(const void *);
-
-struct dwarf_eh_bases {
-  uintptr_t tbase;
-  uintptr_t dbase;
-  uintptr_t func;
-};
-void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
-
-void __register_frame_info_bases(const void *, void *, void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_info(const void *, void *) __attribute__((__unavailable__));
-void __register_frame_info_table_bases(const void *, void*, void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_info_table(const void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_table(const void *) __attribute__((__unavailable__));
-void __deregister_frame_info(const void *) __attribute__((__unavailable__));
-void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__));
-
-#else
-
-_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
-_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
-
-#endif
-
-
-#ifndef HIDE_EXPORTS
-#pragma GCC visibility pop
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#endif /* __CLANG_UNWIND_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h b/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h
deleted file mode 100644
index f3c0807..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*===------------------ vaesintrin.h - VAES intrinsics ---------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <vaesintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __VAESINTRIN_H
-#define __VAESINTRIN_H
-
-/* Default attributes for YMM forms. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256)))
-
-/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes"), __min_vector_width__(512)))
-
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesenc_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesenc256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesdec_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesdec256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesenclast_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,
-              (__v4di) __B);
-}
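/* [Editorial sketch -- not part of the upstream header] One AES encryption
 * round over two independent 128-bit blocks packed into a single YMM
 * register; the function name is illustrative. Must be compiled for a
 * VAES-capable target (e.g. with -mvaes): */
static __inline__ __m256i __DEFAULT_FN_ATTRS
__aes_round_x2(__m256i __state2, __m256i __roundkey2) {
  /* lane 0 and lane 1 each get an independent AESENC */
  return _mm256_aesenc_epi128(__state2, __roundkey2);
}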
-
-#ifdef __AVX512FINTRIN_H
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenc_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesdec_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenclast_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesdeclast_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A,
-              (__v8di) __B);
-}
-#endif // __AVX512FINTRIN_H
-
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_F
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/wasm_simd128.h b/linux-x86/lib64/clang/14.0.2/include/wasm_simd128.h
deleted file mode 100644
index 3889a27..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/wasm_simd128.h
+++ /dev/null
@@ -1,1877 +0,0 @@
-/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __WASM_SIMD128_H
-#define __WASM_SIMD128_H
-
-#include <stdbool.h>
-#include <stdint.h>
-
-// User-facing type
-typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
-
-// Internal types determined by clang builtin definitions
-typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef signed char __i8x16
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned char __u8x16
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned short __u16x8
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned int __u32x4
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned long long __u64x2
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
-typedef unsigned char __u8x8
-    __attribute__((__vector_size__(8), __aligned__(8)));
-typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
-typedef unsigned short __u16x4
-    __attribute__((__vector_size__(8), __aligned__(8)));
-typedef int __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-typedef unsigned int __u32x2
-    __attribute__((__vector_size__(8), __aligned__(8)));
-typedef float __f32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-
-#define __DEFAULT_FN_ATTRS                                                     \
-  __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
-                 __min_vector_width__(128)))
-
-#define __REQUIRE_CONSTANT(c)                                                  \
-  __attribute__((__diagnose_if__(!__builtin_constant_p(c),                     \
-                                 #c " must be constant", "error")))
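/* [Editorial sketch -- not part of the upstream header] __REQUIRE_CONSTANT
 * turns a non-constant lane index into a hard compile-time error via
 * diagnose_if instead of silently selecting a slow path:
 *
 *   v128_t __v = wasm_i32x4_splat(0);
 *   int __i = 2;
 *   wasm_i32x4_extract_lane(__v, 2);   // OK: lane index is a constant
 *   wasm_i32x4_extract_lane(__v, __i); // error: "__i must be constant"
 */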
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
-  // UB-free unaligned access copied from xmmintrin.h
-  struct __wasm_v128_load_struct {
-    __v128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __wasm_v128_load_struct *)__mem)->__v;
-}
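/* [Editorial sketch -- not part of the upstream header] The packed,
 * may_alias wrapper struct above is one standard-conforming way to express
 * an unaligned, aliasing-safe 128-bit load; a memcpy-based formulation is
 * equivalent, assuming the compiler lowers both to a single v128.load: */
static __inline__ v128_t __DEFAULT_FN_ATTRS
__v128_load_via_memcpy(const void *__mem) {
  v128_t __v;
  __builtin_memcpy(&__v, __mem, sizeof(__v)); /* safe for any alignment */
  return __v;
}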
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load8_splat(const void *__mem) {
-  struct __wasm_v128_load8_splat_struct {
-    uint8_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint8_t __v = ((const struct __wasm_v128_load8_splat_struct *)__mem)->__v;
-  return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
-                           __v, __v, __v, __v, __v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load16_splat(const void *__mem) {
-  struct __wasm_v128_load16_splat_struct {
-    uint16_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint16_t __v = ((const struct __wasm_v128_load16_splat_struct *)__mem)->__v;
-  return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load32_splat(const void *__mem) {
-  struct __wasm_v128_load32_splat_struct {
-    uint32_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint32_t __v = ((const struct __wasm_v128_load32_splat_struct *)__mem)->__v;
-  return (v128_t)(__u32x4){__v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load64_splat(const void *__mem) {
-  struct __wasm_v128_load64_splat_struct {
-    uint64_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint64_t __v = ((const struct __wasm_v128_load64_splat_struct *)__mem)->__v;
-  return (v128_t)(__u64x2){__v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_load8x8(const void *__mem) {
-  struct __wasm_i16x8_load8x8_struct {
-    __i8x8 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i8x8 __v = ((const struct __wasm_i16x8_load8x8_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_load8x8(const void *__mem) {
-  struct __wasm_u16x8_load8x8_struct {
-    __u8x8 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u8x8 __v = ((const struct __wasm_u16x8_load8x8_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_load16x4(const void *__mem) {
-  struct __wasm_i32x4_load16x4_struct {
-    __i16x4 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i16x4 __v = ((const struct __wasm_i32x4_load16x4_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_load16x4(const void *__mem) {
-  struct __wasm_u32x4_load16x4_struct {
-    __u16x4 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u16x4 __v = ((const struct __wasm_u32x4_load16x4_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_load32x2(const void *__mem) {
-  struct __wasm_i64x2_load32x2_struct {
-    __i32x2 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i32x2 __v = ((const struct __wasm_i64x2_load32x2_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_load32x2(const void *__mem) {
-  struct __wasm_u64x2_load32x2_struct {
-    __u32x2 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u32x2 __v = ((const struct __wasm_u64x2_load32x2_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load32_zero(const void *__mem) {
-  struct __wasm_v128_load32_zero_struct {
-    int32_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int32_t __v = ((const struct __wasm_v128_load32_zero_struct *)__mem)->__v;
-  return (v128_t)(__i32x4){__v, 0, 0, 0};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v128_load64_zero(const void *__mem) {
-  struct __wasm_v128_load64_zero_struct {
-    int64_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int64_t __v = ((const struct __wasm_v128_load64_zero_struct *)__mem)->__v;
-  return (v128_t)(__i64x2){__v, 0};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_lane(
-    const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_load8_lane_struct {
-    int8_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int8_t __v = ((const struct __wasm_v128_load8_lane_struct *)__mem)->__v;
-  __i8x16 __ret = (__i8x16)__vec;
-  __ret[__i] = __v;
-  return (v128_t)__ret;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_lane(
-    const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_load16_lane_struct {
-    int16_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int16_t __v = ((const struct __wasm_v128_load16_lane_struct *)__mem)->__v;
-  __i16x8 __ret = (__i16x8)__vec;
-  __ret[__i] = __v;
-  return (v128_t)__ret;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_lane(
-    const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_load32_lane_struct {
-    int32_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int32_t __v = ((const struct __wasm_v128_load32_lane_struct *)__mem)->__v;
-  __i32x4 __ret = (__i32x4)__vec;
-  __ret[__i] = __v;
-  return (v128_t)__ret;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_lane(
-    const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_load64_lane_struct {
-    int64_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  int64_t __v = ((const struct __wasm_v128_load64_lane_struct *)__mem)->__v;
-  __i64x2 __ret = (__i64x2)__vec;
-  __ret[__i] = __v;
-  return (v128_t)__ret;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
-                                                          v128_t __a) {
-  // UB-free unaligned access copied from xmmintrin.h
-  struct __wasm_v128_store_struct {
-    __v128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store8_lane(void *__mem,
-                                                                v128_t __vec,
-                                                                int __i)
-    __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_store8_lane_struct {
-    int8_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store8_lane_struct *)__mem)->__v = ((__i8x16)__vec)[__i];
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store16_lane(void *__mem,
-                                                                 v128_t __vec,
-                                                                 int __i)
-    __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_store16_lane_struct {
-    int16_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store16_lane_struct *)__mem)->__v =
-      ((__i16x8)__vec)[__i];
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store32_lane(void *__mem,
-                                                                 v128_t __vec,
-                                                                 int __i)
-    __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_store32_lane_struct {
-    int32_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store32_lane_struct *)__mem)->__v =
-      ((__i32x4)__vec)[__i];
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store64_lane(void *__mem,
-                                                                 v128_t __vec,
-                                                                 int __i)
-    __REQUIRE_CONSTANT(__i) {
-  struct __wasm_v128_store64_lane_struct {
-    int64_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store64_lane_struct *)__mem)->__v =
-      ((__i64x2)__vec)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
-                int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
-                int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
-                int8_t __c14, int8_t __c15) {
-  return (v128_t)(__i8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
-                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
-                           __c12, __c13, __c14, __c15};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_make(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,
-                uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
-                uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
-                uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15) {
-  return (v128_t)(__u8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
-                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
-                           __c12, __c13, __c14, __c15};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
-                int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
-  return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_make(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,
-                uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7) {
-  return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
-                                                            int32_t __c1,
-                                                            int32_t __c2,
-                                                            int32_t __c3) {
-  return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_make(uint32_t __c0,
-                                                            uint32_t __c1,
-                                                            uint32_t __c2,
-                                                            uint32_t __c3) {
-  return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
-                                                            int64_t __c1) {
-  return (v128_t)(__i64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_make(uint64_t __c0,
-                                                            uint64_t __c1) {
-  return (v128_t)(__u64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
-                                                            float __c1,
-                                                            float __c2,
-                                                            float __c3) {
-  return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
-                                                            double __c1) {
-  return (v128_t)(__f64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3,
-                 int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7,
-                 int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11,
-                 int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
-            __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
-                __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)
-                    __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)
-                        __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)
-                            __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)
-                                __REQUIRE_CONSTANT(__c15) {
-  return (v128_t)(__i8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
-                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
-                           __c12, __c13, __c14, __c15};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_const(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,
-                 uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
-                 uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
-                 uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
-            __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
-                __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)
-                    __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)
-                        __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)
-                            __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)
-                                __REQUIRE_CONSTANT(__c15) {
-  return (v128_t)(__u8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
-                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
-                           __c12, __c13, __c14, __c15};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
-                 int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
-            __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
-                __REQUIRE_CONSTANT(__c7) {
-  return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_const(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,
-                 uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
-            __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
-                __REQUIRE_CONSTANT(__c7) {
-  return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) {
-  return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_const(uint32_t __c0, uint32_t __c1, uint32_t __c2, uint32_t __c3)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) {
-  return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0,
-                                                             int64_t __c1)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
-  return (v128_t)(__i64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const(uint64_t __c0,
-                                                             uint64_t __c1)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
-  return (v128_t)(__u64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
-        __REQUIRE_CONSTANT(__c3) {
-  return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const(double __c0,
-                                                             double __c1)
-    __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
-  return (v128_t)(__f64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__i8x16){__c, __c, __c, __c, __c, __c, __c, __c,
-                           __c, __c, __c, __c, __c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_const_splat(uint8_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__u8x16){__c, __c, __c, __c, __c, __c, __c, __c,
-                           __c, __c, __c, __c, __c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__i16x8){__c, __c, __c, __c, __c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_const_splat(uint16_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__u16x8){__c, __c, __c, __c, __c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__i32x4){__c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_const_splat(uint32_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__u32x4){__c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__i64x2){__c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const_splat(uint64_t __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__u64x2){__c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__f32x4){__c, __c, __c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const_splat(double __c)
-    __REQUIRE_CONSTANT(__c) {
-  return (v128_t)(__f64x2){__c, __c};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
-  return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
-                           __a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_splat(uint8_t __a) {
-  return (v128_t)(__u8x16){__a, __a, __a, __a, __a, __a, __a, __a,
-                           __a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a,
-                                                                    int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__i8x16)__a)[__i];
-}
-
-static __inline__ uint8_t __DEFAULT_FN_ATTRS wasm_u8x16_extract_lane(v128_t __a,
-                                                                     int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__u8x16)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    int8_t __b)
-    __REQUIRE_CONSTANT(__i) {
-  __i8x16 __v = (__i8x16)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    uint8_t __b)
-    __REQUIRE_CONSTANT(__i) {
-  __u8x16 __v = (__u8x16)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
-  return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_splat(uint16_t __a) {
-  return (v128_t)(__u16x8){__a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-static __inline__ int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a,
-                                                                     int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__i16x8)__a)[__i];
-}
-
-static __inline__ uint16_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
-  return ((__u16x8)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    int16_t __b)
-    __REQUIRE_CONSTANT(__i) {
-  __i16x8 __v = (__i16x8)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_replace_lane(
-    v128_t __a, int __i, uint16_t __b) __REQUIRE_CONSTANT(__i) {
-  __u16x8 __v = (__u16x8)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
-  return (v128_t)(__i32x4){__a, __a, __a, __a};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_splat(uint32_t __a) {
-  return (v128_t)(__u32x4){__a, __a, __a, __a};
-}
-
-static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a,
-                                                                     int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__i32x4)__a)[__i];
-}
-
-static __inline__ uint32_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
-  return ((__u32x4)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    int32_t __b)
-    __REQUIRE_CONSTANT(__i) {
-  __i32x4 __v = (__i32x4)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_replace_lane(
-    v128_t __a, int __i, uint32_t __b) __REQUIRE_CONSTANT(__i) {
-  __u32x4 __v = (__u32x4)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
-  return (v128_t)(__i64x2){__a, __a};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_splat(uint64_t __a) {
-  return (v128_t)(__u64x2){__a, __a};
-}
-
-static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a,
-                                                                     int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__i64x2)__a)[__i];
-}
-
-static __inline__ uint64_t __DEFAULT_FN_ATTRS
-wasm_u64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
-  return ((__u64x2)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    int64_t __b)
-    __REQUIRE_CONSTANT(__i) {
-  __i64x2 __v = (__i64x2)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_replace_lane(
-    v128_t __a, int __i, uint64_t __b) __REQUIRE_CONSTANT(__i) {
-  __u64x2 __v = (__u64x2)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
-  return (v128_t)(__f32x4){__a, __a, __a, __a};
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS wasm_f32x4_extract_lane(v128_t __a,
-                                                                   int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__f32x4)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    float __b)
-    __REQUIRE_CONSTANT(__i) {
-  __f32x4 __v = (__f32x4)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
-  return (v128_t)(__f64x2){__a, __a};
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS wasm_f64x2_extract_lane(v128_t __a,
-                                                                    int __i)
-    __REQUIRE_CONSTANT(__i) {
-  return ((__f64x2)__a)[__i];
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_replace_lane(v128_t __a,
-                                                                    int __i,
-                                                                    double __b)
-    __REQUIRE_CONSTANT(__i) {
-  __f64x2 __v = (__f64x2)__a;
-  __v[__i] = __b;
-  return (v128_t)__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a == (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a != (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a < (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a < (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a > (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a > (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a <= (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a <= (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i8x16)__a >= (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a >= (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a == (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a != (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a < (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a < (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a > (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a > (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a <= (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a <= (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a >= (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a >= (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a == (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a != (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a < (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a < (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a > (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a > (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a <= (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a <= (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a >= (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a >= (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a == (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a != (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a < (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a > (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a <= (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i64x2)__a >= (__i64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a == (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a != (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a < (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a > (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a <= (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a >= (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a == (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a != (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a < (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a > (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a <= (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a >= (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
-  return ~__a;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
-                                                          v128_t __b) {
-  return __a & __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
-                                                         v128_t __b) {
-  return __a | __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
-                                                          v128_t __b) {
-  return __a ^ __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
-                                                             v128_t __b) {
-  return __a & ~__b;
-}
-
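One caveat worth calling out for wasm_v128_andnot: it computes __a & ~__b, negating the second operand, which is the opposite of x86's PANDN convention. A small sketch (illustrative name):

#include <wasm_simd128.h>

// Keep the bits of v that are NOT set in mask.
static v128_t clear_bits(v128_t v, v128_t mask) {
  return wasm_v128_andnot(v, mask); // v & ~mask
}
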
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_v128((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
-                                                                v128_t __b,
-                                                                v128_t __mask) {
-  return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
-                                          (__i32x4)__mask);
-}
-
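The comparison intrinsics above yield all-ones lanes where the predicate holds and all-zeros lanes elsewhere, so their results feed wasm_v128_bitselect's mask operand directly. A sketch of a per-lane signed max built that way (the dedicated wasm_i32x4_max below is the single-instruction form; this only illustrates the mask convention):

#include <wasm_simd128.h>

static v128_t select_max_i32x4(v128_t a, v128_t b) {
  v128_t mask = wasm_i32x4_gt(a, b);      // all-ones where a > b
  return wasm_v128_bitselect(a, b, mask); // bits of a where mask is set, else b
}
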
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
-  return (v128_t)(-(__u8x16)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i8x16((__i8x16)__a);
-}
-
-static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {
-  return __builtin_wasm_bitmask_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
-  return (v128_t)__builtin_wasm_popcnt_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i8x16)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i8x16)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__u8x16)__a >> __b);
-}
-
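Signed lanes shift right arithmetically (sign bits shift in) while unsigned lanes shift logically (zeros shift in), and per the WebAssembly SIMD spec the count is taken modulo the lane width. A sketch (illustrative names; wasm_i8x16_splat is defined earlier in this header):

#include <wasm_simd128.h>

static void shift_demo(v128_t *arith, v128_t *logical) {
  v128_t v = wasm_i8x16_splat(-16); // 0xF0 in every lane
  *arith = wasm_i8x16_shr(v, 2);    // each lane becomes -4 (0xFC)
  *logical = wasm_u8x16_shr(v, 2);  // each lane becomes 60 (0x3C)
}
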
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u8x16)__a + (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u8x16)__a - (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
-}
-
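The plain add/sub forms wrap modulo 2^8 per lane; the _sat forms clamp to the lane's representable range. A sketch (illustrative name):

#include <wasm_simd128.h>

static void sat_demo(v128_t *wrapped, v128_t *clamped) {
  v128_t v = wasm_i8x16_splat(100);
  *wrapped = wasm_i8x16_add(v, v);     // 200 wraps to -56 in each signed lane
  *clamped = wasm_i8x16_add_sat(v, v); // clamps to 127 in each lane
}
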
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
-  return (v128_t)(-(__u16x8)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i16x8((__i16x8)__a);
-}
-
-static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
-  return __builtin_wasm_bitmask_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i16x8)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i16x8)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__u16x8)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u16x8)__a + (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u16x8)__a - (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u16x8)__a * (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
-  return (v128_t)(-(__u32x4)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i32x4((__i32x4)__a);
-}
-
-static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
-  return __builtin_wasm_bitmask_i32x4((__i32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i32x4)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i32x4)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__u32x4)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a + (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a - (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a * (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a,
-                                                                 v128_t __b) {
-  return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
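Each 32-bit output lane of the dot product is the sum of two adjacent signed 16-bit products, out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], which makes it the building block for integer dot products. A sketch (illustrative name):

#include <wasm_simd128.h>

static v128_t dot_pairs(v128_t a, v128_t b) {
  return wasm_i32x4_dot_i16x8(a, b); // four pairwise-summed products
}
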
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
-  return (v128_t)(-(__u64x2)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i64x2((__i64x2)__a);
-}
-
-static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
-  return __builtin_wasm_bitmask_i64x2((__i64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i64x2)__a << (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__i64x2)__a >> (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
-                                                           uint32_t __b) {
-  return (v128_t)((__u64x2)__a >> (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a + (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a - (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a * (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
-  return (v128_t)(-(__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
-  return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a) {
-  return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a) {
-  return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a) {
-  return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a + (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a - (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a * (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a / (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
-}
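min/max above propagate NaN from either operand, while pmin/pmax are the spec's "pseudo" forms (pmin: b < a ? b : a; pmax: a < b ? b : a) and so fall back to the first operand when the comparison fails, NaN included. That asymmetry makes them the cheaper choice for clamping. A sketch (illustrative name):

#include <wasm_simd128.h>

static v128_t clamp_f32x4(v128_t v, v128_t lo, v128_t hi) {
  return wasm_f32x4_pmin(wasm_f32x4_pmax(v, lo), hi);
}
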
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
-  return (v128_t)(-(__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
-  return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a) {
-  return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a) {
-  return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a) {
-  return (v128_t)__builtin_wasm_nearest_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a + (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a - (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a * (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a / (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_trunc_sat_f32x4(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_trunc_sat_f32x4(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_convert_i32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_convert_u32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
-}
-
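Unlike a plain C float-to-int cast, which is undefined for out-of-range input, trunc_sat clamps out-of-range lanes to INT32_MIN/INT32_MAX and maps NaN lanes to 0. A round-trip sketch (illustrative name):

#include <wasm_simd128.h>

static v128_t quantize_demo(v128_t f) {
  v128_t i = wasm_i32x4_trunc_sat_f32x4(f); // saturating float -> int
  return wasm_f32x4_convert_i32x4(i);       // int -> float, rounded to nearest
}
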
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f64x2_convert_low_i32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__i32x2){__a[0], __a[1]}, __f64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f64x2_convert_low_u32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__u32x2){__a[0], __a[1]}, __f64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_demote_f64x2_zero(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      __builtin_shufflevector((__f64x2)__a, (__f64x2){0, 0}, 0, 1, 2, 3),
-      __f32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f64x2_promote_low_f32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__f32x2){((__f32x4)__a)[0], ((__f32x4)__a)[1]}, __f64x2);
-}
-
-#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
-                           __c14, __c15)                                       \
-  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5,      \
-      __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
-
-#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7)                                               \
-  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2,        \
-      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2,  \
-      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2,  \
-      (__c7)*2 + 1))
-
-#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
-  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2,    \
-      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3,        \
-      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4,            \
-      (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
-
-#define wasm_i64x2_shuffle(__a, __b, __c0, __c1)                               \
-  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2,    \
-      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7,    \
-      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4,        \
-      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
-
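In these shuffle macros, lane indices below the lane count select from the first operand and the rest from the second; each macro expands its element indices into byte indices for the underlying i8x16 shuffle. Passing the same vector twice simply permutes it (illustrative name):

#include <wasm_simd128.h>

static v128_t reverse_i32x4(v128_t v) {
  return wasm_i32x4_shuffle(v, v, 3, 2, 1, 0); // lanes in reverse order
}
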
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
-}
-
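The narrow intrinsics are saturating packs: the lanes of __a become the low half of the result and those of __b the high half, each clamped to the destination range ([-128, 127] for i8x16, [0, 255] for u8x16). A sketch (illustrative name):

#include <wasm_simd128.h>

static v128_t pack_demo(v128_t a, v128_t b) {
  return wasm_i8x16_narrow_i16x8(a, b); // sixteen clamped 8-bit lanes
}
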
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_extend_low_i8x16(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
-               ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
-               ((__i8x16)__a)[6], ((__i8x16)__a)[7]},
-      __i16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_extend_high_i8x16(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
-               ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
-               ((__i8x16)__a)[14], ((__i8x16)__a)[15]},
-      __i16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extend_low_u8x16(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
-               ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
-               ((__u8x16)__a)[6], ((__u8x16)__a)[7]},
-      __u16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extend_high_u8x16(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
-               ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
-               ((__u8x16)__a)[14], ((__u8x16)__a)[15]},
-      __u16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_extend_low_i16x8(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
-                ((__i16x8)__a)[3]},
-      __i32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_extend_high_i16x8(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
-                ((__i16x8)__a)[7]},
-      __i32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extend_low_u16x8(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
-                ((__u16x8)__a)[3]},
-      __u32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extend_high_u16x8(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
-                ((__u16x8)__a)[7]},
-      __u32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_extend_low_i32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i32x2){((__i32x4)__a)[0], ((__i32x4)__a)[1]}, __i64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_extend_high_i32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__i32x2){((__i32x4)__a)[2], ((__i32x4)__a)[3]}, __i64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_extend_low_u32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u32x2){((__u32x4)__a)[0], ((__u32x4)__a)[1]}, __u64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_extend_high_u32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector(
-      (__u32x2){((__u32x4)__a)[2], ((__u32x4)__a)[3]}, __u64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_extadd_pairwise_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extadd_pairwise_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_extadd_pairwise_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extadd_pairwise_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b) {
-  return (v128_t)((__i16x8)wasm_i16x8_extend_low_i8x16(__a) *
-                  (__i16x8)wasm_i16x8_extend_low_i8x16(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b) {
-  return (v128_t)((__i16x8)wasm_i16x8_extend_high_i8x16(__a) *
-                  (__i16x8)wasm_i16x8_extend_high_i8x16(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b) {
-  return (v128_t)((__u16x8)wasm_u16x8_extend_low_u8x16(__a) *
-                  (__u16x8)wasm_u16x8_extend_low_u8x16(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b) {
-  return (v128_t)((__u16x8)wasm_u16x8_extend_high_u8x16(__a) *
-                  (__u16x8)wasm_u16x8_extend_high_u8x16(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)((__i32x4)wasm_i32x4_extend_low_i16x8(__a) *
-                  (__i32x4)wasm_i32x4_extend_low_i16x8(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)((__i32x4)wasm_i32x4_extend_high_i16x8(__a) *
-                  (__i32x4)wasm_i32x4_extend_high_i16x8(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b) {
-  return (v128_t)((__u32x4)wasm_u32x4_extend_low_u16x8(__a) *
-                  (__u32x4)wasm_u32x4_extend_low_u16x8(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b) {
-  return (v128_t)((__u32x4)wasm_u32x4_extend_high_u16x8(__a) *
-                  (__u32x4)wasm_u32x4_extend_high_u16x8(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)((__i64x2)wasm_i64x2_extend_low_i32x4(__a) *
-                  (__i64x2)wasm_i64x2_extend_low_i32x4(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)((__i64x2)wasm_i64x2_extend_high_i32x4(__a) *
-                  (__i64x2)wasm_i64x2_extend_high_i32x4(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b) {
-  return (v128_t)((__u64x2)wasm_u64x2_extend_low_u32x4(__a) *
-                  (__u64x2)wasm_u64x2_extend_low_u32x4(__b));
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b) {
-  return (v128_t)((__u64x2)wasm_u64x2_extend_high_u32x4(__a) *
-                  (__u64x2)wasm_u64x2_extend_high_u32x4(__b));
-}
-
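The extmul forms fuse the extend and multiply steps above, producing full-width products that cannot overflow. A widening multiply of eight 16-bit lanes into eight 32-bit products (illustrative name):

#include <wasm_simd128.h>

static void widening_mul(v128_t a, v128_t b, v128_t *lo, v128_t *hi) {
  *lo = wasm_i32x4_extmul_low_i16x8(a, b);  // products of lanes 0-3
  *hi = wasm_i32x4_extmul_high_i16x8(a, b); // products of lanes 4-7
}
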
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a,
-                                                                   v128_t __b) {
-  return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-// Old intrinsic names supported to ease transitioning to the standard names. Do
-// not use these; they will be removed in the near future.
-
-#define __DEPRECATED_FN_ATTRS(__replacement)                                   \
-  __DEFAULT_FN_ATTRS __attribute__(                                            \
-      (deprecated("use " __replacement " instead", __replacement)))
-
-#define __WASM_STR(X) #X
-
-#ifdef __DEPRECATED
-#define __DEPRECATED_WASM_MACRO(__name, __replacement)                         \
-  _Pragma(__WASM_STR(GCC warning(                                              \
-      "'" __name "' is deprecated: use '" __replacement "' instead")))
-#else
-#define __DEPRECATED_WASM_MACRO(__name, __replacement)
-#endif
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load8_splat")
-wasm_v8x16_load_splat(const void *__mem) {
-  return wasm_v128_load8_splat(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load16_splat")
-wasm_v16x8_load_splat(const void *__mem) {
-  return wasm_v128_load16_splat(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load32_splat")
-wasm_v32x4_load_splat(const void *__mem) {
-  return wasm_v128_load32_splat(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load64_splat")
-wasm_v64x2_load_splat(const void *__mem) {
-  return wasm_v128_load64_splat(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_load8x8")
-wasm_i16x8_load_8x8(const void *__mem) {
-  return wasm_i16x8_load8x8(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_load8x8")
-wasm_u16x8_load_8x8(const void *__mem) {
-  return wasm_u16x8_load8x8(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_load16x4")
-wasm_i32x4_load_16x4(const void *__mem) {
-  return wasm_i32x4_load16x4(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_load16x4")
-wasm_u32x4_load_16x4(const void *__mem) {
-  return wasm_u32x4_load16x4(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i64x2_load32x2")
-wasm_i64x2_load_32x2(const void *__mem) {
-  return wasm_i64x2_load32x2(__mem);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u64x2_load32x2")
-wasm_u64x2_load_32x2(const void *__mem) {
-  return wasm_u64x2_load32x2(__mem);
-}
-
-#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
-                           __c14, __c15)                                       \
-  __DEPRECATED_WASM_MACRO("wasm_v8x16_shuffle", "wasm_i8x16_shuffle")          \
-  wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
-                     __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)
-
-#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7)                                               \
-  __DEPRECATED_WASM_MACRO("wasm_v16x8_shuffle", "wasm_i16x8_shuffle")          \
-  wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)
-
-#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
-  __DEPRECATED_WASM_MACRO("wasm_v32x4_shuffle", "wasm_i32x4_shuffle")          \
-  wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)
-
-#define wasm_v64x2_shuffle(__a, __b, __c0, __c1)                               \
-  __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle")          \
-  wasm_i64x2_shuffle(__a, __b, __c0, __c1)
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
-wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
-  return wasm_i8x16_swizzle(__a, __b);
-}
-
-static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
-wasm_i8x16_any_true(v128_t __a) {
-  return wasm_v128_any_true(__a);
-}
-
-static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
-wasm_i16x8_any_true(v128_t __a) {
-  return wasm_v128_any_true(__a);
-}
-
-static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
-wasm_i32x4_any_true(v128_t __a) {
-  return wasm_v128_any_true(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_add_sat")
-wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
-  return wasm_i8x16_add_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_add_sat")
-wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
-  return wasm_u8x16_add_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_sub_sat")
-wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return wasm_i8x16_sub_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_sub_sat")
-wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return wasm_u8x16_sub_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_add_sat")
-wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
-  return wasm_i16x8_add_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_add_sat")
-wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
-  return wasm_u16x8_add_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_sub_sat")
-wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return wasm_i16x8_sub_sat(__a, __b);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_sub_sat")
-wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return wasm_u16x8_sub_sat(__a, __b);
-}
-
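Migration from these aliases is mechanical: both calls below compute the same saturating add, but the alias triggers a -Wdeprecated-declarations warning pointing at the standard name. A sketch (illustrative name):

#include <wasm_simd128.h>

static v128_t migrate_demo(v128_t a, v128_t b) {
  (void)wasm_i8x16_add_saturate(a, b); // deprecated alias; warns
  return wasm_i8x16_add_sat(a, b);     // standard name
}
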
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_low_i8x16")
-wasm_i16x8_widen_low_i8x16(v128_t __a) {
-  return wasm_i16x8_extend_low_i8x16(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_high_i8x16")
-wasm_i16x8_widen_high_i8x16(v128_t __a) {
-  return wasm_i16x8_extend_high_i8x16(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_low_u8x16")
-wasm_i16x8_widen_low_u8x16(v128_t __a) {
-  return wasm_u16x8_extend_low_u8x16(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_high_u8x16")
-wasm_i16x8_widen_high_u8x16(v128_t __a) {
-  return wasm_u16x8_extend_high_u8x16(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_low_i16x8")
-wasm_i32x4_widen_low_i16x8(v128_t __a) {
-  return wasm_i32x4_extend_low_i16x8(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_high_i16x8")
-wasm_i32x4_widen_high_i16x8(v128_t __a) {
-  return wasm_i32x4_extend_high_i16x8(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_low_u16x8")
-wasm_i32x4_widen_low_u16x8(v128_t __a) {
-  return wasm_u32x4_extend_low_u16x8(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_high_u16x8")
-wasm_i32x4_widen_high_u16x8(v128_t __a) {
-  return wasm_u32x4_extend_high_u16x8(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_trunc_sat_f32x4")
-wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
-  return wasm_i32x4_trunc_sat_f32x4(__a);
-}
-
-static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_trunc_sat_f32x4")
-wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
-  return wasm_u32x4_trunc_sat_f32x4(__a);
-}
-
-// Undefine helper macros
-#undef __DEFAULT_FN_ATTRS
-#undef __DEPRECATED_FN_ATTRS
-
-#endif // __WASM_SIMD128_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/x86gprintrin.h b/linux-x86/lib64/clang/14.0.2/include/x86gprintrin.h
deleted file mode 100644
index 01e741f..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/x86gprintrin.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86GPRINTRIN_H
-#define __X86GPRINTRIN_H
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__HRESET__)
-#include <hresetintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__UINTR__)
-#include <uintrintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CRC32__)
-#include <crc32intrin.h>
-#endif
-
-#define __SSC_MARK(Tag)                                                        \
-  __asm__ __volatile__("mov {%%ebx, %%eax|eax, ebx}; "                         \
-                       "mov {%0, %%ebx|ebx, %0}; "                             \
-                       ".byte 0x64, 0x67, 0x90; "                              \
-                       "mov {%%eax, %%ebx|ebx, eax};" ::"i"(Tag)               \
-                       : "%eax");
-
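A usage note, not part of the header: __SSC_MARK inserts the magic mov/NOP sequence that Intel's simulation and tracing tools recognize, and the usual pattern brackets a region of interest with a start and a stop mark. The 0x111/0x222 tag values follow the common SDE convention and are an assumption here, not something this header defines:

#include <x86gprintrin.h>

static void profile_region(void (*hot)(void)) {
  __SSC_MARK(0x111); /* "start" tag (assumed SDE convention) */
  hot();             /* region of interest */
  __SSC_MARK(0x222); /* "stop" tag */
}
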
-#endif /* __X86GPRINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/x86intrin.h b/linux-x86/lib64/clang/14.0.2/include/x86intrin.h
deleted file mode 100644
index 768d0e5..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/x86intrin.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86INTRIN_H
-#define __X86INTRIN_H
-
-#include <ia32intrin.h>
-
-#include <immintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__3dNOW__)
-#include <mm3dnow.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PRFCHW__)
-#include <prfchwintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE4A__)
-#include <ammintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FMA4__)
-#include <fma4intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XOP__)
-#include <xopintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__TBM__)
-#include <tbmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__LWP__)
-#include <lwpintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MWAITX__)
-#include <mwaitxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLZERO__)
-#include <clzerointrin.h>
-#endif
-
-
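A minimal consumer sketch: a single umbrella include suffices, and building with a feature flag such as -mmwaitx defines the matching macro (__MWAITX__) so the corresponding sub-header is pulled in by the guards above. Flag names follow the usual GCC/Clang convention and are assumptions here:

#include <x86intrin.h>

int main(void) { return 0; } /* sub-headers arrive via the guards above */
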
-#endif /* __X86INTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/xmmintrin.h b/linux-x86/lib64/clang/14.0.2/include/xmmintrin.h
deleted file mode 100644
index 1612d3d..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/xmmintrin.h
+++ /dev/null
@@ -1,3012 +0,0 @@
-/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __XMMINTRIN_H
-#define __XMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architecture"
-#endif
-
-#include <mmintrin.h>
-
-typedef int __v4si __attribute__((__vector_size__(16)));
-typedef float __v4sf __attribute__((__vector_size__(16)));
-typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Unsigned types */
-typedef unsigned int __v4su __attribute__((__vector_size__(16)));
-
-/* This header should only be included in a hosted environment as it depends on
- * a standard library to provide allocation routines. */
-#if __STDC_HOSTED__
-#include <mm_malloc.h>
-#endif
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse"), __min_vector_width__(64)))
-
-/// Adds the 32-bit float values in the low-order bits of the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSS / ADDSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum
-///    of the lower 32 bits of both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_add_ss(__m128 __a, __m128 __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [4 x float], and returns the results of
-///    the addition.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPS / ADDPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the sums of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_add_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a + (__v4sf)__b);
-}
-
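A sketch contrasting the scalar and packed forms (illustrative name): _mm_add_ss touches only lane 0 and passes lanes 1-3 through from __a, while _mm_add_ps adds all four lanes.

#include <xmmintrin.h>

static __m128 add_demo(__m128 a, __m128 b) {
  __m128 scalar = _mm_add_ss(a, b); // {a0+b0, a1, a2, a3}
  __m128 packed = _mm_add_ps(a, b); // {a0+b0, a1+b1, a2+b2, a3+b3}
  return _mm_add_ps(scalar, packed);
}
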
-/// Subtracts the 32-bit float value in the low-order bits of the second
-///    operand from the corresponding value in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSS / SUBSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
-///    of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the subtrahend. The lower 32
-///    bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    difference of the lower 32 bits of both operands. The upper 96 bits are
-///    copied from the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sub_ss(__m128 __a, __m128 __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts each of the values of the second operand from the first
-///    operand, both of which are 128-bit vectors of [4 x float], and returns
-///    the results of the subtraction.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPS / SUBPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the subtrahend.
-/// \returns A 128-bit vector of [4 x float] containing the differences between
-///    both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sub_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a - (__v4sf)__b);
-}
-
-/// Multiplies two 32-bit float values in the low-order bits of the
-///    operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSS / MULSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the product of the lower
-///    32 bits of both operands. The upper 96 bits are copied from the upper 96
-///    bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mul_ss(__m128 __a, __m128 __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [4 x float] and returns the
-///    results of the multiplication.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPS / MULPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the products of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mul_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a * (__v4sf)__b);
-}
-
-/// Divides the value in the low-order 32 bits of the first operand by
-///    the corresponding value in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSS / DIVSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the dividend. The lower 32
-///    bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits
-///    of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the quotients of the
-///    lower 32 bits of both operands. The upper 96 bits are copied from the
-///    upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_div_ss(__m128 __a, __m128 __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Divides two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPS / DIVPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the divisor.
-/// \returns A 128-bit vector of [4 x float] containing the quotients of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_div_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a / (__v4sf)__b);
-}
-
-/// Calculates the square root of the value stored in the low-order bits
-///    of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSS / SQRTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the square root of the
-///    value in the low-order bits of the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sqrt_ss(__m128 __a)
-{
-  return (__m128)__builtin_ia32_sqrtss((__v4sf)__a);
-}
-
-/// Calculates the square roots of the values stored in a 128-bit vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPS / SQRTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sqrt_ps(__m128 __a)
-{
-  return __builtin_ia32_sqrtps((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocal of the value stored in the
-///    low-order bits of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRCPSS / RCPSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocal of the value in the low-order bits of the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rcp_ss(__m128 __a)
-{
-  return (__m128)__builtin_ia32_rcpss((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocals of the values stored in a
-///    128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRCPPS / RCPPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocals of the values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rcp_ps(__m128 __a)
-{
-  return (__m128)__builtin_ia32_rcpps((__v4sf)__a);
-}
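-
-/* Illustrative sketch (not part of the original header): the RCPPS estimate
-   is only accurate to about 12 bits of precision, so it is commonly paired
-   with one Newton-Raphson step, r' = r * (2 - a * r), to roughly double the
-   precision. The helper name below is hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_rcp_refined_ps(__m128 __a)
-{
-  __m128 __r = _mm_rcp_ps(__a);                /* ~12-bit estimate */
-  __m128 __two = __extension__ (__m128){ 2.0f, 2.0f, 2.0f, 2.0f };
-  /* One Newton-Raphson iteration: r * (2 - a * r). */
-  return _mm_mul_ps(__r, _mm_sub_ps(__two, _mm_mul_ps(__a, __r)));
-}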
-
-/// Calculates the approximate reciprocal of the square root of the value
-///    stored in the low-order bits of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRSQRTSS / RSQRTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocal of the square root of the value in the low-order bits of the
-///    operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rsqrt_ss(__m128 __a)
-{
-  return __builtin_ia32_rsqrtss((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocals of the square roots of the
-///    values stored in a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRSQRTPS / RSQRTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocals of the square roots of the values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rsqrt_ps(__m128 __a)
-{
-  return __builtin_ia32_rsqrtps((__v4sf)__a);
-}
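-
-/* Illustrative sketch (not part of the original header): like RCPPS, the
-   RSQRTPS estimate carries roughly 12 bits of precision; one Newton-Raphson
-   step, y' = y * (1.5 - 0.5 * a * y * y), refines it. The helper name below
-   is hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_rsqrt_refined_ps(__m128 __a)
-{
-  __m128 __y = _mm_rsqrt_ps(__a);              /* ~12-bit estimate */
-  __m128 __half = __extension__ (__m128){ 0.5f, 0.5f, 0.5f, 0.5f };
-  __m128 __three_halves = __extension__ (__m128){ 1.5f, 1.5f, 1.5f, 1.5f };
-  /* One Newton-Raphson iteration: y * (1.5 - 0.5 * a * y * y). */
-  return _mm_mul_ps(__y, _mm_sub_ps(__three_halves,
-      _mm_mul_ps(_mm_mul_ps(__half, __a), _mm_mul_ps(__y, __y))));
-}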
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands and returns the lesser value in the low-order bits of the
-///    vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSS / MINSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    minimum value between both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_min_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 128-bit vectors of [4 x float] and returns the lesser
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPS / MINPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \returns A 128-bit vector of [4 x float] containing the minimum values
-///    between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_min_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands and returns the greater value in the low-order bits of a 128-bit
-///    vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSS / MAXSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    maximum value between both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_max_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 128-bit vectors of [4 x float] and returns the greater
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPS / MAXPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \returns A 128-bit vector of [4 x float] containing the maximum values
-///    between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_max_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);
-}
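-
-/* Illustrative sketch (not part of the original header): min and max compose
-   into a branchless clamp. MINPS/MAXPS return the second operand when the
-   inputs are unordered, so with this argument order a NaN lane in __x
-   resolves to the corresponding lane of __lo. The helper name below is
-   hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_clamp_ps(__m128 __x, __m128 __lo, __m128 __hi)
-{
-  return _mm_min_ps(_mm_max_ps(__x, __lo), __hi);
-}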
-
-/// Performs a bitwise AND of two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDPS / ANDPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector containing one of the source operands.
-/// \param __b
-///    A 128-bit vector containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_and_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a & (__v4su)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using
-///    the one's complement of the values contained in the first source
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDNPS / ANDNPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the first source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the second source operand.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
-///    one's complement of the first operand and the values in the second
-///    operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_andnot_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)(~(__v4su)__a & (__v4su)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VORPS / ORPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_or_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a | (__v4su)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit vectors of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR
-///    of the values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_xor_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a ^ (__v4su)__b);
-}
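-
-/* Illustrative sketch (not part of the original header): these bitwise ops
-   are typically used for sign-bit manipulation, since IEEE-754 keeps the
-   sign in the top bit of each lane. Clearing it with ANDNPS yields |x|;
-   toggling it with XORPS negates. The helper names below are hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_abs_ps(__m128 __x)
-{
-  const __m128 __sign = (__m128)(__v4su){ 0x80000000U, 0x80000000U,
-                                          0x80000000U, 0x80000000U };
-  return _mm_andnot_ps(__sign, __x);   /* keep every bit except the sign */
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_neg_ps(__m128 __x)
-{
-  const __m128 __sign = (__m128)(__v4su){ 0x80000000U, 0x80000000U,
-                                          0x80000000U, 0x80000000U };
-  return _mm_xor_ps(__x, __sign);      /* flip the sign bit of each lane */
-}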
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for equality and returns the result of the comparison in the
-///    low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSS / CMPEQSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpeq_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] for equality.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPS / CMPEQPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpeq_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is less than the
-///    corresponding value in the second operand and returns the result of the
-///    comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmplt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are less than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmplt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);
-}
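-
-/* Illustrative sketch (not part of the original header): each packed
-   comparison produces all-ones (true) or all-zeros (false) per lane, which
-   combines with the bitwise ops above into a branchless select,
-   (mask & a) | (~mask & b). The helper name below is hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_select_lt_ps(__m128 __a, __m128 __b)
-{
-  __m128 __mask = _mm_cmplt_ps(__a, __b);   /* all-ones where a < b */
-  /* Picks the lane from __a where a < b, otherwise the lane from __b. */
-  return _mm_or_ps(_mm_and_ps(__mask, __a), _mm_andnot_ps(__mask, __b));
-}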
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is less than or
-///    equal to the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmple_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmple_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is greater than
-///    the corresponding value in the second operand and returns the result of
-///    the comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions
-///    (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpgt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions
-///    (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpgt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is greater than
-///    or equal to the corresponding value in the second operand and returns
-///    the result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions
-///    (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpge_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions
-///    (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpge_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for inequality and returns the result of the comparison in the
-///    low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSS / CMPNEQSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpneq_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] for inequality.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPS / CMPNEQPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpneq_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not less than
-///    the corresponding value in the second operand and returns the result of
-///    the comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnlt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnlt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not less than
-///    or equal to the corresponding value in the second operand and returns
-///    the result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnle_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnle_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not greater
-///    than the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
-///   instructions (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpngt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
-///   instructions (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpngt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not greater
-///    than or equal to the corresponding value in the second operand and
-///    returns the result of the comparison in the low-order bits of a vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
-///   instructions (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnge_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
-///   instructions (with the operands reversed).
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnge_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is ordered with
-///    respect to the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSS / CMPORDSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpord_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPS / CMPORDPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpord_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is unordered
-///    with respect to the corresponding value in the second operand and
-///    returns the result of the comparison in the low-order bits of a vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSS / CMPUNORDSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpunord_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPS / CMPUNORDPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpunord_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);
-}
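-
-/* Illustrative sketch (not part of the original header): a NaN is unordered
-   with everything, including itself, so an unordered self-comparison flags
-   exactly the NaN lanes. The helper name below is hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_isnan_ps(__m128 __x)
-{
-  /* All-ones in each lane holding a NaN, all-zeros elsewhere. */
-  return _mm_cmpunord_ps(__x, __x);
-}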
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for equality and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///    two lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is less than the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is less than or equal to the
-///    second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is greater than the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///     two lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is greater than or equal to
-///    the second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is not equal to the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///     two lower 32-bit values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine equality and returns
-///    the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    less than the second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    less than or equal to the second operand and returns the result of the
-///    comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    greater than the second operand and returns the result of the
-///    comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    greater than or equal to the second operand and returns the result of
-///    the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine inequality and returns
-///    the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);
-}
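-
-/* Illustrative sketch (not part of the original header): unlike the CMP
-   intrinsics, the COMI/UCOMI family returns a plain int, so the results feed
-   directly into scalar control flow; the UCOMI forms differ only in not
-   raising the invalid exception for quiet NaNs. The helper name below is
-   hypothetical. */
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_scalar_in_range(__m128 __x, __m128 __lo, __m128 __hi)
-{
-  /* Both tests return 0 when a NaN is involved, so NaN is out of range. */
-  return _mm_ucomige_ss(__x, __lo) && _mm_ucomile_ss(__x, __hi);
-}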
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer, rounding the result according to
-///    the current rounding mode in MXCSR.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtss_si32(__m128 __a)
-{
-  return __builtin_ia32_cvtss2si((__v4sf)__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvt_ss2si(__m128 __a)
-{
-  return _mm_cvtss_si32(__a);
-}
-
-#ifdef __x86_64__
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 64-bit integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 64-bit integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtss_si64(__m128 __a)
-{
-  return __builtin_ia32_cvtss2si64((__v4sf)__a);
-}
-
-#endif
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi32(__m128 __a)
-{
-  return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);
-}
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvt_ps2pi(__m128 __a)
-{
-  return _mm_cvtps_pi32(__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttss_si32(__m128 __a)
-{
-  return __builtin_ia32_cvttss2si((__v4sf)__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtt_ss2si(__m128 __a)
-{
-  return _mm_cvttss_si32(__a);
-}
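-
-/* Illustrative sketch (not part of the original header): CVTSS2SI rounds
-   according to the current rounding mode in MXCSR (round-to-nearest-even by
-   default), whereas CVTTSS2SI always truncates toward zero. The helper name
-   below is hypothetical. */
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_round_vs_truncate(void)
-{
-  __m128 __v = __extension__ (__m128){ 2.5f, 0, 0, 0 };
-  /* Under the default rounding mode, round-to-nearest-even takes 2.5 to 2
-     and truncation also gives 2, so this returns 4. */
-  return _mm_cvtss_si32(__v) + _mm_cvttss_si32(__v);
-}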
-
-#ifdef __x86_64__
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 64-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 64-bit integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttss_si64(__m128 __a)
-{
-  return __builtin_ia32_cvttss2si64((__v4sf)__a);
-}
-#endif
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32], truncating the result
-///    when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttps_pi32(__m128 __a)
-{
-  return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
-}
-
-/// Converts two low-order float values in a 128-bit vector of [4 x
-///    float] into a 64-bit vector of [2 x i32], truncating the result when it
-///    is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtt_ps2pi(__m128 __a)
-{
-  return _mm_cvttps_pi32(__a);
-}
-
-/// Converts a 32-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination vector are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 32-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsi32_ss(__m128 __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts a 32-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 32-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvt_si2ss(__m128 __a, int __b)
-{
-  return _mm_cvtsi32_ss(__a, __b);
-}
-
-#ifdef __x86_64__
-
-/// Converts a 64-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsi64_ss(__m128 __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-#endif
-
-/// Converts two elements of a 64-bit vector of [2 x i32] into two
-///    floating-point values and writes them to the lower 64 bits of the
-///    destination. The remaining higher order elements of the destination are
-///    copied from the corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
-///    and written to the corresponding low-order elements in the destination.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_ps(__m128 __a, __m64 __b)
-{
-  return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);
-}
-
-/// Converts two elements of a 64-bit vector of [2 x i32] into two
-///    floating-point values and writes them to the lower 64 bits of the
-///    destination. The remaining higher order elements of the destination are
-///    copied from the corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
-///    and written to the corresponding low-order elements in the destination.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted value from the second operand. The upper 64 bits are copied
-///    from the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvt_pi2ps(__m128 __a, __m64 __b)
-{
-  return _mm_cvtpi32_ps(__a, __b);
-}
-
-/// Extracts a float value contained in the lower 32 bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the extraction.
-/// \returns A 32-bit float containing the extracted value.
-static __inline__ float __DEFAULT_FN_ATTRS
-_mm_cvtss_f32(__m128 __a)
-{
-  return __a[0];
-}
-
-/// Loads two packed float values from the address \a __p into the
-///     high-order bits of a 128-bit vector of [4 x float]. The low-order bits
-///     are copied from the low-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPS / MOVHPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]
-///    of the destination.
-/// \param __p
-///    A pointer to two packed float values. Bits [63:0] are written to bits
-///    [127:64] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadh_pi(__m128 __a, const __m64 *__p)
-{
-  typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_loadh_pi_struct {
-    __mm_loadh_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  __mm_loadh_pi_v2f32 __b = ((const struct __mm_loadh_pi_struct*)__p)->__u;
-  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
-  return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
-}
-
-/// Loads two packed float values from the address \a __p into the
-///    low-order bits of a 128-bit vector of [4 x float]. The high-order bits
-///    are copied from the high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. Bits [127:64] are written to bits
-///    [127:64] of the destination.
-/// \param __p
-///    A pointer to two packed float values. Bits [63:0] are written to bits
-///    [63:0] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadl_pi(__m128 __a, const __m64 *__p)
-{
-  typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_loadl_pi_struct {
-    __mm_loadl_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  __mm_loadl_pi_v2f32 __b = ((const struct __mm_loadl_pi_struct*)__p)->__u;
-  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
-  return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
-}
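-
-/* Illustrative sketch (not part of the original header): _mm_loadl_pi and
-   _mm_loadh_pi compose to assemble a full [4 x float] from two separate
-   64-bit memory locations. The helper name below is hypothetical. */
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-__example_load_two_halves(const __m64 *__lo, const __m64 *__hi)
-{
-  __m128 __v = __extension__ (__m128){ 0, 0, 0, 0 };
-  __v = _mm_loadl_pi(__v, __lo);    /* fills bits [63:0] */
-  return _mm_loadh_pi(__v, __hi);   /* fills bits [127:64] */
-}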
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits of the vector are initialized with the single-precision
-///    floating-point value loaded from a specified memory location. The upper
-///    96 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location containing a single-precision
-///    floating-point value.
-/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
-///    lower 32 bits contain the value loaded from the memory location. The
-///    upper 96 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load_ss(const float *__p)
-{
-  struct __mm_load_ss_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  float __u = ((const struct __mm_load_ss_struct*)__p)->__u;
-  return __extension__ (__m128){ __u, 0, 0, 0 };
-}
-
-/// Loads a 32-bit float value and duplicates it to all four vector
-///    elements of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTSS / MOVSS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a float value to be loaded and duplicated.
-/// \returns A 128-bit vector of [4 x float] containing the loaded and
-///    duplicated values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load1_ps(const float *__p)
-{
-  struct __mm_load1_ps_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  float __u = ((const struct __mm_load1_ps_struct*)__p)->__u;
-  return __extension__ (__m128){ __u, __u, __u, __u };
-}
-
-#define _mm_load_ps1(p) _mm_load1_ps(p)
-
-/// Loads a 128-bit floating-point vector of [4 x float] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load_ps(const float *__p)
-{
-  return *(const __m128*)__p;
-}
-
-/// Loads a 128-bit floating-point vector of [4 x float] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadu_ps(const float *__p)
-{
-  struct __loadu_ps {
-    __m128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
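-
-/* Illustrative sketch, not part of the original header: _mm_load_ps requires
-   a 16-byte-aligned address, while _mm_loadu_ps accepts any address. The
-   __example_* name is hypothetical; the block is compiled out via #if 0. */
-#if 0
-static void __example_loads(void)
-{
-  float __buf[5] __attribute__((aligned(16))) = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f};
-  __m128 __a = _mm_load_ps(__buf);      /* OK: __buf is 16-byte aligned. */
-  __m128 __u = _mm_loadu_ps(__buf + 1); /* OK: no alignment requirement. */
-  (void)__a; (void)__u;
-}
-#endif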
-
-/// Loads four packed float values, in reverse order, from an aligned
-///    memory location to 32-bit elements in a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded
-///    in reverse order.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadr_ps(const float *__p)
-{
-  __m128 __a = _mm_load_ps(__p);
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
-}
-
-/// Creates a 128-bit vector of [4 x float] with undefined values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x float] containing undefined values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_undefined_ps(void)
-{
-  return (__m128)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits of the vector are initialized with the specified single-precision
-///    floating-point value. The upper 96 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize the lower 32
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
-///    lower 32 bits contain the value provided in the source operand. The
-///    upper 96 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ss(float __w)
-{
-  return __extension__ (__m128){ __w, 0, 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float], with each
-///    of the four single-precision floating-point vector elements set to the
-///    specified single-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set1_ps(float __w)
-{
-  return __extension__ (__m128){ __w, __w, __w, __w };
-}
-
-/* Microsoft specific. */
-/// Constructs a 128-bit floating-point vector of [4 x float], with each
-///    of the four single-precision floating-point vector elements set to the
-///    specified single-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ps1(float __w)
-{
-    return _mm_set1_ps(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]
-///    initialized with the specified single-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __z
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \param __y
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __x
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __w
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ps(float __z, float __y, float __x, float __w)
-{
-  return __extension__ (__m128){ __w, __x, __y, __z };
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float],
-///    initialized in reverse order with the specified 32-bit single-precision
-///    float-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __z
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \param __y
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __x
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __w
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_setr_ps(float __z, float __y, float __x, float __w)
-{
-  return __extension__ (__m128){ __z, __y, __x, __w };
-}
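-
-/* Illustrative sketch, not part of the original header: _mm_set_ps takes its
-   arguments from the high element down, _mm_setr_ps from the low element up,
-   so these two calls build the same vector { 0, 1, 2, 3 }. The __example_*
-   name is hypothetical; the block is compiled out via #if 0. */
-#if 0
-static void __example_set(void)
-{
-  __m128 __a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);  /* elements 0..3 = 0,1,2,3 */
-  __m128 __b = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f); /* same vector */
-  (void)__a; (void)__b;
-}
-#endif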
-
-/// Constructs a 128-bit floating-point vector of [4 x float] initialized
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [4 x float] with
-///    all elements set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_setzero_ps(void)
-{
-  return __extension__ (__m128){ 0, 0, 0, 0 };
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPS / MOVHPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pi(__m64 *__p, __m128 __a)
-{
-  typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_storeh_pi_struct {
-    __mm_storeh_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3);
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
-///     memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the float values.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pi(__m64 *__p, __m128 __a)
-{
-  typedef float __mm_storel_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_storel_pi_struct {
-    __mm_storel_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
-///     memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ss(float *__p, __m128 __a)
-{
-  struct __mm_store_ss_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit vector of [4 x float] to an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_ps(float *__p, __m128 __a)
-{
-  struct __storeu_ps {
-    __m128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__p)->__v = __a;
-}
-
-/// Stores a 128-bit vector of [4 x float] into an aligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ps(float *__p, __m128 __a)
-{
-  *(__m128*)__p = __a;
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
-///    four contiguous elements in an aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-///    of the four contiguous elements pointed to by \a __p.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_ps(float *__p, __m128 __a)
-{
-  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0);
-  _mm_store_ps(__p, __a);
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
-///    four contiguous elements in an aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-///    of the four contiguous elements pointed to by \a __p.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ps1(float *__p, __m128 __a)
-{
-  _mm_store1_ps(__p, __a);
-}
-
-/// Stores float values from a 128-bit vector of [4 x float] to an
-///    aligned memory location in reverse order.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_ps(float *__p, __m128 __a)
-{
-  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
-  _mm_store_ps(__p, __a);
-}
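-
-/* Illustrative sketch, not part of the original header: storing the vector
-   { 0, 1, 2, 3 } with _mm_storer_ps writes 3, 2, 1, 0 to memory. The
-   __example_* name is hypothetical; the block is compiled out via #if 0. */
-#if 0
-static void __example_stores(void)
-{
-  float __out[4] __attribute__((aligned(16)));
-  __m128 __v = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
-  _mm_store_ps(__out, __v);  /* __out = { 0, 1, 2, 3 } */
-  _mm_storer_ps(__out, __v); /* __out = { 3, 2, 1, 0 } */
-}
-#endif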
-
-#define _MM_HINT_ET0 7
-#define _MM_HINT_ET1 6
-#define _MM_HINT_T0  3
-#define _MM_HINT_T1  2
-#define _MM_HINT_T2  1
-#define _MM_HINT_NTA 0
-
-#ifndef _MSC_VER
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
-   Sema doesn't do any form of constant propagation yet. */
-
-/// Loads one cache line of data from the specified address to a location
-///    closer to the processor.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// void _mm_prefetch(const void * a, const int sel);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
-///
-/// \param a
-///    A pointer to a memory location containing a cache line of data.
-/// \param sel
-///    A predefined integer constant specifying the type of prefetch
-///    operation: \n
-///    _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The
-///    PREFETCHNTA instruction will be generated. \n
-///    _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will
-///    be generated. \n
-///    _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will
-///    be generated. \n
-///    _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
-///    be generated.
-#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \
-                                                 ((sel) >> 2) & 1, (sel) & 0x3))
-#endif
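-
-/* Illustrative sketch, not part of the original header: prefetching ahead of
-   a streaming read; the hint constant selects which PREFETCH instruction is
-   generated. The __example_* name is hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_prefetch(const float *__data)
-{
-  _mm_prefetch(__data + 16, _MM_HINT_T0);  /* PREFETCHT0: all cache levels */
-  _mm_prefetch(__data + 32, _MM_HINT_NTA); /* PREFETCHNTA: non-temporal */
-}
-#endif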
-
-/// Stores a 64-bit integer in the specified aligned memory location. To
-///    minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTQ </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location used to store the register value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS_MMX
-_mm_stream_pi(__m64 *__p, __m64 __a)
-{
-  __builtin_ia32_movntq(__p, __a);
-}
-
-/// Moves packed float values from a 128-bit vector of [4 x float] to a
-///    128-bit aligned memory location. To minimize caching, the data is flagged
-///    as non-temporal (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPS / MOVNTPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit aligned memory location that will receive the
-///    single-precision floating-point values.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_ps(float *__p, __m128 __a)
-{
-  __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
-}
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Forces strong memory ordering (serialization) between store
-///    instructions preceding this instruction and store instructions following
-///    this instruction, ensuring the system completes all previous stores
-///    before executing subsequent stores.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> SFENCE </c> instruction.
-///
-void _mm_sfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
-/// Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
-///    returns it, as specified by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_extract_pi16(__m64 a, int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param a
-///    A 64-bit vector of [4 x i16].
-/// \param n
-///    An immediate integer operand that determines which bits are extracted: \n
-///    0: Bits [15:0] are copied to the destination. \n
-///    1: Bits [31:16] are copied to the destination. \n
-///    2: Bits [47:32] are copied to the destination. \n
-///    3: Bits [63:48] are copied to the destination.
-/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
-#define _mm_extract_pi16(a, n) \
-  ((int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n))
-
-/// Copies data from the 64-bit vector of [4 x i16] to the destination,
-///    and inserts the lower 16 bits of an integer operand at the 16-bit offset
-///    specified by the immediate operand \a n.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PINSRW </c> instruction.
-///
-/// \param a
-///    A 64-bit vector of [4 x i16].
-/// \param d
-///    An integer. The lower 16-bit value from this operand is written to the
-///    destination at the offset specified by operand \a n.
-/// \param n
-///    An immediate integer operand that determines which bits in the
-///    destination are written. \n
-///    0: Bits [15:0] are copied to the destination. \n
-///    1: Bits [31:16] are copied to the destination. \n
-///    2: Bits [47:32] are copied to the destination. \n
-///    3: Bits [63:48] are copied to the destination.  \n
-///    The remaining bits in the destination are copied from the corresponding
-///    bits in operand \a a.
-/// \returns A 64-bit integer vector containing the copied packed data from the
-///    operands.
-#define _mm_insert_pi16(a, d, n) \
-  ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n))
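-
-/* Illustrative sketch, not part of the original header: extracting element 2
-   of { 10, 20, 30, 40 } yields 30; inserting 99 at offset 1 yields
-   { 10, 99, 30, 40 }. The __example_* name is hypothetical; compiled out via
-   #if 0. */
-#if 0
-static void __example_pi16(void)
-{
-  __m64 __v = _mm_setr_pi16(10, 20, 30, 40);
-  int __e = _mm_extract_pi16(__v, 2);      /* 30 */
-  __m64 __w = _mm_insert_pi16(__v, 99, 1); /* { 10, 99, 30, 40 } */
-  (void)__e; (void)__w;
-}
-#endif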
-
-/// Compares each of the corresponding packed 16-bit integer values of
-///    the 64-bit integer vectors, and writes the greater value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_max_pi16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Compares each of the corresponding packed 8-bit unsigned integer
-///    values of the 64-bit integer vectors, and writes the greater value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_max_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Compares each of the corresponding packed 16-bit integer values of
-///    the 64-bit integer vectors, and writes the lesser value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMINSW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_min_pi16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Compares each of the corresponding packed 8-bit unsigned integer
-///    values of the 64-bit integer vectors, and writes the lesser value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMINUB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_min_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Takes the most significant bit from each 8-bit element in a 64-bit
-///    integer vector to create an 8-bit mask value. Zero-extends the value to
-///    32-bit integer and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bit from each 8-bit element in \a __a,
-///    written to bits [7:0].
-static __inline__ int __DEFAULT_FN_ATTRS_MMX
-_mm_movemask_pi8(__m64 __a)
-{
-  return __builtin_ia32_pmovmskb((__v8qi)__a);
-}
-
-/// Multiplies packed 16-bit unsigned integer values and writes the
-///    high-order 16 bits of each 32-bit product to the corresponding bits in
-///    the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mulhi_pu16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the
-///    destination, as specified by the immediate value operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m64 _mm_shuffle_pi16(__m64 a, const int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PSHUFW </c> instruction.
-///
-/// \param a
-///    A 64-bit integer vector containing the values to be shuffled.
-/// \param n
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a. The destinations within the 64-bit destination are
-///    assigned values as follows: \n
-///    Bits [1:0] are used to assign values to bits [15:0] in the
-///    destination. \n
-///    Bits [3:2] are used to assign values to bits [31:16] in the
-///    destination. \n
-///    Bits [5:4] are used to assign values to bits [47:32] in the
-///    destination. \n
-///    Bits [7:6] are used to assign values to bits [63:48] in the
-///    destination. \n
-///    Bit value assignments: \n
-///    00: assigned from bits [15:0] of \a a. \n
-///    01: assigned from bits [31:16] of \a a. \n
-///    10: assigned from bits [47:32] of \a a. \n
-///    11: assigned from bits [63:48] of \a a.
-/// \returns A 64-bit integer vector containing the shuffled values.
-#define _mm_shuffle_pi16(a, n) \
-  ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))
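-
-/* Illustrative sketch, not part of the original header: the selector 0x1B
-   (binary 00 01 10 11) picks source elements 3, 2, 1, 0 for destination
-   elements 0..3, i.e. it reverses the four 16-bit elements. The __example_*
-   name is hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_pshufw(void)
-{
-  __m64 __v = _mm_setr_pi16(10, 20, 30, 40);
-  __m64 __r = _mm_shuffle_pi16(__v, 0x1B); /* { 40, 30, 20, 10 } */
-  (void)__r;
-}
-#endif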
-
-/// Conditionally copies the values from each 8-bit element in the first
-///    64-bit integer vector operand to the specified memory location, as
-///    specified by the most significant bit in the corresponding element in the
-///    second 64-bit integer vector operand.
-///
-///    To minimize caching, the data is flagged as non-temporal
-///    (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MASKMOVQ </c> instruction.
-///
-/// \param __d
-///    A 64-bit integer vector containing the values with elements to be copied.
-/// \param __n
-///    A 64-bit integer vector operand. The most significant bit from each 8-bit
-///    element determines whether the corresponding element in operand \a __d
-///    is copied. If the most significant bit of a given element is 1, the
-///    corresponding element in operand \a __d is copied.
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the conditionally
-///    copied integer values. The address of the memory location does not have
-///    to be aligned.
-static __inline__ void __DEFAULT_FN_ATTRS_MMX
-_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
-{
-  __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
-}
-
-/// Computes the rounded averages of the packed unsigned 8-bit integer
-///    values and writes the averages to the corresponding bits in the
-///    destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAVGB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_avg_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Computes the rounded averages of the packed unsigned 16-bit integer
-///    values and writes the averages to the corresponding bits in the
-///    destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAVGW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_avg_pu16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Subtracts the corresponding 8-bit unsigned integer values of the two
-///    64-bit vector operands and computes the absolute value of each
-///    difference. The sum of the eight absolute differences is then written
-///    to bits [15:0] of the destination; the remaining bits [63:16] are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSADBW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the
-///    sets of absolute differences between both operands. The upper bits are
-///    cleared.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sad_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
-}
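-
-/* Illustrative sketch, not part of the original header: the sum of
-   |__a[i] - __b[i]| over the eight byte lanes lands in bits [15:0]. The
-   __example_* name is hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_sad(void)
-{
-  __m64 __a = _mm_setr_pi8(1, 2, 3, 4, 5, 6, 7, 8);
-  __m64 __b = _mm_setr_pi8(8, 7, 6, 5, 4, 3, 2, 1);
-  __m64 __s = _mm_sad_pu8(__a, __b); /* 7+5+3+1+1+3+5+7 = 32 in bits [15:0] */
-  (void)__s;
-}
-#endif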
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Returns the contents of the MXCSR register as a 32-bit unsigned
-///    integer value.
-///
-///    There are several groups of macros associated with this
-///    intrinsic, including:
-///    <ul>
-///    <li>
-///      For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
-///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
-///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
-///      _MM_GET_EXCEPTION_STATE().
-///    </li>
-///    <li>
-///      For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
-///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
-///      There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
-///    </li>
-///    <li>
-///      For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
-///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
-///      _MM_GET_ROUNDING_MODE().
-///    </li>
-///    <li>
-///      For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
-///      There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
-///    </li>
-///    <li>
-///      For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
-///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
-///      _MM_GET_DENORMALS_ZERO_MODE().
-///    </li>
-///    </ul>
-///
-///    For example, the following expression checks if an overflow exception has
-///    occurred:
-///    \code
-///      ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
-///    \endcode
-///
-///    The following expression gets the current rounding mode:
-///    \code
-///      _MM_GET_ROUNDING_MODE()
-///    \endcode
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSTMXCSR / STMXCSR </c> instruction.
-///
-/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
-///    register.
-unsigned int _mm_getcsr(void);
-
-/// Sets the MXCSR register with the 32-bit unsigned integer value.
-///
-///    There are several groups of macros associated with this intrinsic,
-///    including:
-///    <ul>
-///    <li>
-///      For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
-///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
-///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
-///      _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
-///    </li>
-///    <li>
-///      For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
-///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
-///      There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
-///      of these macros.
-///    </li>
-///    <li>
-///      For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
-///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
-///      _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
-///    </li>
-///    <li>
-///      For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
-///      There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
-///      one of these macros.
-///    </li>
-///    <li>
-///      For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
-///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
-///      _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
-///    </li>
-///    </ul>
-///
-///    For example, the following expression causes subsequent floating-point
-///    operations to round up:
-///    \code
-///      _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | _MM_ROUND_UP)
-///    \endcode
-///
-///    The following example sets the DAZ and FTZ flags:
-///    \code
-///    void setFlags() {
-///      _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
-///      _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
-///    }
-///    \endcode
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VLDMXCSR / LDMXCSR </c> instruction.
-///
-/// \param __i
-///    A 32-bit unsigned integer value to be written to the MXCSR register.
-void _mm_setcsr(unsigned int __i);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
-/// Selects 4 float values from the 128-bit operands of [4 x float], as
-///    specified by the immediate value operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPS / SHUFPS </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [4 x float].
-/// \param b
-///    A 128-bit vector of [4 x float].
-/// \param mask
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a and \a b. \n
-///    Bits [3:0] specify the values copied from operand \a a. \n
-///    Bits [7:4] specify the values copied from operand \a b. \n
-///    The destinations within the 128-bit destination are assigned values as
-///    follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] in the
-///    destination. \n
-///    Bits [3:2] are used to assign values to bits [63:32] in the
-///    destination. \n
-///    Bits [5:4] are used to assign values to bits [95:64] in the
-///    destination. \n
-///    Bits [7:6] are used to assign values to bits [127:96] in the
-///    destination. \n
-///    Bit value assignments: \n
-///    00: Bits [31:0] copied from the specified operand. \n
-///    01: Bits [63:32] copied from the specified operand. \n
-///    10: Bits [95:64] copied from the specified operand. \n
-///    11: Bits [127:96] copied from the specified operand.
-/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
-#define _mm_shuffle_ps(a, b, mask) \
-  ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
-                                 (int)(mask)))
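-
-/* Illustrative sketch, not part of the original header: mask 0x44 (binary
-   01 00 01 00) selects elements 0 and 1 from each operand, so the result is
-   the low halves of \a a and \a b concatenated. The __example_* name is
-   hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_shufps(void)
-{
-  __m128 __a = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
-  __m128 __b = _mm_setr_ps(4.0f, 5.0f, 6.0f, 7.0f);
-  __m128 __r = _mm_shuffle_ps(__a, __b, 0x44); /* { 0, 1, 4, 5 } */
-  (void)__r;
-}
-#endif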
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x float] and interleaves them into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPS / UNPCKHPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x float].
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_unpackhi_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x float] and interleaves them into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPS / UNPCKLPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination.  \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_unpacklo_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits are set to the lower 32 bits of the second parameter. The upper
-///    96 bits are set to the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPS / BLENDPS / MOVSS </c>
-///    instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
-///    written to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The lower 32 bits are
-///    written to the lower 32 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_move_ss(__m128 __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    64 bits are set to the upper 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
-///    written to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
-///    written to the lower 64 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_movehl_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    64 bits are set to the lower 64 bits of the first parameter. The upper
-///    64 bits are set to the lower 64 bits of the second parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
-///    written to the upper 64 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_movelh_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);
-}
-
-/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
-///    float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [4 x i16]. The elements of the destination are copied
-///    from the corresponding elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi16_ps(__m64 __a)
-{
-  __m64 __b, __c;
-  __m128 __r;
-
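-  /* Comparing 0 > __a produces 0xFFFF in each lane holding a negative
-     element, i.e. that lane's sign mask; unpacking __a with the mask
-     sign-extends each 16-bit value to 32 bits for the conversions below. */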
-  __b = _mm_setzero_si64();
-  __b = _mm_cmpgt_pi16(__b, __a);
-  __c = _mm_unpackhi_pi16(__a, __b);
-  __r = _mm_setzero_ps();
-  __r = _mm_cvtpi32_ps(__r, __c);
-  __r = _mm_movelh_ps(__r, __r);
-  __c = _mm_unpacklo_pi16(__a, __b);
-  __r = _mm_cvtpi32_ps(__r, __c);
-
-  return __r;
-}
-
-/// Converts a 64-bit vector of 16-bit unsigned integer values into a
-///    128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of 16-bit unsigned integer values. The elements of the
-///    destination are copied from the corresponding elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpu16_ps(__m64 __a)
-{
-  __m64 __b, __c;
-  __m128 __r;
-
-  __b = _mm_setzero_si64();
-  __c = _mm_unpackhi_pi16(__a, __b);
-  __r = _mm_setzero_ps();
-  __r = _mm_cvtpi32_ps(__r, __c);
-  __r = _mm_movelh_ps(__r, __r);
-  __c = _mm_unpacklo_pi16(__a, __b);
-  __r = _mm_cvtpi32_ps(__r, __c);
-
-  return __r;
-}
-
-/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
-///    into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [8 x i8]. The elements of the destination are copied
-///    from the corresponding lower 4 elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi8_ps(__m64 __a)
-{
-  __m64 __b;
-
-  __b = _mm_setzero_si64();
-  __b = _mm_cmpgt_pi8(__b, __a);
-  __b = _mm_unpacklo_pi8(__a, __b);
-
-  return _mm_cvtpi16_ps(__b);
-}
-
-/// Converts the lower four unsigned 8-bit integer values from a 64-bit
-///    vector of [8 x u8] into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of unsigned 8-bit integer values. The elements of the
-///    destination are copied from the corresponding lower 4 elements in this
-///    operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpu8_ps(__m64 __a)
-{
-  __m64 __b;
-
-  __b = _mm_setzero_si64();
-  __b = _mm_unpacklo_pi8(__a, __b);
-
-  return _mm_cvtpi16_ps(__b);
-}
-
-/// Converts the two 32-bit signed integer values from each 64-bit vector
-///    operand of [2 x i32] into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32]. The lower elements of the destination are
-///    copied from the elements in this operand.
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The upper elements of the destination are
-///    copied from the elements in this operand.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    copied and converted values from the first operand. The upper 64 bits
-///    contain the copied and converted values from the second operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
-{
-  __m128 __c;
-
-  __c = _mm_setzero_ps();
-  __c = _mm_cvtpi32_ps(__c, __b);
-  __c = _mm_movelh_ps(__c, __c);
-
-  return _mm_cvtpi32_ps(__c, __a);
-}
-
-/// Converts each single-precision floating-point element of a 128-bit
-///    floating-point vector of [4 x float] into a 16-bit signed integer, and
-///    packs the results into a 64-bit integer vector of [4 x i16].
-///
-///    If the floating-point element is NaN or infinity, or if the
-///    floating-point element is greater than 0x7FFFFFFF or less than -0x8000,
-///    it is converted to 0x8000. Otherwise if the floating-point element is
-///    greater than 0x7FFF, it is converted to 0x7FFF.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 64-bit integer vector of [4 x i16] containing the converted
-///    values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi16(__m128 __a)
-{
-  __m64 __b, __c;
-
-  __b = _mm_cvtps_pi32(__a);
-  __a = _mm_movehl_ps(__a, __a);
-  __c = _mm_cvtps_pi32(__a);
-
-  return _mm_packs_pi32(__b, __c);
-}
-
-/// Converts each single-precision floating-point element of a 128-bit
-///    floating-point vector of [4 x float] into an 8-bit signed integer, and
-///    packs the results into the lower 32 bits of a 64-bit integer vector of
-///    [8 x i8]. The upper 32 bits of the vector are set to 0.
-///
-///    If the floating-point element is NaN or infinity, or if the
-///    floating-point element is greater than 0x7FFFFFFF or less than -0x80, it
-///    is converted to 0x80. Otherwise if the floating-point element is greater
-///    than 0x7F, it is converted to 0x7F.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    128-bit floating-point vector of [4 x float].
-/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
-///    converted values and the upper 32 bits are set to zero.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi8(__m128 __a)
-{
-  __m64 __b, __c;
-
-  __b = _mm_cvtps_pi16(__a);
-  __c = _mm_setzero_si64();
-
-  return _mm_packs_pi16(__b, __c);
-}
-
-/// Extracts the sign bits from each single-precision floating-point
-///    element of a 128-bit floating-point vector of [4 x float] and returns the
-///    sign bits in bits [3:0] of the result. Bits [31:4] of the result are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPS / MOVMSKPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
-///    single-precision floating-point element of the parameter. Bits [31:4] are
-///    set to zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_ps(__m128 __a)
-{
-  return __builtin_ia32_movmskps((__v4sf)__a);
-}
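-
-/* Illustrative sketch, not part of the original header: negative lanes set
-   the corresponding mask bits. The __example_* name is hypothetical;
-   compiled out via #if 0. */
-#if 0
-static void __example_movemask(void)
-{
-  __m128 __v = _mm_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f);
-  int __m = _mm_movemask_ps(__v); /* 0b0101 = 5: lanes 0 and 2 are negative */
-  (void)__m;
-}
-#endif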
-
-
-#define _MM_ALIGN16 __attribute__((aligned(16)))
-
-#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
-
-#define _MM_EXCEPT_INVALID    (0x0001U)
-#define _MM_EXCEPT_DENORM     (0x0002U)
-#define _MM_EXCEPT_DIV_ZERO   (0x0004U)
-#define _MM_EXCEPT_OVERFLOW   (0x0008U)
-#define _MM_EXCEPT_UNDERFLOW  (0x0010U)
-#define _MM_EXCEPT_INEXACT    (0x0020U)
-#define _MM_EXCEPT_MASK       (0x003fU)
-
-#define _MM_MASK_INVALID      (0x0080U)
-#define _MM_MASK_DENORM       (0x0100U)
-#define _MM_MASK_DIV_ZERO     (0x0200U)
-#define _MM_MASK_OVERFLOW     (0x0400U)
-#define _MM_MASK_UNDERFLOW    (0x0800U)
-#define _MM_MASK_INEXACT      (0x1000U)
-#define _MM_MASK_MASK         (0x1f80U)
-
-#define _MM_ROUND_NEAREST     (0x0000U)
-#define _MM_ROUND_DOWN        (0x2000U)
-#define _MM_ROUND_UP          (0x4000U)
-#define _MM_ROUND_TOWARD_ZERO (0x6000U)
-#define _MM_ROUND_MASK        (0x6000U)
-
-#define _MM_FLUSH_ZERO_MASK   (0x8000U)
-#define _MM_FLUSH_ZERO_ON     (0x8000U)
-#define _MM_FLUSH_ZERO_OFF    (0x0000U)
-
-#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
-#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
-#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
-#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
-
-#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
-#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
-#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
-#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
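-
-/* Illustrative sketch, not part of the original header: each _MM_SET_*
-   wrapper clears its MXCSR field with the matching mask before OR-ing in the
-   new value, so the modes can be changed independently. The __example_* name
-   is hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_mxcsr(void)
-{
-  unsigned int __old = _MM_GET_ROUNDING_MODE();
-  _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO); /* truncate from here on */
-  _MM_SET_ROUNDING_MODE(__old);                 /* restore the saved mode */
-}
-#endif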
-
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
-do { \
-  __m128 tmp3, tmp2, tmp1, tmp0; \
-  tmp0 = _mm_unpacklo_ps((row0), (row1)); \
-  tmp2 = _mm_unpacklo_ps((row2), (row3)); \
-  tmp1 = _mm_unpackhi_ps((row0), (row1)); \
-  tmp3 = _mm_unpackhi_ps((row2), (row3)); \
-  (row0) = _mm_movelh_ps(tmp0, tmp2); \
-  (row1) = _mm_movehl_ps(tmp2, tmp0); \
-  (row2) = _mm_movelh_ps(tmp1, tmp3); \
-  (row3) = _mm_movehl_ps(tmp3, tmp1); \
-} while (0)
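-
-/* Illustrative sketch, not part of the original header: transposing a 4x4
-   matrix held one row per vector; the macro rewrites its arguments in place.
-   The __example_* name is hypothetical; compiled out via #if 0. */
-#if 0
-static void __example_transpose(void)
-{
-  __m128 __r0 = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
-  __m128 __r1 = _mm_setr_ps(4.0f, 5.0f, 6.0f, 7.0f);
-  __m128 __r2 = _mm_setr_ps(8.0f, 9.0f, 10.0f, 11.0f);
-  __m128 __r3 = _mm_setr_ps(12.0f, 13.0f, 14.0f, 15.0f);
-  _MM_TRANSPOSE4_PS(__r0, __r1, __r2, __r3);
-  /* __r0 = { 0, 4, 8, 12 }, __r1 = { 1, 5, 9, 13 }, and so on. */
-}
-#endif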
-
-/* Aliases for compatibility. */
-#define _m_pextrw _mm_extract_pi16
-#define _m_pinsrw _mm_insert_pi16
-#define _m_pmaxsw _mm_max_pi16
-#define _m_pmaxub _mm_max_pu8
-#define _m_pminsw _mm_min_pi16
-#define _m_pminub _mm_min_pu8
-#define _m_pmovmskb _mm_movemask_pi8
-#define _m_pmulhuw _mm_mulhi_pu16
-#define _m_pshufw _mm_shuffle_pi16
-#define _m_maskmovq _mm_maskmove_si64
-#define _m_pavgb _mm_avg_pu8
-#define _m_pavgw _mm_avg_pu16
-#define _m_psadbw _mm_sad_pu8
-#define _m_ _mm_
-
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-/* Ugly hack for backwards-compatibility (compatible with gcc) */
-#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
-#include <emmintrin.h>
-#endif
-
-#endif /* __XMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_builtin_vars.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_complex_builtins.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_device_functions.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
copy to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
index e0875bb..b87413e 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
+++ b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_intrinsics.h
@@ -71,8 +71,8 @@
   }                                                                            \
   inline __device__ unsigned long long __FnName(                               \
       unsigned long long __val, __Type __offset, int __width = warpSize) {     \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        static_cast<unsigned long long>(__val), __offset, __width));           \
+    return static_cast<unsigned long long>(                                    \
+        ::__FnName(static_cast<long long>(__val), __offset, __width));         \
   }                                                                            \
   inline __device__ double __FnName(double __val, __Type __offset,             \
                                     int __width = warpSize) {                  \
@@ -139,8 +139,8 @@
   inline __device__ unsigned long long __FnName(                               \
       unsigned int __mask, unsigned long long __val, __Type __offset,          \
       int __width = warpSize) {                                                \
-    return static_cast<unsigned long long>(::__FnName(                         \
-        __mask, static_cast<unsigned long long>(__val), __offset, __width));   \
+    return static_cast<unsigned long long>(                                    \
+        ::__FnName(__mask, static_cast<long long>(__val), __offset, __width)); \
   }                                                                            \
   inline __device__ long __FnName(unsigned int __mask, long __val,             \
                                   __Type __offset, int __width = warpSize) {   \
@@ -234,7 +234,7 @@
   return __nvvm_match_any_sync_i32(mask, value);
 }
 
-inline __device__ unsigned long long
+inline __device__ unsigned int
 __match64_any_sync(unsigned int mask, unsigned long long value) {
   return __nvvm_match_any_sync_i64(mask, value);
 }
@@ -244,7 +244,7 @@
   return __nvvm_match_all_sync_i32p(mask, value, pred);
 }
 
-inline __device__ unsigned long long
+inline __device__ unsigned int
 __match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
   return __nvvm_match_all_sync_i64p(mask, value, pred);
 }
@@ -509,7 +509,7 @@
 __device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {
   return (void *)(void __attribute__((address_space(5))) *)__ptr;
 }
-__device__ inline uint32_t __nvvm_get_smem_pointer(void *__ptr) {
+__device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
   return __nv_cvta_generic_to_shared_impl(__ptr);
 }
 } // extern "C"
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
copy to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
index 538556f..e447590 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_math.h
@@ -345,4 +345,4 @@
 #pragma pop_macro("__DEVICE_VOID__")
 #pragma pop_macro("__FAST_OR_SLOW")
 
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_math_forward_declares.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_runtime_wrapper.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h b/linux-x86/lib64/clang/16.0.2/include/__clang_cuda_texture_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_cuda_texture_intrinsics.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h b/linux-x86/lib64/clang/16.0.2/include/__clang_hip_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_hip_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h b/linux-x86/lib64/clang/16.0.2/include/__clang_hip_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_hip_libdevice_declares.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_math.h b/linux-x86/lib64/clang/16.0.2/include/__clang_hip_math.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_math.h
rename to linux-x86/lib64/clang/16.0.2/include/__clang_hip_math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
similarity index 80%
copy from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
copy to linux-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
index 73021d2..10cec58 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ b/linux-x86/lib64/clang/16.0.2/include/__clang_hip_runtime_wrapper.h
@@ -50,6 +50,9 @@
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
 #else
 typedef __SIZE_TYPE__ size_t;
 // Define macros which are needed to declare HIP device API's without standard
@@ -74,25 +77,35 @@
 extern "C" {
 #endif //__cplusplus
 
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+  return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#else  // HIP version check
 #if __HIP_ENABLE_DEVICE_MALLOC__
 __device__ void *__hip_malloc(__hip_size_t __size);
 __device__ void *__hip_free(void *__ptr);
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   return __hip_malloc(__size);
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __hip_free(__ptr);
 }
 #else
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   __builtin_trap();
   return (void *)0;
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
   __builtin_trap();
-  return (void *)0;
 }
 #endif
+#endif // HIP version check
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/linux-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h b/linux-x86/lib64/clang/16.0.2/include/__stddef_max_align_t.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h
rename to linux-x86/lib64/clang/16.0.2/include/__stddef_max_align_t.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h b/linux-x86/lib64/clang/16.0.2/include/__wmmintrin_aes.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h
rename to linux-x86/lib64/clang/16.0.2/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
similarity index 76%
copy from darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
copy to linux-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
index fef4b93..c9a6d50 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
+++ b/linux-x86/lib64/clang/16.0.2/include/__wmmintrin_pclmul.h
@@ -22,23 +22,23 @@
 /// \headerfile <x86intrin.h>
 ///
 /// \code
-/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);
 /// \endcode
 ///
 /// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
 ///
-/// \param __X
+/// \param X
 ///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __Y
+/// \param Y
 ///    A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __I
+/// \param I
 ///    An immediate value specifying which 64-bit values to select from the
-///    operands. Bit 0 is used to select a value from operand \a __X, and bit
-///    4 is used to select a value from operand \a __Y: \n
-///    Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
-///    Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
-///    Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
-///    Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
+///    operands. Bit 0 is used to select a value from operand \a X, and bit
+///    4 is used to select a value from operand \a Y: \n
+///    Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n
+///    Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n
+///    Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n
+///    Bit[4]=1 indicates that bits[127:64] of operand \a Y are used.
 /// \returns The 128-bit integer vector containing the result of the carry-less
 ///    multiplication of the selected 64-bit values.
 #define _mm_clmulepi64_si128(X, Y, I) \
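
Usage sketch (illustrative, not part of this change) for the intrinsic documented above; compile with e.g. clang -mpclmul on x86-64. Immediate 0x00 selects the low 64-bit lane of both operands, per the Bit[0]/Bit[4] table.

    #include <wmmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128i x = _mm_set_epi64x(0, 0x3); /* low lane: 0b011 */
      __m128i y = _mm_set_epi64x(0, 0x5); /* low lane: 0b101 */
      /* Carry-less multiply of the two low 64-bit lanes. */
      __m128i r = _mm_clmulepi64_si128(x, y, 0x00);
      /* 0b011 clmul 0b101 = 0b1111 */
      printf("0x%llx\n", (unsigned long long)_mm_cvtsi128_si64(r)); /* 0xf */
      return 0;
    }
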
diff --git a/linux-x86/lib64/clang/14.0.2/include/adxintrin.h b/linux-x86/lib64/clang/16.0.2/include/adxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/adxintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/altivec.h b/linux-x86/lib64/clang/16.0.2/include/altivec.h
similarity index 95%
copy from darwin-x86/lib64/clang/14.0.2/include/altivec.h
copy to linux-x86/lib64/clang/16.0.2/include/altivec.h
index 55195b0..0b1e76e 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/altivec.h
+++ b/linux-x86/lib64/clang/16.0.2/include/altivec.h
@@ -311,7 +311,7 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vadduqm(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vadduqm(__a, __b);
 }
 #elif defined(__VSX__)
 static __inline__ vector signed long long __ATTRS_o_ai
@@ -325,9 +325,9 @@
       (vector unsigned int)__a + (vector unsigned int)__b;
   vector unsigned int __carry = __builtin_altivec_vaddcuw(
       (vector unsigned int)__a, (vector unsigned int)__b);
-  __carry = __builtin_shufflevector((vector unsigned char)__carry,
-                                    (vector unsigned char)__carry, 0, 0, 0, 7,
-                                    0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
+  __carry = (vector unsigned int)__builtin_shufflevector(
+      (vector unsigned char)__carry, (vector unsigned char)__carry, 0, 0, 0, 7,
+      0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
   return (vector signed long long)(__res + __carry);
 #endif
 }
@@ -358,7 +358,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_adde(vector signed __int128 __a, vector signed __int128 __b,
          vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -371,7 +373,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vaddeuqm_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 #endif
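
Usage sketch (illustrative, not part of this change): the recurring pattern in this altivec.h diff is that clang 16's AltiVec builtins check element types more strictly, so the header now casts arguments to the unsigned types the builtins (and the new *_c variants) expect; caller-facing signatures such as vec_adde_u128 are unchanged. Requires POWER8, e.g. -mcpu=power8 -maltivec.

    #include <altivec.h>

    /* (a + b + (carry_in & 1)) mod 2^128, with each byte vector viewed
       as one 128-bit integer. */
    vector unsigned char add128_carry(vector unsigned char a,
                                      vector unsigned char b,
                                      vector unsigned char carry_in) {
      return vec_adde_u128(a, b, carry_in);
    }
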
 
@@ -398,7 +402,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addec(vector signed __int128 __a, vector signed __int128 __b,
           vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -411,7 +417,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
                vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vaddecuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 #ifdef __powerpc64__
@@ -600,7 +608,8 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vaddcuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b);
 }
 #endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 
@@ -824,7 +833,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -837,7 +848,8 @@
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vaddcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -850,7 +862,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vaddecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -1806,18 +1820,19 @@
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
   return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
   return (vector bool __int128)__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpeq(vector bool __int128 __a, vector bool  __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpequq(__a, __b);
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 #endif
 
@@ -1887,19 +1902,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
-      (vector bool __int128)__a, (vector bool __int128)__b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpne(vector bool __int128 __a, vector bool __int128 __b) {
-  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(__a, __b));
+  return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b));
 }
 #endif
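
Usage sketch (illustrative, not part of this change; Power10, e.g. -mcpu=power10): vec_cmpeq/vec_cmpne on quadwords return an all-ones/all-zeros mask of type vector bool __int128; the casts above only satisfy vcmpequq's unsigned prototype.

    #include <altivec.h>

    vector bool __int128 eq128(vector unsigned __int128 a,
                               vector unsigned __int128 b) {
      return vec_cmpeq(a, b);   /* all-ones where equal */
    }

    vector bool __int128 ne128(vector unsigned __int128 a,
                               vector unsigned __int128 b) {
      return vec_cmpne(a, b);   /* complement of vec_cmpeq */
    }
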
 
@@ -1944,16 +1960,16 @@
 static __inline__ signed int __ATTRS_o_ai
 vec_cntlz_lsbb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #else
-  return __builtin_altivec_vclzlsbb(__a);
+  return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
 #endif
 }
 
 static __inline__ signed int __ATTRS_o_ai
 vec_cntlz_lsbb(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #else
   return __builtin_altivec_vclzlsbb(__a);
 #endif
@@ -1962,9 +1978,9 @@
 static __inline__ signed int __ATTRS_o_ai
 vec_cnttz_lsbb(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
+  return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
 #else
-  return __builtin_altivec_vctzlsbb(__a);
+  return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
 #endif
 }
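
Usage sketch (illustrative, not part of this change; POWER9): vec_cntlz_lsbb/vec_cnttz_lsbb count leading/trailing byte elements whose least-significant bit is 0; as the hunks show, the header swaps the vclzlsbb/vctzlsbb builtins on little-endian so the count follows element order.

    #include <altivec.h>

    int leading_lsb_zero_bytes(vector unsigned char v) {
      return vec_cntlz_lsbb(v);
    }

    int trailing_lsb_zero_bytes(vector unsigned char v) {
      return vec_cnttz_lsbb(v);
    }
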
 
@@ -1984,7 +2000,7 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
+  return __builtin_altivec_vprtybw((vector unsigned int)__a);
 }
 
 #ifdef __SIZEOF_INT128__
@@ -1995,7 +2011,7 @@
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
+  return __builtin_altivec_vprtybq((vector unsigned __int128)__a);
 }
 #endif
 
@@ -2006,7 +2022,7 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
+  return __builtin_altivec_vprtybd((vector unsigned long long)__a);
 }
 
 #else
@@ -2212,14 +2228,12 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtsq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+  return (vector bool __int128)__builtin_altivec_vcmpgtsq(__a, __b);
 }
 
 static __inline__ vector bool __int128 __ATTRS_o_ai
 vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return (vector bool __int128)__builtin_altivec_vcmpgtuq(
-      (vector bool __int128)__a, (vector bool __int128)__b);
+  return (vector bool __int128)__builtin_altivec_vcmpgtuq(__a, __b);
 }
 #endif
 
@@ -2488,7 +2502,8 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
+  return (vector unsigned char)__builtin_altivec_vpopcntb(
+      (vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_popcnt(vector unsigned char __a) {
@@ -2496,7 +2511,8 @@
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
+  return (vector unsigned short)__builtin_altivec_vpopcnth(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_popcnt(vector unsigned short __a) {
@@ -2504,7 +2520,7 @@
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
+  return __builtin_altivec_vpopcntw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_popcnt(vector unsigned int __a) {
@@ -2512,7 +2528,7 @@
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
+  return __builtin_altivec_vpopcntd((vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_popcnt(vector unsigned long long __a) {
@@ -2524,7 +2540,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
+  return (vector signed char)__builtin_altivec_vclzb((vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_cntlz(vector unsigned char __a) {
@@ -2532,7 +2548,8 @@
 }
 static __inline__ vector signed short __ATTRS_o_ai
 vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
+  return (vector signed short)__builtin_altivec_vclzh(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_cntlz(vector unsigned short __a) {
@@ -2540,7 +2557,7 @@
 }
 static __inline__ vector signed int __ATTRS_o_ai
 vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
+  return (vector signed int)__builtin_altivec_vclzw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_cntlz(vector unsigned int __a) {
@@ -2548,7 +2565,8 @@
 }
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
+  return (vector signed long long)__builtin_altivec_vclzd(
+      (vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_cntlz(vector unsigned long long __a) {
@@ -2562,7 +2580,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
+  return (vector signed char)__builtin_altivec_vctzb((vector unsigned char)__a);
 }
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_cnttz(vector unsigned char __a) {
@@ -2570,7 +2588,8 @@
 }
 static __inline__ vector signed short __ATTRS_o_ai
 vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
+  return (vector signed short)__builtin_altivec_vctzh(
+      (vector unsigned short)__a);
 }
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_cnttz(vector unsigned short __a) {
@@ -2578,7 +2597,7 @@
 }
 static __inline__ vector signed int __ATTRS_o_ai
 vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
+  return (vector signed int)__builtin_altivec_vctzw((vector unsigned int)__a);
 }
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_cnttz(vector unsigned int __a) {
@@ -2586,7 +2605,8 @@
 }
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
+  return (vector signed long long)__builtin_altivec_vctzd(
+      (vector unsigned long long)__a);
 }
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_cnttz(vector unsigned long long __a) {
@@ -3144,7 +3164,8 @@
   vector unsigned char __mask =
       (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
   vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+      (vector unsigned char)__builtin_altivec_vperm_4si(
+          (vector int)__a, (vector int)__a, __mask);
   return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
 }
 #endif
@@ -3181,34 +3202,40 @@
 // the XL-compatible signatures are used for those functions.
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) *      \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)),          \
-             vector signed long long                                           \
-           : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) *        \
-              (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+  _Generic(                                                                    \
+      (__a), vector int                                                        \
+      : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),       \
+        vector unsigned int                                                    \
+      : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a),      \
+                                              (__b)),                          \
+        vector unsigned long long                                              \
+      : (vector float)(__builtin_vsx_xvcvuxdsp(                                \
+                           (vector unsigned long long)(__a)) *                 \
+                       (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
+        vector signed long long                                                \
+      : (vector float)(__builtin_vsx_xvcvsxdsp(                                \
+                           (vector signed long long)(__a)) *                   \
+                       (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
+  _Generic(                                                                    \
+      (__a), vector int                                                        \
+      : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),       \
+        vector unsigned int                                                    \
+      : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a),      \
+                                              (__b)),                          \
+        vector unsigned long long                                              \
+      : (vector float)(__builtin_convertvector(                                \
+                           (vector unsigned long long)(__a), vector double) *  \
+                       (vector double)(vector unsigned long long)((0x3ffULL -  \
+                                                                   (__b))      \
+                                                                  << 52)),     \
+        vector signed long long                                                \
+      : (vector float)(__builtin_convertvector((vector signed long long)(__a), \
+                                               vector double) *                \
+                       (vector double)(vector unsigned long long)((0x3ffULL -  \
+                                                                   (__b))      \
+                                                                  << 52)))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
 #define vec_ctf(__a, __b)                                                      \
@@ -3255,26 +3282,29 @@
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+           : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a),  \
+                                                         (__b)),               \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
-             __builtin_vsx_xvcvdpsxws(__ret);                                  \
+             (vector signed long long)__builtin_vsx_xvcvdpsxws(__ret);         \
            }))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+           : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a),  \
+                                                         (__b)),               \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
+             (vector signed long long)__builtin_convertvector(                 \
+                 __ret, vector signed long long);                              \
            }))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
@@ -3291,26 +3321,29 @@
 #ifdef __XL_COMPAT_ALTIVEC__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+           : (vector unsigned int)__builtin_altivec_vctuxs(                    \
+                 (vector float)(__a), (__b)),                                  \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
-             __builtin_vsx_xvcvdpuxws(__ret);                                  \
+             (vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret);       \
            }))
 #else // __XL_COMPAT_ALTIVEC__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+           : (vector unsigned int)__builtin_altivec_vctuxs(                    \
+                 (vector float)(__a), (__b)),                                  \
              vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
                  (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
+             (vector unsigned long long)__builtin_convertvector(               \
+                 __ret, vector unsigned long long);                            \
            }))
 #endif // __XL_COMPAT_ALTIVEC__
 #else
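
Usage sketch (illustrative, not part of this change) for the vec_cts/vec_ctu hunks above: both scale by 2^b and convert to saturated signed/unsigned integers, and the added casts pin the _Generic result types without changing values. The scale must be a constant in [0,31]; compile with e.g. -maltivec -mvsx.

    #include <altivec.h>

    vector signed int to_fixed_q5(vector float f) {
      return vec_cts(f, 5);   /* f * 2^5, converted with saturation */
    }

    vector unsigned int to_ufixed_q5(vector float f) {
      return vec_ctu(f, 5);
    }
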
@@ -6491,12 +6524,12 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed char)__b);
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
+  return (vector signed char)~(__a & (vector bool char)__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -6506,12 +6539,12 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned char)__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
+  return (vector unsigned char)~(__a & (vector bool char)__b);
 }
 
 static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
@@ -6526,12 +6559,12 @@
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed short)__b);
 }
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
+  return (vector signed short)~(__a & (vector bool short)__b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -6541,7 +6574,7 @@
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned short)__b);
 }
 
 static __inline__ vector bool short __ATTRS_o_ai
@@ -6556,12 +6589,12 @@
 
 static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
                                                           vector bool int __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed int)__b);
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
 vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
+  return (vector signed int)~(__a & (vector bool int)__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -6571,12 +6604,12 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned int)__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
+  return (vector unsigned int)~(__a & (vector bool int)__b);
 }
 
 static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
@@ -6597,12 +6630,12 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector signed long long)__b);
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
+  return (vector signed long long)~(__a & (vector bool long long)__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -6612,12 +6645,12 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
+  return ~(__a & (vector unsigned long long)__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
+  return (vector unsigned long long)~(__a & (vector bool long long)__b);
 }
 
 static __inline__ vector bool long long __ATTRS_o_ai
@@ -7005,12 +7038,12 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
+  return __a | (vector signed char)~__b;
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
+  return (vector signed char)(__a | (vector bool char)~__b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -7020,12 +7053,12 @@
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned char)~__b;
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
+  return (vector unsigned char)(__a | (vector bool char)~__b);
 }
 
 static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
@@ -7040,12 +7073,12 @@
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
+  return __a | (vector signed short)~__b;
 }
 
 static __inline__ vector signed short __ATTRS_o_ai
 vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
+  return (vector signed short)(__a | (vector bool short)~__b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -7055,12 +7088,12 @@
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned short)~__b;
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
+  return (vector unsigned short)(__a | (vector bool short)~__b);
 }
 
 static __inline__ vector bool short __ATTRS_o_ai
@@ -7075,12 +7108,12 @@
 
 static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
                                                          vector bool int __b) {
-  return __a | ~__b;
+  return __a | (vector signed int)~__b;
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
 vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
+  return (vector signed int)(__a | (vector bool int)~__b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -7090,12 +7123,12 @@
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned int)~__b;
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
+  return (vector unsigned int)(__a | (vector bool int)~__b);
 }
 
 static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
@@ -7105,12 +7138,12 @@
 
 static __inline__ vector float __ATTRS_o_ai
 vec_orc(vector bool int __a, vector float __b) {
- return (vector float)(__a | ~(vector unsigned int)__b);
+  return (vector float)(__a | ~(vector bool int)__b);
 }
 
 static __inline__ vector float __ATTRS_o_ai
 vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
+  return (vector float)((vector bool int)__a | ~__b);
 }
 
 static __inline__ vector float __ATTRS_o_ai vec_orc(vector float __a,
@@ -7125,12 +7158,12 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
+  return __a | (vector signed long long)~__b;
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
+  return (vector signed long long)(__a | (vector bool long long)~__b);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -7140,12 +7173,12 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
+  return __a | (vector unsigned long long)~__b;
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
+  return (vector unsigned long long)(__a | (vector bool long long)~__b);
 }
 
 static __inline__ vector bool long long __ATTRS_o_ai
@@ -7155,17 +7188,17 @@
 
 static __inline__ vector double __ATTRS_o_ai
 vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
+  return (vector double)((vector bool long long)__a | ~__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai
 vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
+  return (vector double)(__a | ~(vector bool long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_orc(vector double __a,
                                                      vector double __b) {
-  return (vector double)((vector bool long long)__a |
+  return (vector double)((vector unsigned long long)__a |
                          ~(vector unsigned long long)__b);
 }
 #endif
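
Usage sketch (illustrative, not part of this change; POWER8): vec_nand computes ~(a & b) and vec_orc computes a | ~b; the casts added above keep the mixed bool-vector overloads well-typed under clang 16 without changing results.

    #include <altivec.h>

    vector signed int nand_mask(vector signed int v, vector bool int m) {
      return vec_nand(v, m);  /* ~(v & m) */
    }

    vector signed int or_complement(vector signed int v, vector bool int m) {
      return vec_orc(v, m);   /* v | ~m */
    }
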
@@ -8276,14 +8309,20 @@
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
+  return (vector unsigned long long)__builtin_altivec_vrld(
+      (vector long long)__a, __b);
 }
 #endif
 
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
-  return (__b << __a)|(__b >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __a));
+  return (vector signed __int128)(((vector unsigned __int128)__b
+                                   << (vector unsigned __int128)__a) |
+                                  ((vector unsigned __int128)__b >>
+                                   ((__CHAR_BIT__ *
+                                     sizeof(vector unsigned __int128)) -
+                                    (vector unsigned __int128)__a)));
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -8317,7 +8356,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
          vector signed __int128 __c) {
-  return __builtin_altivec_vrlqmi(__a, __c, __b);
+  return (vector signed __int128)__builtin_altivec_vrlqmi(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__c,
+      (vector unsigned __int128)__b);
 }
 #endif
 
@@ -8370,7 +8411,8 @@
       __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
                               -1, -1, -1, -1, -1, -1, -1);
 #endif
-  return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
+  return (vector signed __int128)__builtin_altivec_vrlqnm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)MaskAndShift);
 }
 #endif
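
Usage sketch (illustrative, not part of this change; POWER8, e.g. -mcpu=power8 -mvsx): vec_rl rotates each element left by the corresponding count; the rewritten quadword overload above now does all its shifting in unsigned arithmetic.

    #include <altivec.h>

    vector unsigned long long rotl64(vector unsigned long long a,
                                     vector unsigned long long b) {
      return vec_rl(a, b);  /* per-element rotate left */
    }
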
 
@@ -12070,13 +12112,15 @@
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vsubcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 #endif
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vsubcuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b);
 }
 #endif // __POWER8_VECTOR__
 
@@ -12298,7 +12342,7 @@
 
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsubuqm(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vsubuqm(__a, __b);
 }
 
 /* vec_vsubeuqm */
@@ -12307,7 +12351,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12319,7 +12365,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_sube(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubeuqm(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12332,7 +12380,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
               vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vsubeuqm_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 /* vec_vsubcuq */
@@ -12340,7 +12390,8 @@
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
+  return (vector signed __int128)__builtin_altivec_vsubcuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12353,7 +12404,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12381,7 +12434,9 @@
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_subec(vector signed __int128 __a, vector signed __int128 __b,
              vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector signed __int128)__builtin_altivec_vsubecuq(
+      (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+      (vector unsigned __int128)__c);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12394,7 +12449,9 @@
 static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
                vector unsigned char __c) {
-  return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
+  return (vector unsigned char)__builtin_altivec_vsubecuq_c(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 #endif // __POWER8_VECTOR__
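
Usage sketch (illustrative, not part of this change; POWER8): 128-bit subtract plus its carry helpers. vec_subc_u128 returns the carry out of a - b (a quadword 1 when a >= b, i.e. no borrow), and the *_c builtin variants introduced above take vector unsigned char directly.

    #include <altivec.h>

    vector unsigned char sub128(vector unsigned char a,
                                vector unsigned char b) {
      return vec_sub_u128(a, b);    /* (a - b) mod 2^128 */
    }

    vector unsigned char sub128_carry(vector unsigned char a,
                                      vector unsigned char b) {
      return vec_subc_u128(a, b);   /* 1 if a >= b (no borrow) */
    }
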
 
@@ -14900,17 +14957,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 #endif
 
@@ -15850,17 +15910,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+                                      __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+                                      (vector signed __int128)__b);
 }
 #endif
 
@@ -16144,17 +16207,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV,
+                                      (vector unsigned __int128)__a, __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(
+      __CR6_EQ_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
 }
 #endif
 
@@ -17124,17 +17190,20 @@
 #if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
                                               vector signed __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV,
+                                      (vector unsigned __int128)__a, __b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
                                               vector unsigned __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a,
+                                      (vector signed __int128)__b);
 }
 
 static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool __int128 __a,
                                               vector bool __int128 __b) {
-  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+  return __builtin_altivec_vcmpequq_p(
+      __CR6_LT_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
 }
 #endif
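
Usage sketch (illustrative, not part of this change; Power10): unlike vec_cmpeq, the vec_all_*/vec_any_* forms return a scalar int derived from the CR6 predicate builtin; the mixed unsigned/signed casts above match vcmpequq_p's prototype and do not affect the comparison.

    #include <altivec.h>

    int all_equal(vector unsigned __int128 a, vector unsigned __int128 b) {
      return vec_all_eq(a, b);   /* 1 iff every element compares equal */
    }

    int any_differs(vector unsigned __int128 a, vector unsigned __int128 b) {
      return vec_any_ne(a, b);
    }
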
 
@@ -17297,13 +17366,17 @@
 static __inline__ vector bool char __ATTRS_o_ai
 vec_permxor(vector bool char __a, vector bool char __b,
             vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+  return (vector bool char)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_permxor(vector signed char __a, vector signed char __b,
             vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+  return (vector signed char)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b,
+      (vector unsigned char)__c);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -17365,7 +17438,7 @@
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector signed char)__builtin_altivec_vgbbd((vector unsigned char)__a);
 }
 
 #define vec_pmsum_be __builtin_crypto_vpmsumb
@@ -17378,23 +17451,25 @@
 
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_gbb(vector signed long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector signed long long)__builtin_altivec_vgbbd(
+      (vector unsigned char)__a);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_gbb(vector unsigned long long __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+  return (vector unsigned long long)__builtin_altivec_vgbbd(
+      (vector unsigned char)__a);
 }
 
 static __inline__ vector long long __ATTRS_o_ai
 vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
+  return (vector long long)__builtin_altivec_vbpermq((vector unsigned char)__a,
+                                                     (vector unsigned char)__b);
 }
 
 static __inline__ vector long long __ATTRS_o_ai
 vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
+  return (vector long long)__builtin_altivec_vbpermq(__a, __b);
 }
 
 #if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
@@ -17406,7 +17481,7 @@
 #endif
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_bperm(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
+  return (vector unsigned char)__builtin_altivec_vbpermq(__a, __b);
 }
 #endif // __POWER8_VECTOR__
 #ifdef __POWER9_VECTOR__
@@ -17777,26 +17852,26 @@
 #if defined(__POWER10_VECTOR__) && defined(__VSX__) &&                         \
     defined(__SIZEOF_INT128__)
 
-/* vect_xl_sext */
+/* vec_xl_sext */
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
-  return (vector unsigned __int128)*(__pointer + __offset);
+  return (vector signed __int128)*(__pointer + __offset);
 }
 
 /* vec_xl_zext */
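
Usage sketch (illustrative, not part of this change; Power10): the vec_xl_sext hunk above is a genuine bug fix rather than a cast cleanup — the comment typo vect_xl_sext is corrected and the sign-extending loads now return vector signed __int128 instead of vector unsigned __int128.

    #include <altivec.h>
    #include <stddef.h>

    /* Loads *(p + off) and sign-extends it into one 128-bit element. */
    vector signed __int128 load_sext32(const signed int *p, ptrdiff_t off) {
      return vec_xl_sext(off, p);
    }
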
@@ -18260,13 +18335,17 @@
 
 #define vec_cntm(__a, __mp)                                                    \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_altivec_vcntmbb((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbb((vector unsigned char)(__a),            \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned short                                             \
-           : __builtin_altivec_vcntmbh((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbh((vector unsigned short)(__a),           \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned int                                               \
-           : __builtin_altivec_vcntmbw((__a), (unsigned char)(__mp)),          \
+           : __builtin_altivec_vcntmbw((vector unsigned int)(__a),             \
+                                       (unsigned char)(__mp)),                 \
              vector unsigned long long                                         \
-           : __builtin_altivec_vcntmbd((__a), (unsigned char)(__mp)))
+           : __builtin_altivec_vcntmbd((vector unsigned long long)(__a),       \
+                                       (unsigned char)(__mp)))
 
 /* vec_gen[b|h|w|d|q]m */
 
@@ -18327,43 +18406,52 @@
 #ifdef __SIZEOF_INT128__
 #define vec_ternarylogic(__a, __b, __c, __imm)                                 \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned char)__builtin_vsx_xxeval(                       \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned short)__builtin_vsx_xxeval(                      \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned int)__builtin_vsx_xxeval(                        \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned long long)__builtin_vsx_xxeval(                  \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned __int128                                          \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
+           : (vector unsigned __int128)__builtin_vsx_xxeval(                   \
+               (vector unsigned long long)(__a),                               \
+               (vector unsigned long long)(__b),                               \
+               (vector unsigned long long)(__c), (__imm)))
 #else
 #define vec_ternarylogic(__a, __b, __c, __imm)                                 \
   _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned char)__builtin_vsx_xxeval(                       \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned short)__builtin_vsx_xxeval(                      \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
+           : (vector unsigned int)__builtin_vsx_xxeval(                        \
+                 (vector unsigned long long)(__a),                             \
+                 (vector unsigned long long)(__b),                             \
+                 (vector unsigned long long)(__c), (__imm)),                   \
              vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
+           : (vector unsigned long long)__builtin_vsx_xxeval(                  \
+               (vector unsigned long long)(__a),                               \
+               (vector unsigned long long)(__b),                               \
+               (vector unsigned long long)(__c), (__imm)))
 #endif /* __SIZEOF_INT128__ */
 #endif /* __VSX__ */
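The rewritten branches above all feed the builtin's fixed prototype: __builtin_vsx_xxeval evidently takes and returns vector unsigned long long only, so each _Generic arm now casts the arguments in and the result back out to the operand type. A minimal, hedged usage sketch; the 8-bit immediate selects one of the 256 three-input boolean functions applied bitwise (see the xxeval description in Power ISA 3.1 for the exact truth-table encoding, which is not restated here):

#include <altivec.h> /* assumes a Power10 toolchain, e.g. -mcpu=power10 */

#define TERNARY_IMM 0x01 /* placeholder immediate; pick per the ISA table */

/* applies the boolean function selected by TERNARY_IMM to each bit triple */
vector unsigned int eval3(vector unsigned int a, vector unsigned int b,
                          vector unsigned int c) {
  return vec_ternarylogic(a, b, c, TERNARY_IMM);
}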
 
@@ -18371,14 +18459,16 @@
 
 #ifdef __VSX__
 #define vec_genpcvm(__a, __imm)                                                \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : __builtin_vsx_xxgenpcvbm((vector unsigned char)(__a), (int)(__imm)),   \
+        vector unsigned short                                                  \
+      : __builtin_vsx_xxgenpcvhm((vector unsigned short)(__a), (int)(__imm)),  \
+        vector unsigned int                                                    \
+      : __builtin_vsx_xxgenpcvwm((vector unsigned int)(__a), (int)(__imm)),    \
+        vector unsigned long long                                              \
+      : __builtin_vsx_xxgenpcvdm((vector unsigned long long)(__a),             \
+                                 (int)(__imm)))
 #endif /* __VSX__ */
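vec_genpcvm changes only at the type level: __a is now cast to the exact vector type each xxgenpcv* builtin expects rather than being passed through unmodified. A type-focused sketch (reading the mnemonic as "generate permute control vector from mask"; the mode immediate must be an integer constant):

#include <altivec.h> /* assumes -mcpu=power10 */

vector unsigned char gen_pcv(vector unsigned char mask) {
  return vec_genpcvm(mask, 0); /* mode 0 chosen arbitrarily for illustration */
}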
 
 /* vec_clr_first */
@@ -18386,18 +18476,22 @@
 static __inline__ vector signed char __ATTRS_o_ai
 vec_clr_first(vector signed char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb(__a, __n);
+  return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+                                                      __n);
 #else
-  return __builtin_altivec_vclrlb( __a, __n);
+  return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+                                                      __n);
 #endif
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_clr_first(vector unsigned char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrrb(
+      (vector unsigned char)__a, __n);
 #else
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrlb(
+      (vector unsigned char)__a, __n);
 #endif
 }
 
@@ -18406,18 +18500,22 @@
 static __inline__ vector signed char __ATTRS_o_ai
 vec_clr_last(vector signed char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb(__a, __n);
+  return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+                                                      __n);
 #else
-  return __builtin_altivec_vclrrb( __a, __n);
+  return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+                                                      __n);
 #endif
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_clr_last(vector unsigned char __a, unsigned int __n) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrlb(
+      (vector unsigned char)__a, __n);
 #else
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+  return (vector unsigned char)__builtin_altivec_vclrrb(
+      (vector unsigned char)__a, __n);
 #endif
 }
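Nothing about the behavior of vec_clr_first or vec_clr_last changes in these hunks; both the signed and unsigned overloads now route through a single vector unsigned char prototype for vclrlb/vclrrb and cast the result back, replacing the old mismatched vector signed char casts. A type-level sketch:

#include <altivec.h> /* assumes -mcpu=power10 */

vector signed char clr_first3(vector signed char a) {
  /* same value as before this change; only the header-internal casts moved */
  return vec_clr_first(a, 3);
}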
 
@@ -18469,13 +18567,75 @@
 }
 #endif
 
-/* vec_sldbi */
+/* vec_sldb */
+#define vec_sldb(__a, __b, __c)                                                \
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_altivec_vsldbi(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_altivec_vsldbi(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_altivec_vsldbi(                       \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_altivec_vsldbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_altivec_vsldbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_altivec_vsldbi((vector unsigned char)__a, \
+                                                    (vector unsigned char)__b, \
+                                                    (__c & 0x7)),              \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_altivec_vsldbi(                   \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_altivec_vsldbi(                     \
+          (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
 
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
-
-/* vec_srdbi */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+/* vec_srdb */
+#define vec_srdb(__a, __b, __c)                                                \
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_altivec_vsrdbi(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_altivec_vsrdbi(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_altivec_vsrdbi(                       \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_altivec_vsrdbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_altivec_vsrdbi(                         \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_altivec_vsrdbi((vector unsigned char)__a, \
+                                                    (vector unsigned char)__b, \
+                                                    (__c & 0x7)),              \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_altivec_vsrdbi(                   \
+            (vector unsigned char)__a, (vector unsigned char)__b,              \
+            (__c & 0x7)),                                                      \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_altivec_vsrdbi(                     \
+          (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
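vec_sldb and vec_srdb now dispatch through _Generic so the result keeps the element type of __a, with the vsldbi/vsrdbi builtins trafficking only in vector unsigned char underneath. A hedged usage sketch, on the reading that vec_sldb shifts __a left by (__c & 0x7) bits and fills the vacated low-order bits from the top of __b:

#include <altivec.h> /* assumes -mcpu=power10 */

vector unsigned int shift_in3(vector unsigned int a, vector unsigned int b) {
  /* the count must be a compile-time constant; the macro masks it to 0..7 */
  return vec_sldb(a, b, 3);
}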
 
 /* vec_insertl */
 
@@ -18704,16 +18864,46 @@
 #ifdef __VSX__
 
 /* vec_permx */
-
 #define vec_permx(__a, __b, __c, __d)                                          \
-  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+  _Generic(                                                                    \
+      (__a), vector unsigned char                                              \
+      : (vector unsigned char)__builtin_vsx_xxpermx(                           \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed char                                                     \
+      : (vector signed char)__builtin_vsx_xxpermx(                             \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned short                                                  \
+      : (vector unsigned short)__builtin_vsx_xxpermx(                          \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed short                                                    \
+      : (vector signed short)__builtin_vsx_xxpermx(                            \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned int                                                    \
+      : (vector unsigned int)__builtin_vsx_xxpermx(                            \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed int                                                      \
+      : (vector signed int)__builtin_vsx_xxpermx(                              \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector unsigned long long                                              \
+      : (vector unsigned long long)__builtin_vsx_xxpermx(                      \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector signed long long                                                \
+      : (vector signed long long)__builtin_vsx_xxpermx(                        \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector float                                                           \
+      : (vector float)__builtin_vsx_xxpermx(                                   \
+            (vector unsigned char)__a, (vector unsigned char)__b, __c, __d),   \
+        vector double                                                          \
+      : (vector double)__builtin_vsx_xxpermx(                                  \
+          (vector unsigned char)__a, (vector unsigned char)__b, __c, __d))
 
 /* vec_blendv */
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_blendv(vector signed char __a, vector signed char __b,
            vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
+  return (vector signed char)__builtin_vsx_xxblendvb(
+      (vector unsigned char)__a, (vector unsigned char)__b, __c);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
@@ -18725,7 +18915,8 @@
 static __inline__ vector signed short __ATTRS_o_ai
 vec_blendv(vector signed short __a, vector signed short __b,
            vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
+  return (vector signed short)__builtin_vsx_xxblendvh(
+      (vector unsigned short)__a, (vector unsigned short)__b, __c);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
@@ -18737,7 +18928,8 @@
 static __inline__ vector signed int __ATTRS_o_ai
 vec_blendv(vector signed int __a, vector signed int __b,
            vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
+  return (vector signed int)__builtin_vsx_xxblendvw(
+      (vector unsigned int)__a, (vector unsigned int)__b, __c);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
@@ -18749,33 +18941,68 @@
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_blendv(vector signed long long __a, vector signed long long __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector signed long long)__builtin_vsx_xxblendvd(
+      (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector unsigned long long)__builtin_vsx_xxblendvd(__a, __b, __c);
 }
 
 static __inline__ vector float __ATTRS_o_ai
 vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
+  return (vector float)__builtin_vsx_xxblendvw((vector unsigned int)__a,
+                                               (vector unsigned int)__b, __c);
 }
 
 static __inline__ vector double __ATTRS_o_ai
 vec_blendv(vector double __a, vector double __b,
            vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
+  return (vector double)__builtin_vsx_xxblendvd(
+      (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
 }
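As with the integer overloads above, the float and double forms of vec_blendv now bounce through the unsigned builtin types and cast back. A small sketch, assuming the xxblendv* family selects each element from __b when the most significant bit of the corresponding element of __c is set, and from __a otherwise:

#include <altivec.h> /* assumes -mcpu=power10 */

vector float pick(vector float a, vector float b,
                  vector unsigned int msb_mask) {
  return vec_blendv(a, b, msb_mask);
}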
 
-/* vec_replace_elt */
+#define vec_replace_unaligned(__a, __b, __c)                                   \
+  _Generic((__a), vector signed int                                            \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector unsigned int                                               \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector unsigned long long                                         \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c),            \
+             vector signed long long                                           \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c),            \
+             vector float                                                      \
+           : __builtin_altivec_vinsw((vector unsigned char)__a,                \
+                                     (unsigned int)__b, __c),                  \
+             vector double                                                     \
+           : __builtin_altivec_vinsd((vector unsigned char)__a,                \
+                                     (unsigned long long)__b, __c))
 
-#define vec_replace_elt __builtin_altivec_vec_replace_elt
-
-/* vec_replace_unaligned */
-
-#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+#define vec_replace_elt(__a, __b, __c)                                         \
+  _Generic((__a), vector signed int                                            \
+           : (vector signed int)__builtin_altivec_vinsw_elt(                   \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector unsigned int                                               \
+           : (vector unsigned int)__builtin_altivec_vinsw_elt(                 \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector unsigned long long                                         \
+           : (vector unsigned long long)__builtin_altivec_vinsd_elt(           \
+                 (vector unsigned char)__a, (unsigned long long)__b, __c),     \
+             vector signed long long                                           \
+           : (vector signed long long)__builtin_altivec_vinsd_elt(             \
+                 (vector unsigned char)__a, (unsigned long long)__b, __c),     \
+             vector float                                                      \
+           : (vector float)__builtin_altivec_vinsw_elt(                        \
+                 (vector unsigned char)__a, (unsigned int)__b, __c),           \
+             vector double                                                     \
+           : (vector double)__builtin_altivec_vinsd_elt(                       \
+               (vector unsigned char)__a, (unsigned long long)__b, __c))
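Note the asymmetry between the two new macros: vec_replace_elt casts the result back to the type of __a, while vec_replace_unaligned leaves the result in the builtin's return type. A hedged usage sketch, on the assumption that __c is an element index for vec_replace_elt (a byte offset for vec_replace_unaligned) and must be an integer constant:

#include <altivec.h> /* assumes -mcpu=power10 */

vector unsigned int set_lane(vector unsigned int v) {
  /* replace element 2 of v with the given scalar */
  return vec_replace_elt(v, 0xDEADBEEFu, 2);
}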
 
 /* vec_splati */
 
@@ -18852,27 +19079,33 @@
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_stril(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_stril(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr(__a);
+  return (vector signed char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl(__a);
+  return (vector signed char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_stril(vector unsigned short __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihr((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihr(
+      (vector signed short)__a);
 #else
-  return __builtin_altivec_vstrihl((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihl(
+      (vector signed short)__a);
 #endif
 }
 
@@ -18889,17 +19122,17 @@
 
 static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
@@ -18924,27 +19157,33 @@
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_strir(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr((vector signed char)__a);
+  return (vector unsigned char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_strir(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl(__a);
+  return (vector signed char)__builtin_altivec_vstribl(
+      (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr(__a);
+  return (vector signed char)__builtin_altivec_vstribr(
+      (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_strir(vector unsigned short __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstrihl((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihl(
+      (vector signed short)__a);
 #else
-  return __builtin_altivec_vstrihr((vector signed short)__a);
+  return (vector unsigned short)__builtin_altivec_vstrihr(
+      (vector signed short)__a);
 #endif
 }
 
@@ -18961,17 +19200,17 @@
 
 static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
 
 static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
 #ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
 #else
-  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
 #endif
 }
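The vec_stril/vec_strir families and their _p predicate forms change only at the type level here: the byte variants now hand vector unsigned char to vstribl/vstribr where they previously cast to vector signed char, with results cast back to the overload's own type, while the halfword variants keep their vector signed short casts. A type-focused sketch:

#include <altivec.h> /* assumes -mcpu=power10 */

vector unsigned char isolate(vector unsigned char s) {
  /* same value as before this diff; only the header-internal casts moved */
  return vec_stril(s);
}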
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/ammintrin.h b/linux-x86/lib64/clang/16.0.2/include/ammintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ammintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/amxintrin.h b/linux-x86/lib64/clang/16.0.2/include/amxintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/amxintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/amxintrin.h
index 4940666..ec67a87 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/amxintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/amxintrin.h
@@ -439,8 +439,6 @@
 ///
 /// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
 ///
-/// \param dst
-///    A destination tile. Max size is 1024 Bytes.
 /// \param base
 ///    A pointer to base address.
 /// \param stride
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm64intr.h b/linux-x86/lib64/clang/16.0.2/include/arm64intr.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm64intr.h
rename to linux-x86/lib64/clang/16.0.2/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_acle.h b/linux-x86/lib64/clang/16.0.2/include/arm_acle.h
similarity index 90%
copy from darwin-x86/lib64/clang/14.0.2/include/arm_acle.h
copy to linux-x86/lib64/clang/16.0.2/include/arm_acle.h
index 45fac24..ed3fc1d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_acle.h
+++ b/linux-x86/lib64/clang/16.0.2/include/arm_acle.h
@@ -64,7 +64,7 @@
 }
 #endif
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __dbg(t) __builtin_arm_dbg(t)
 #endif
 
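This guard rewrite repeats through the rest of arm_acle.h: #if __ARM_FEATURE_X becomes #if defined(__ARM_FEATURE_X) && __ARM_FEATURE_X, which evaluates identically but stays quiet under -Wundef, since the defined() check short-circuits before the possibly-undefined macro would be evaluated. A standalone illustration (FEATURE is a placeholder name):

/* compile with: cc -Wundef -c guard.c */
#if defined(FEATURE) && FEATURE /* silent whether or not FEATURE is defined */
int feature_code(void) { return 1; }
#endif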
@@ -82,7 +82,7 @@
 /* 8.6.1 Data prefetch */
 #define __pld(addr) __pldx(0, 0, 0, addr)
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __pldx(access_kind, cache_level, retention_policy, addr) \
   __builtin_arm_prefetch(addr, access_kind, 1)
 #else
@@ -93,7 +93,7 @@
 /* 8.6.2 Instruction prefetch */
 #define __pli(addr) __plix(0, 0, addr)
 
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
 #define __plix(cache_level, retention_policy, addr) \
   __builtin_arm_prefetch(addr, 0, 0)
 #else
@@ -140,17 +140,17 @@
 /* CLZ */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __clz(uint32_t __t) {
-  return __builtin_clz(__t);
+  return (uint32_t)__builtin_clz(__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
 __clzl(unsigned long __t) {
-  return __builtin_clzl(__t);
+  return (unsigned long)__builtin_clzl(__t);
 }
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __clzll(uint64_t __t) {
-  return __builtin_clzll(__t);
+  return (uint64_t)__builtin_clzll(__t);
 }
 
 /* CLS */
@@ -201,7 +201,7 @@
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __rev16ll(uint64_t __t) {
-  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+  return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
 }
 
 static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
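__rev16 is declared to take and return uint32_t, so feeding it the full 64-bit __t relied on implicit narrowing; the added casts make the truncation and the widening explicit, keeping the always-inline header clean under warnings like -Wimplicit-int-conversion. A self-contained restatement of the logic, using __builtin_bswap16 as a stand-in for the REV16 halfword byte swap:

#include <stdint.h>

/* reverse the bytes inside each 16-bit halfword of a 32-bit word */
static uint32_t rev16_32(uint32_t t) {
  return ((uint32_t)__builtin_bswap16((uint16_t)(t >> 16)) << 16) |
         (uint32_t)__builtin_bswap16((uint16_t)t);
}

/* apply the same halfword swap across a 64-bit value, as __rev16ll does */
static uint64_t rev16_64(uint64_t t) {
  return ((uint64_t)rev16_32((uint32_t)(t >> 32)) << 32) |
         (uint64_t)rev16_32((uint32_t)t);
}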
@@ -216,7 +216,7 @@
 /* REVSH */
 static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
 __revsh(int16_t __t) {
-  return __builtin_bswap16(__t);
+  return (int16_t)__builtin_bswap16((uint16_t)__t);
 }
 
 /* RBIT */
@@ -227,7 +227,7 @@
 
 static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
 __rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
   return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
          __builtin_arm_rbit(__t >> 32);
 #else
@@ -247,7 +247,7 @@
 /*
  * 9.3 16-bit multiplications
  */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
 __smulbb(int32_t __a, int32_t __b) {
   return __builtin_arm_smulbb(__a, __b);
@@ -277,17 +277,17 @@
 /*
  * 9.4 Saturating intrinsics
  *
- * FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag
+ * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
  * intrinsics are implemented and the flag is enabled.
  */
 /* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
+#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
 #define __ssat(x, y) __builtin_arm_ssat(x, y)
 #define __usat(x, y) __builtin_arm_usat(x, y)
 #endif
 
 /* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __qadd(int32_t __t, int32_t __v) {
   return __builtin_arm_qadd(__t, __v);
@@ -305,7 +305,7 @@
 #endif
 
 /* 9.4.3 Accumulating multiplications */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlabb(int32_t __a, int32_t __b, int32_t __c) {
   return __builtin_arm_smlabb(__a, __b, __c);
@@ -334,13 +334,13 @@
 
 
 /* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 #define __ssat16(x, y) __builtin_arm_ssat16(x, y)
 #define __usat16(x, y) __builtin_arm_usat16(x, y)
 #endif
 
 /* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 typedef int32_t int8x4_t;
 typedef int32_t int16x2_t;
 typedef uint32_t uint8x4_t;
@@ -365,7 +365,7 @@
 #endif
 
 /* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
 __sel(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_sel(__a, __b);
@@ -373,7 +373,7 @@
 #endif
 
 /* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
 __qadd8(int8x4_t __a, int8x4_t __b) {
   return __builtin_arm_qadd8(__a, __b);
@@ -425,7 +425,7 @@
 #endif
 
 /* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __usad8(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_usad8(__a, __b);
@@ -437,7 +437,7 @@
 #endif
 
 /* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
 __qadd16(int16x2_t __a, int16x2_t __b) {
   return __builtin_arm_qadd16(__a, __b);
@@ -537,7 +537,7 @@
 #endif
 
 /* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
   return __builtin_arm_smlad(__a, __b, __c);
@@ -589,7 +589,7 @@
 #endif
 
 /* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
+#if defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __crc32b(uint32_t __a, uint8_t __b) {
   return __builtin_arm_crc32b(__a, __b);
@@ -632,7 +632,7 @@
 #endif
 
 /* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __jcvt(double __a) {
   return __builtin_arm_jcvt(__a);
@@ -640,50 +640,50 @@
 #endif
 
 /* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32zf(float __a) {
-  return __builtin_arm_frint32zf(__a);
+__rint32zf(float __a) {
+  return __builtin_arm_rint32zf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32z(double __a) {
-  return __builtin_arm_frint32z(__a);
+__rint32z(double __a) {
+  return __builtin_arm_rint32z(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64zf(float __a) {
-  return __builtin_arm_frint64zf(__a);
+__rint64zf(float __a) {
+  return __builtin_arm_rint64zf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64z(double __a) {
-  return __builtin_arm_frint64z(__a);
+__rint64z(double __a) {
+  return __builtin_arm_rint64z(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32xf(float __a) {
-  return __builtin_arm_frint32xf(__a);
+__rint32xf(float __a) {
+  return __builtin_arm_rint32xf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32x(double __a) {
-  return __builtin_arm_frint32x(__a);
+__rint32x(double __a) {
+  return __builtin_arm_rint32x(__a);
 }
 
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64xf(float __a) {
-  return __builtin_arm_frint64xf(__a);
+__rint64xf(float __a) {
+  return __builtin_arm_rint64xf(__a);
 }
 
 static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64x(double __a) {
-  return __builtin_arm_frint64x(__a);
+__rint64x(double __a) {
+  return __builtin_arm_rint64x(__a);
 }
 #endif
 
 /* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
 typedef struct {
     uint64_t val[8];
 } data512_t;
@@ -721,7 +721,7 @@
 #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
 
 /* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
+#if defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
 #define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
 #define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
 #define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
@@ -730,8 +730,14 @@
 #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
 #endif
 
+/* Memory Operations Intrinsics */
+#if defined(__ARM_FEATURE_MOPS) && __ARM_FEATURE_MOPS && defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
+#define __arm_mops_memset_tag(__tagged_address, __value, __size)    \
+  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
+#endif
+
 /* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
+#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME
 
 #define _TMFAILURE_REASON  0x00007fffu
 #define _TMFAILURE_RTRY    0x00008000u
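The Memory Operations block added above gates __arm_mops_memset_tag on both FEAT_MOPS and FEAT_MTE, since the intrinsic writes MTE allocation tags while it sets memory. A hedged usage sketch (the target flags are illustrative):

#include <arm_acle.h>
#include <stddef.h>

/* assumes e.g. -march=armv8.8-a+mops+memtag */
void *zero_and_tag(void *tagged_ptr, size_t n) {
  /* set n bytes to 0 and stamp tagged_ptr's allocation tag
     onto every MTE granule touched */
  return __arm_mops_memset_tag(tagged_ptr, 0, n);
}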
@@ -753,7 +759,7 @@
 #endif /* __ARM_FEATURE_TME */
 
 /* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
 static __inline__ int __attribute__((__always_inline__, __nodebug__))
 __rndr(uint64_t *__p) {
   return __builtin_arm_rndr(__p);
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_bf16.h b/linux-x86/lib64/clang/16.0.2/include/arm_bf16.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_bf16.h
rename to linux-x86/lib64/clang/16.0.2/include/arm_bf16.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_cde.h b/linux-x86/lib64/clang/16.0.2/include/arm_cde.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_cde.h
rename to linux-x86/lib64/clang/16.0.2/include/arm_cde.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_cmse.h b/linux-x86/lib64/clang/16.0.2/include/arm_cmse.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_cmse.h
rename to linux-x86/lib64/clang/16.0.2/include/arm_cmse.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h b/linux-x86/lib64/clang/16.0.2/include/arm_fp16.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h
copy to linux-x86/lib64/clang/16.0.2/include/arm_fp16.h
index ce993ce..6e8470f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h
+++ b/linux-x86/lib64/clang/16.0.2/include/arm_fp16.h
@@ -31,561 +31,561 @@
 
 #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)
 #define vabdh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
   __ret; \
 })
 #define vabsh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
   __ret; \
 })
 #define vaddh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcageh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcagth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcaleh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcalth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
   __ret; \
 })
 #define vceqh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
   __ret; \
 })
 #define vceqzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
   __ret; \
 })
 #define vcgeh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
   __ret; \
 })
 #define vcgezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
   __ret; \
 })
 #define vcgth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcgtzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
   __ret; \
 })
 #define vcleh_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
   __ret; \
 })
 #define vclezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
   __ret; \
 })
 #define vclth_f16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
   __ret; \
 })
 #define vcltzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
   __ret; \
 })
 #define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
   __ret; \
 })
 #define vcvth_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
   __ret; \
 })
 #define vcvth_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
   __ret; \
 })
 #define vcvth_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
   __ret; \
 })
 #define vcvth_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
   __ret; \
 })
 #define vcvth_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtah_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtah_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
   __ret; \
 })
 #define vcvth_f16_u16(__p0) __extension__ ({ \
-  uint16_t __s0 = __p0; \
   float16_t __ret; \
+  uint16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
   __ret; \
 })
 #define vcvth_f16_s16(__p0) __extension__ ({ \
-  int16_t __s0 = __p0; \
   float16_t __ret; \
+  int16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
   __ret; \
 })
 #define vcvth_f16_u32(__p0) __extension__ ({ \
-  uint32_t __s0 = __p0; \
   float16_t __ret; \
+  uint32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
   __ret; \
 })
 #define vcvth_f16_s32(__p0) __extension__ ({ \
-  int32_t __s0 = __p0; \
   float16_t __ret; \
+  int32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
   __ret; \
 })
 #define vcvth_f16_u64(__p0) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   float16_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
   __ret; \
 })
 #define vcvth_f16_s64(__p0) __extension__ ({ \
-  int64_t __s0 = __p0; \
   float16_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
   __ret; \
 })
 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
   float16_t __ret; \
+  uint32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
   float16_t __ret; \
+  int32_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   float16_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   float16_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
   float16_t __ret; \
+  uint16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
   __ret; \
 })
 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
   float16_t __ret; \
+  int16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
   __ret; \
 })
 #define vcvtmh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtmh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtnh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
   __ret; \
 })
 #define vcvtph_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   int64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint32_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
   __ret; \
 })
 #define vcvtph_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   uint64_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
   __ret; \
 })
 #define vdivh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
   __ret; \
 })
 #define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
   float16_t __s2 = __p2; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
   __ret; \
 })
 #define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
   float16_t __s2 = __p2; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
   __ret; \
 })
 #define vmaxh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
   __ret; \
 })
 #define vminh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
   __ret; \
 })
 #define vminnmh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmulh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
   __ret; \
 })
 #define vmulxh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
   __ret; \
 })
 #define vnegh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
   __ret; \
 })
 #define vrecpeh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
   __ret; \
 })
 #define vrecpsh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
   __ret; \
 })
 #define vrecpxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
   __ret; \
 })
 #define vrndh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
   __ret; \
 })
 #define vrndah_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
   __ret; \
 })
 #define vrndih_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
   __ret; \
 })
 #define vrndmh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
   __ret; \
 })
 #define vrndnh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
   __ret; \
 })
 #define vrndph_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
   __ret; \
 })
 #define vrndxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
   __ret; \
 })
 #define vrsqrteh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
   __ret; \
 })
 #define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
   __ret; \
 })
 #define vsqrth_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
   __ret; \
 })
 #define vsubh_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
   float16_t __s0 = __p0; \
   float16_t __s1 = __p1; \
-  float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
   __ret; \
 })
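Every hunk in this arm_fp16.h block, and in the arm_neon.h block that follows, is the same mechanical reorder: the generated statement expressions now declare the __ret result variable before the __s* operand copies. The expansion's types and final value are unchanged; only the declaration order moves. Schematically (result_t, operand_t, and __builtin_placeholder are stand-in names, not real identifiers):

#define wrapper_f16(__p0) __extension__ ({ \
  result_t __ret;              /* result declared first now */ \
  operand_t __s0 = __p0;       /* operand copies follow */ \
  __ret = (result_t) __builtin_placeholder(__s0); \
  __ret;                       /* value of the ({ ... }) expression */ \
})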
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_mve.h b/linux-x86/lib64/clang/16.0.2/include/arm_mve.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_mve.h
rename to linux-x86/lib64/clang/16.0.2/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h b/linux-x86/lib64/clang/16.0.2/include/arm_neon.h
similarity index 89%
copy from darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
copy to linux-x86/lib64/clang/16.0.2/include/arm_neon.h
index 2448870..2991b12 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ b/linux-x86/lib64/clang/16.0.2/include/arm_neon.h
@@ -462,53 +462,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #else
 #define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #endif
 
 #define splat_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
 #else
 #define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
@@ -516,23 +516,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
 #else
 #define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
   __ret; \
 })
@@ -540,22 +540,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
 #else
 #define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x1_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
   __ret; \
 })
@@ -563,23 +563,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
 #else
 #define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
   __ret; \
 })
@@ -587,23 +587,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -611,23 +611,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -635,22 +635,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #else
 #define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
@@ -658,23 +658,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -682,23 +682,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -706,22 +706,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
 #else
 #define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
@@ -729,23 +729,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
 #else
 #define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
@@ -753,23 +753,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
 #else
 #define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
@@ -777,23 +777,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -801,22 +801,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #else
 #define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
@@ -824,23 +824,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -848,23 +848,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -872,53 +872,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #endif
 
 #define splat_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -926,53 +926,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #endif
 
 #define splat_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
 #else
 #define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
   __ret; \
 })
@@ -980,23 +980,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
 #else
 #define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
   __ret; \
 })
@@ -1004,53 +1004,53 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #endif
 
 #define splat_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -1058,23 +1058,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
 #else
 #define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
@@ -1082,22 +1082,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
 #else
 #define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
   __ret; \
 })
 #define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x1_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
@@ -1105,23 +1105,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
 #else
 #define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x4_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
@@ -1129,23 +1129,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
 #else
 #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8x16_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
   __ret; \
 })
@@ -1153,23 +1153,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
 #else
 #define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
   poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
   __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
   __ret; \
 })
@@ -1177,23 +1177,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
 #else
 #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16x8_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
   __ret; \
 })
@@ -1201,23 +1201,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
@@ -1225,23 +1225,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -1249,23 +1249,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -1273,23 +1273,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -1297,23 +1297,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
@@ -1321,23 +1321,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
 #else
 #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
@@ -1345,23 +1345,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
 #else
 #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
@@ -1369,23 +1369,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
 #else
 #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
@@ -1393,23 +1393,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -1417,23 +1417,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -1441,23 +1441,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -1465,23 +1465,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
@@ -1489,23 +1489,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -1513,22 +1513,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
   __ret; \
 })
 #define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -1536,23 +1536,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -1560,23 +1560,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
@@ -1584,22 +1584,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
 #else
 #define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
   __ret; \
 })
 #define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
   float64x1_t __ret; \
+  float64x2_t __s0 = __p0; \
   __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
   __ret; \
 })
@@ -1607,23 +1607,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
 #else
 #define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32x2_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
   __ret; \
 })
@@ -1631,23 +1631,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
 #else
 #define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16x8_t __s0 = __p0; \
   __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
   __ret; \
 })
@@ -1655,23 +1655,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -1679,22 +1679,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
   __ret; \
 })
 #define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -1702,23 +1702,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -1732,9 +1732,9 @@
 }
 #else
 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1754,9 +1754,9 @@
 }
 #else
 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1776,9 +1776,9 @@
 }
 #else
 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1798,9 +1798,9 @@
 }
 #else
 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1820,9 +1820,9 @@
 }
 #else
 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1837,9 +1837,9 @@
 }
 #else
 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1859,9 +1859,9 @@
 }
 #else
 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1881,9 +1881,9 @@
 }
 #else
 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1903,9 +1903,9 @@
 }
 #else
 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -1925,9 +1925,9 @@
 }
 #else
 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -1947,9 +1947,9 @@
 }
 #else
 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -1969,9 +1969,9 @@
 }
 #else
 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -1986,9 +1986,9 @@
 }
 #else
 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2008,9 +2008,9 @@
 }
 #else
 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2030,8 +2030,8 @@
 }
 #else
 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2046,8 +2046,8 @@
 }
 #else
 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2062,8 +2062,8 @@
 }
 #else
 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2078,8 +2078,8 @@
 }
 #else
 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2094,8 +2094,8 @@
 }
 #else
 __ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2110,8 +2110,8 @@
 }
 #else
 __ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2126,8 +2126,8 @@
 }
 #else
 __ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2142,8 +2142,8 @@
 }
 #else
 __ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2158,9 +2158,9 @@
 }
 #else
 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2175,9 +2175,9 @@
 }
 #else
 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2192,9 +2192,9 @@
 }
 #else
 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2209,9 +2209,9 @@
 }
 #else
 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2226,9 +2226,9 @@
 }
 #else
 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2243,9 +2243,9 @@
 }
 #else
 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2260,9 +2260,9 @@
 }
 #else
 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2277,9 +2277,9 @@
 }
 #else
 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2294,9 +2294,9 @@
 }
 #else
 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2311,9 +2311,9 @@
 }
 #else
 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2328,9 +2328,9 @@
 }
 #else
 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2350,9 +2350,9 @@
 }
 #else
 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2367,9 +2367,9 @@
 }
 #else
 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2384,9 +2384,9 @@
 }
 #else
 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2401,9 +2401,9 @@
 }
 #else
 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2423,9 +2423,9 @@
 }
 #else
 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2440,9 +2440,9 @@
 }
 #else
 __ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2462,9 +2462,9 @@
 }
 #else
 __ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2479,9 +2479,9 @@
 }
 #else
 __ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2496,9 +2496,9 @@
 }
 #else
 __ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2513,9 +2513,9 @@
 }
 #else
 __ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2530,9 +2530,9 @@
 }
 #else
 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2552,9 +2552,9 @@
 }
 #else
 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2574,9 +2574,9 @@
 }
 #else
 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2596,9 +2596,9 @@
 }
 #else
 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2618,9 +2618,9 @@
 }
 #else
 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2640,9 +2640,9 @@
 }
 #else
 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2662,9 +2662,9 @@
 }
 #else
 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2679,9 +2679,9 @@
 }
 #else
 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2696,9 +2696,9 @@
 }
 #else
 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2713,9 +2713,9 @@
 }
 #else
 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2730,9 +2730,9 @@
 }
 #else
 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2747,9 +2747,9 @@
 }
 #else
 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2764,9 +2764,9 @@
 }
 #else
 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2781,9 +2781,9 @@
 }
 #else
 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2798,9 +2798,9 @@
 }
 #else
 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2815,9 +2815,9 @@
 }
 #else
 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2837,9 +2837,9 @@
 }
 #else
 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2854,9 +2854,9 @@
 }
 #else
 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2871,9 +2871,9 @@
 }
 #else
 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2893,9 +2893,9 @@
 }
 #else
 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 & __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2910,9 +2910,9 @@
 }
 #else
 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2927,9 +2927,9 @@
 }
 #else
 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -2944,9 +2944,9 @@
 }
 #else
 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -2961,9 +2961,9 @@
 }
 #else
 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2978,9 +2978,9 @@
 }
 #else
 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -2995,9 +2995,9 @@
 }
 #else
 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3012,9 +3012,9 @@
 }
 #else
 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3029,9 +3029,9 @@
 }
 #else
 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3046,9 +3046,9 @@
 }
 #else
 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3063,9 +3063,9 @@
 }
 #else
 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3085,9 +3085,9 @@
 }
 #else
 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3102,9 +3102,9 @@
 }
 #else
 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3119,9 +3119,9 @@
 }
 #else
 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3141,9 +3141,9 @@
 }
 #else
 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 & ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3158,10 +3158,10 @@
 }
 #else
 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+  poly8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3176,10 +3176,10 @@
 }
 #else
 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+  poly16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3194,10 +3194,10 @@
 }
 #else
 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+  poly8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3212,10 +3212,10 @@
 }
 #else
 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+  poly16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3230,10 +3230,10 @@
 }
 #else
 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3248,10 +3248,10 @@
 }
 #else
 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3266,10 +3266,10 @@
 }
 #else
 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3284,10 +3284,10 @@
 }
 #else
 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3302,10 +3302,10 @@
 }
 #else
 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3320,10 +3320,10 @@
 }
 #else
 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3338,10 +3338,10 @@
 }
 #else
 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3356,10 +3356,10 @@
 }
 #else
 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3374,10 +3374,10 @@
 }
 #else
 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3392,10 +3392,10 @@
 }
 #else
 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3410,10 +3410,10 @@
 }
 #else
 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3433,10 +3433,10 @@
 }
 #else
 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3451,10 +3451,10 @@
 }
 #else
 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3469,10 +3469,10 @@
 }
 #else
 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3487,10 +3487,10 @@
 }
 #else
 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3510,10 +3510,10 @@
 }
 #else
 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3528,9 +3528,9 @@
 }
 #else
 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3545,9 +3545,9 @@
 }
 #else
 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3562,9 +3562,9 @@
 }
 #else
 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3579,9 +3579,9 @@
 }
 #else
 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3596,9 +3596,9 @@
 }
 #else
 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3613,9 +3613,9 @@
 }
 #else
 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3630,9 +3630,9 @@
 }
 #else
 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3647,9 +3647,9 @@
 }
 #else
 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3664,9 +3664,9 @@
 }
 #else
 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  uint8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3681,9 +3681,9 @@
 }
 #else
 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  uint8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3698,9 +3698,9 @@
 }
 #else
 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3715,9 +3715,9 @@
 }
 #else
 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3732,9 +3732,9 @@
 }
 #else
 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3749,9 +3749,9 @@
 }
 #else
 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3766,9 +3766,9 @@
 }
 #else
 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3783,9 +3783,9 @@
 }
 #else
 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3800,9 +3800,9 @@
 }
 #else
 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3817,9 +3817,9 @@
 }
 #else
 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3834,9 +3834,9 @@
 }
 #else
 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3851,9 +3851,9 @@
 }
 #else
 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3868,9 +3868,9 @@
 }
 #else
 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3885,9 +3885,9 @@
 }
 #else
 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3902,9 +3902,9 @@
 }
 #else
 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -3919,9 +3919,9 @@
 }
 #else
 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 == __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3936,9 +3936,9 @@
 }
 #else
 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3953,9 +3953,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -3970,9 +3970,9 @@
 }
 #else
 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -3987,9 +3987,9 @@
 }
 #else
 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4004,9 +4004,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4021,9 +4021,9 @@
 }
 #else
 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4038,9 +4038,9 @@
 }
 #else
 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4055,9 +4055,9 @@
 }
 #else
 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4072,9 +4072,9 @@
 }
 #else
 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4089,9 +4089,9 @@
 }
 #else
 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4106,9 +4106,9 @@
 }
 #else
 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4123,9 +4123,9 @@
 }
 #else
 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4140,9 +4140,9 @@
 }
 #else
 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4157,9 +4157,9 @@
 }
 #else
 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 >= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4174,9 +4174,9 @@
 }
 #else
 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4191,9 +4191,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4208,9 +4208,9 @@
 }
 #else
 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4225,9 +4225,9 @@
 }
 #else
 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4242,9 +4242,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4259,9 +4259,9 @@
 }
 #else
 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4276,9 +4276,9 @@
 }
 #else
 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4293,9 +4293,9 @@
 }
 #else
 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4310,9 +4310,9 @@
 }
 #else
 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4327,9 +4327,9 @@
 }
 #else
 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4344,9 +4344,9 @@
 }
 #else
 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4361,9 +4361,9 @@
 }
 #else
 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4378,9 +4378,9 @@
 }
 #else
 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4395,9 +4395,9 @@
 }
 #else
 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 > __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4412,9 +4412,9 @@
 }
 #else
 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4429,9 +4429,9 @@
 }
 #else
 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4446,9 +4446,9 @@
 }
 #else
 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4463,9 +4463,9 @@
 }
 #else
 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4480,9 +4480,9 @@
 }
 #else
 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4497,9 +4497,9 @@
 }
 #else
 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4514,9 +4514,9 @@
 }
 #else
 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4531,9 +4531,9 @@
 }
 #else
 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4548,9 +4548,9 @@
 }
 #else
 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4565,9 +4565,9 @@
 }
 #else
 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4582,9 +4582,9 @@
 }
 #else
 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4599,9 +4599,9 @@
 }
 #else
 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4616,9 +4616,9 @@
 }
 #else
 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4633,9 +4633,9 @@
 }
 #else
 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 <= __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4650,8 +4650,8 @@
 }
 #else
 __ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4666,8 +4666,8 @@
 }
 #else
 __ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4682,8 +4682,8 @@
 }
 #else
 __ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4698,8 +4698,8 @@
 }
 #else
 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4714,8 +4714,8 @@
 }
 #else
 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4730,8 +4730,8 @@
 }
 #else
 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4746,8 +4746,8 @@
 }
 #else
 __ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4762,8 +4762,8 @@
 }
 #else
 __ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4778,8 +4778,8 @@
 }
 #else
 __ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4794,8 +4794,8 @@
 }
 #else
 __ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4810,8 +4810,8 @@
 }
 #else
 __ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4826,8 +4826,8 @@
 }
 #else
 __ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4842,9 +4842,9 @@
 }
 #else
 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4859,9 +4859,9 @@
 }
 #else
 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4876,9 +4876,9 @@
 }
 #else
 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4893,9 +4893,9 @@
 }
 #else
 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4910,9 +4910,9 @@
 }
 #else
 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4927,9 +4927,9 @@
 }
 #else
 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -4944,9 +4944,9 @@
 }
 #else
 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4961,9 +4961,9 @@
 }
 #else
 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -4978,9 +4978,9 @@
 }
 #else
 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -4995,9 +4995,9 @@
 }
 #else
 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5012,9 +5012,9 @@
 }
 #else
 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5029,9 +5029,9 @@
 }
 #else
 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5046,9 +5046,9 @@
 }
 #else
 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5063,9 +5063,9 @@
 }
 #else
 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t)(__rev0 < __rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5080,8 +5080,8 @@
 }
 #else
 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5096,8 +5096,8 @@
 }
 #else
 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5112,8 +5112,8 @@
 }
 #else
 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5128,8 +5128,8 @@
 }
 #else
 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5144,8 +5144,8 @@
 }
 #else
 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5160,8 +5160,8 @@
 }
 #else
 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5176,8 +5176,8 @@
 }
 #else
 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5192,8 +5192,8 @@
 }
 #else
 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5208,8 +5208,8 @@
 }
 #else
 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5224,8 +5224,8 @@
 }
 #else
 __ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5240,8 +5240,8 @@
 }
 #else
 __ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5256,8 +5256,8 @@
 }
 #else
 __ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
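
For reference while reading the vcls and vclz hunks above: vclz is a per-lane count of leading zero bits, and vcls counts how many bits below the sign bit match it. A scalar model of one 32-bit lane, assuming nothing beyond standard clang builtins (demo_* names are ours):

// One 32-bit lane of vclz; __builtin_clz(0) is undefined, so the zero
// case is handled explicitly.
static inline int demo_clz32(unsigned x) { return x ? __builtin_clz(x) : 32; }

// One 32-bit lane of vcls: consecutive bits, starting at bit 30, equal to
// the sign bit (bit 31). E.g. demo_cls32(1) == 30, demo_cls32(-1) == 31.
static inline int demo_cls32(int x) {
  unsigned u = x < 0 ? ~(unsigned)x : (unsigned)x;  // fold onto the non-negative case
  return demo_clz32(u) - 1;
}
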
@@ -5272,8 +5272,8 @@
 }
 #else
 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5288,8 +5288,8 @@
 }
 #else
 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5304,8 +5304,8 @@
 }
 #else
 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5320,8 +5320,8 @@
 }
 #else
 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5336,8 +5336,8 @@
 }
 #else
 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5352,8 +5352,8 @@
 }
 #else
 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
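
Similarly, the vcnt hunks above wrap a per-byte population count; one lane reduces to the standard popcount builtin (a sketch, with our naming):

// One 8-bit lane of vcnt: the number of set bits in the byte.
static inline unsigned char demo_cnt_u8(unsigned char x) {
  return (unsigned char)__builtin_popcount(x);
}
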
@@ -5368,9 +5368,9 @@
 }
 #else
 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x16_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5385,9 +5385,9 @@
 }
 #else
 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x8_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5402,9 +5402,9 @@
 }
 #else
 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5424,9 +5424,9 @@
 }
 #else
 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5461,9 +5461,9 @@
 }
 #else
 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5483,9 +5483,9 @@
 }
 #else
 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5505,9 +5505,9 @@
 }
 #else
 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x4_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5527,9 +5527,9 @@
 }
 #else
 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x8_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -5549,9 +5549,9 @@
 }
 #else
 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5586,9 +5586,9 @@
 }
 #else
 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
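
The vcombine hunks above get the same declaration reorder; the operation itself just concatenates two 64-bit halves into one 128-bit vector, with __p0 in the low lanes. In the same generic-vector model (assumed types, not the header's):

typedef unsigned int u32x2_d __attribute__((vector_size(8)));
typedef unsigned int u32x4_d __attribute__((vector_size(16)));

// Model of vcombine_u32: indices 0,1 select lanes of a; 2,3 select lanes of b.
static inline u32x4_d demo_vcombine_u32(u32x2_d a, u32x2_d b) {
  return __builtin_shufflevector(a, b, 0, 1, 2, 3);
}
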
@@ -5680,8 +5680,8 @@
 }
 #else
 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5696,8 +5696,8 @@
 }
 #else
 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5712,8 +5712,8 @@
 }
 #else
 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5728,8 +5728,8 @@
 }
 #else
 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5738,16 +5738,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5756,16 +5756,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   float32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5774,16 +5774,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5792,16 +5792,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   float32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5810,16 +5810,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5828,16 +5828,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -5846,16 +5846,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -5864,16 +5864,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
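
In the vcvt_n hunks above, the extra __p1 operand is the number of fraction bits: each integer lane is treated as fixed-point and scaled by 2^(-__p1) on the way to float (with the inverse scaling for the float-to-integer direction). A scalar model of one lane follows; note that in the real intrinsic the bit count must be an integer constant in 1..32, whereas here it is a runtime argument purely for illustration:

#include <math.h>

// One lane of vcvt_n_f32_u32: v * 2^(-n).
static inline float demo_fixed_to_float(unsigned v, int n) {
  return ldexpf((float)v, -n);
}
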
@@ -5888,8 +5888,8 @@
 }
 #else
 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5904,8 +5904,8 @@
 }
 #else
 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -5920,8 +5920,8 @@
 }
 #else
 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -5936,8 +5936,8 @@
 }
 #else
 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
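
From here down, the hunks reorder macros rather than inline functions. These forms must stay macros because the lane number ultimately feeds __builtin_shufflevector (via the splat helpers), and that builtin requires integer constant indices; the __extension__ ({ ... }) statement expression evaluates each macro argument exactly once while its final expression supplies the value. A stripped-down sketch of the shape, with invented demo_* names:

typedef unsigned int u32x2_m __attribute__((vector_size(8)));

// Broadcast lane `lane` of `vec` to every lane; `lane` must be a constant.
#define demo_vdup_lane_u32(vec, lane) __extension__ ({       \
  u32x2_m __s = (vec);        /* evaluate the argument once */ \
  u32x2_m __r;                                                 \
  __r = __builtin_shufflevector(__s, __s, lane, lane);         \
  __r;                        /* value of the ({ }) block */   \
})
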
@@ -5946,16 +5946,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
-  poly8x8_t __s0_0 = __p0_0; \
   poly8x8_t __ret_0; \
+  poly8x8_t __s0_0 = __p0_0; \
   __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
   __ret_0; \
 })
 #else
 #define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
+  poly8x8_t __ret_1; \
   poly8x8_t __s0_1 = __p0_1; \
   poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_1; \
   __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
   __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_1; \
@@ -5964,16 +5964,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
-  poly16x4_t __s0_2 = __p0_2; \
   poly16x4_t __ret_2; \
+  poly16x4_t __s0_2 = __p0_2; \
   __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
   __ret_2; \
 })
 #else
 #define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
+  poly16x4_t __ret_3; \
   poly16x4_t __s0_3 = __p0_3; \
   poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
-  poly16x4_t __ret_3; \
   __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
   __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
   __ret_3; \
@@ -5982,16 +5982,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
-  poly8x8_t __s0_4 = __p0_4; \
   poly8x16_t __ret_4; \
+  poly8x8_t __s0_4 = __p0_4; \
   __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
   __ret_4; \
 })
 #else
 #define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
+  poly8x16_t __ret_5; \
   poly8x8_t __s0_5 = __p0_5; \
   poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_5; \
   __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
   __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_5; \
@@ -6000,16 +6000,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
-  poly16x4_t __s0_6 = __p0_6; \
   poly16x8_t __ret_6; \
+  poly16x4_t __s0_6 = __p0_6; \
   __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
   __ret_6; \
 })
 #else
 #define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
+  poly16x8_t __ret_7; \
   poly16x4_t __s0_7 = __p0_7; \
   poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
-  poly16x8_t __ret_7; \
   __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
   __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_7; \
@@ -6018,16 +6018,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
-  uint8x8_t __s0_8 = __p0_8; \
   uint8x16_t __ret_8; \
+  uint8x8_t __s0_8 = __p0_8; \
   __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
   __ret_8; \
 })
 #else
 #define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
+  uint8x16_t __ret_9; \
   uint8x8_t __s0_9 = __p0_9; \
   uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_9; \
   __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
   __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_9; \
@@ -6036,16 +6036,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
-  uint32x2_t __s0_10 = __p0_10; \
   uint32x4_t __ret_10; \
+  uint32x2_t __s0_10 = __p0_10; \
   __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
   __ret_10; \
 })
 #else
 #define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
+  uint32x4_t __ret_11; \
   uint32x2_t __s0_11 = __p0_11; \
   uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
-  uint32x4_t __ret_11; \
   __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
   __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
   __ret_11; \
@@ -6054,15 +6054,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
-  uint64x1_t __s0_12 = __p0_12; \
   uint64x2_t __ret_12; \
+  uint64x1_t __s0_12 = __p0_12; \
   __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
   __ret_12; \
 })
 #else
 #define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
-  uint64x1_t __s0_13 = __p0_13; \
   uint64x2_t __ret_13; \
+  uint64x1_t __s0_13 = __p0_13; \
   __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
   __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
   __ret_13; \
@@ -6071,16 +6071,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
-  uint16x4_t __s0_14 = __p0_14; \
   uint16x8_t __ret_14; \
+  uint16x4_t __s0_14 = __p0_14; \
   __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
   __ret_14; \
 })
 #else
 #define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
+  uint16x8_t __ret_15; \
   uint16x4_t __s0_15 = __p0_15; \
   uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
-  uint16x8_t __ret_15; \
   __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
   __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_15; \
@@ -6089,16 +6089,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
-  int8x8_t __s0_16 = __p0_16; \
   int8x16_t __ret_16; \
+  int8x8_t __s0_16 = __p0_16; \
   __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
   __ret_16; \
 })
 #else
 #define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
+  int8x16_t __ret_17; \
   int8x8_t __s0_17 = __p0_17; \
   int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_17; \
   __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
   __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_17; \
@@ -6107,16 +6107,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
-  float32x2_t __s0_18 = __p0_18; \
   float32x4_t __ret_18; \
+  float32x2_t __s0_18 = __p0_18; \
   __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
   __ret_18; \
 })
 #else
 #define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
+  float32x4_t __ret_19; \
   float32x2_t __s0_19 = __p0_19; \
   float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
-  float32x4_t __ret_19; \
   __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
   __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
   __ret_19; \
@@ -6124,194 +6124,230 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
-  int32x2_t __s0_20 = __p0_20; \
-  int32x4_t __ret_20; \
-  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
+#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \
+  float16x8_t __ret_20; \
+  float16x4_t __s0_20 = __p0_20; \
+  __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \
   __ret_20; \
 })
 #else
-#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
-  int32x2_t __s0_21 = __p0_21; \
-  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  int32x4_t __ret_21; \
-  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
+#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \
+  float16x8_t __ret_21; \
+  float16x4_t __s0_21 = __p0_21; \
+  float16x4_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \
+  __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \
+  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_21; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
-  int64x1_t __s0_22 = __p0_22; \
-  int64x2_t __ret_22; \
-  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
+#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \
+  int32x4_t __ret_22; \
+  int32x2_t __s0_22 = __p0_22; \
+  __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \
   __ret_22; \
 })
 #else
-#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
-  int64x1_t __s0_23 = __p0_23; \
-  int64x2_t __ret_23; \
-  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
+#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \
+  int32x4_t __ret_23; \
+  int32x2_t __s0_23 = __p0_23; \
+  int32x2_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \
+  __ret_23 = __noswap_splatq_lane_s32(__rev0_23, __p1_23); \
+  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \
   __ret_23; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
-  int16x4_t __s0_24 = __p0_24; \
-  int16x8_t __ret_24; \
-  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
+#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \
+  int64x2_t __ret_24; \
+  int64x1_t __s0_24 = __p0_24; \
+  __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \
   __ret_24; \
 })
 #else
-#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
-  int16x4_t __s0_25 = __p0_25; \
-  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
-  int16x8_t __ret_25; \
-  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \
+  int64x2_t __ret_25; \
+  int64x1_t __s0_25 = __p0_25; \
+  __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \
+  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \
   __ret_25; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
-  uint8x8_t __s0_26 = __p0_26; \
-  uint8x8_t __ret_26; \
-  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
+#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \
+  int16x8_t __ret_26; \
+  int16x4_t __s0_26 = __p0_26; \
+  __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \
   __ret_26; \
 })
 #else
-#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
-  uint8x8_t __s0_27 = __p0_27; \
-  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_27; \
-  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
+#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \
+  int16x8_t __ret_27; \
+  int16x4_t __s0_27 = __p0_27; \
+  int16x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
+  __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \
   __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_27; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
-  uint32x2_t __s0_28 = __p0_28; \
-  uint32x2_t __ret_28; \
-  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
+#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \
+  uint8x8_t __ret_28; \
+  uint8x8_t __s0_28 = __p0_28; \
+  __ret_28 = splat_lane_u8(__s0_28, __p1_28); \
   __ret_28; \
 })
 #else
-#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
-  uint32x2_t __s0_29 = __p0_29; \
-  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
-  uint32x2_t __ret_29; \
-  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \
+  uint8x8_t __ret_29; \
+  uint8x8_t __s0_29 = __p0_29; \
+  uint8x8_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \
+  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_29; \
 })
 #endif
 
-#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
-  uint64x1_t __s0_30 = __p0_30; \
-  uint64x1_t __ret_30; \
-  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \
+  uint32x2_t __ret_30; \
+  uint32x2_t __s0_30 = __p0_30; \
+  __ret_30 = splat_lane_u32(__s0_30, __p1_30); \
   __ret_30; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
-  uint16x4_t __s0_31 = __p0_31; \
-  uint16x4_t __ret_31; \
-  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
-  __ret_31; \
-})
 #else
-#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
-  uint16x4_t __s0_32 = __p0_32; \
-  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
-  uint16x4_t __ret_32; \
-  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
-  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
-  __ret_32; \
+#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \
+  uint32x2_t __ret_31; \
+  uint32x2_t __s0_31 = __p0_31; \
+  uint32x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
+  __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \
+  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
+  __ret_31; \
 })
 #endif
 
+#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \
+  uint64x1_t __ret_32; \
+  uint64x1_t __s0_32 = __p0_32; \
+  __ret_32 = splat_lane_u64(__s0_32, __p1_32); \
+  __ret_32; \
+})
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
-  int8x8_t __s0_33 = __p0_33; \
-  int8x8_t __ret_33; \
-  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
+#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \
+  uint16x4_t __ret_33; \
+  uint16x4_t __s0_33 = __p0_33; \
+  __ret_33 = splat_lane_u16(__s0_33, __p1_33); \
   __ret_33; \
 })
 #else
-#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
-  int8x8_t __s0_34 = __p0_34; \
-  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_34; \
-  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
-  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \
+  uint16x4_t __ret_34; \
+  uint16x4_t __s0_34 = __p0_34; \
+  uint16x4_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \
+  __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \
+  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \
   __ret_34; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
-  float32x2_t __s0_35 = __p0_35; \
-  float32x2_t __ret_35; \
-  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
+#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \
+  int8x8_t __ret_35; \
+  int8x8_t __s0_35 = __p0_35; \
+  __ret_35 = splat_lane_s8(__s0_35, __p1_35); \
   __ret_35; \
 })
 #else
-#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
-  float32x2_t __s0_36 = __p0_36; \
-  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
-  float32x2_t __ret_36; \
-  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
-  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
+#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \
+  int8x8_t __ret_36; \
+  int8x8_t __s0_36 = __p0_36; \
+  int8x8_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \
+  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_36; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
-  int32x2_t __s0_37 = __p0_37; \
-  int32x2_t __ret_37; \
-  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
+#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \
+  float32x2_t __ret_37; \
+  float32x2_t __s0_37 = __p0_37; \
+  __ret_37 = splat_lane_f32(__s0_37, __p1_37); \
   __ret_37; \
 })
 #else
-#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
-  int32x2_t __s0_38 = __p0_38; \
-  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
-  int32x2_t __ret_38; \
-  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
+#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \
+  float32x2_t __ret_38; \
+  float32x2_t __s0_38 = __p0_38; \
+  float32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
+  __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \
   __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
   __ret_38; \
 })
 #endif
 
-#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
-  int64x1_t __s0_39 = __p0_39; \
-  int64x1_t __ret_39; \
-  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \
+  float16x4_t __ret_39; \
+  float16x4_t __s0_39 = __p0_39; \
+  __ret_39 = splat_lane_f16(__s0_39, __p1_39); \
   __ret_39; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
-  int16x4_t __s0_40 = __p0_40; \
-  int16x4_t __ret_40; \
-  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
+#else
+#define vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \
+  float16x4_t __ret_40; \
+  float16x4_t __s0_40 = __p0_40; \
+  float16x4_t __rev0_40;  __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \
+  __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \
+  __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \
   __ret_40; \
 })
-#else
-#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
-  int16x4_t __s0_41 = __p0_41; \
-  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
-  int16x4_t __ret_41; \
-  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \
+  int32x2_t __ret_41; \
+  int32x2_t __s0_41 = __p0_41; \
+  __ret_41 = splat_lane_s32(__s0_41, __p1_41); \
   __ret_41; \
 })
+#else
+#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \
+  int32x2_t __ret_42; \
+  int32x2_t __s0_42 = __p0_42; \
+  int32x2_t __rev0_42;  __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \
+  __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \
+  __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 1, 0); \
+  __ret_42; \
+})
+#endif
+
+#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \
+  int64x1_t __ret_43; \
+  int64x1_t __s0_43 = __p0_43; \
+  __ret_43 = splat_lane_s64(__s0_43, __p1_43); \
+  __ret_43; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \
+  int16x4_t __ret_44; \
+  int16x4_t __s0_44 = __p0_44; \
+  __ret_44 = splat_lane_s16(__s0_44, __p1_44); \
+  __ret_44; \
+})
+#else
+#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \
+  int16x4_t __ret_45; \
+  int16x4_t __s0_45 = __p0_45; \
+  int16x4_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \
+  __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \
+  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \
+  __ret_45; \
+})
 #endif
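/*
 * vdup_lane_<type> broadcasts one lane of a 64-bit vector into every lane of
 * the result; the lane index must be a compile-time constant. Illustrative
 * usage (an assumed example, not from the header):
 */
static inline uint32x2_t broadcast_lane1_u32(uint32x2_t v) {
  return vdup_lane_u32(v, 1); /* both result lanes hold v[1] */
}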
 
 #ifdef __LITTLE_ENDIAN__
@@ -6466,15 +6502,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6608,15 +6644,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
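/*
 * vdupq_n_f16 / vdup_n_f16 build a vector by replicating one scalar into all
 * eight (q-form) or four lanes; on big-endian targets the initializer list is
 * built in little-endian order and then reversed. Hypothetical usage,
 * assuming the target supports __fp16:
 */
static inline float16x4_t splat_half(float16_t x) {
  return vdup_n_f16(x); /* {x, x, x, x} */
}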
@@ -6666,9 +6702,9 @@
 }
 #else
 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6683,9 +6719,9 @@
 }
 #else
 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6700,9 +6736,9 @@
 }
 #else
 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6717,9 +6753,9 @@
 }
 #else
 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6734,9 +6770,9 @@
 }
 #else
 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6751,9 +6787,9 @@
 }
 #else
 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6768,9 +6804,9 @@
 }
 #else
 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6785,9 +6821,9 @@
 }
 #else
 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6802,9 +6838,9 @@
 }
 #else
 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6819,9 +6855,9 @@
 }
 #else
 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6841,9 +6877,9 @@
 }
 #else
 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -6858,9 +6894,9 @@
 }
 #else
 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -6875,9 +6911,9 @@
 }
 #else
 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -6897,9 +6933,9 @@
 }
 #else
 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 ^ __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
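/*
 * veor / veorq are plain lane-wise XOR (the __rev0 ^ __rev1 above); the
 * surrounding shuffles exist only to keep big-endian lane numbering
 * consistent and cancel out. A sketch of typical use:
 */
static inline uint8x16_t xor_bytes(uint8x16_t a, uint8x16_t b) {
  return veorq_u8(a, b); /* ret[i] = a[i] ^ b[i] */
}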
@@ -6908,19 +6944,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6929,19 +6965,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -6950,19 +6986,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6971,19 +7007,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -6992,19 +7028,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7013,19 +7049,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7034,19 +7070,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7055,19 +7091,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7076,19 +7112,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7097,19 +7133,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
   __ret; \
 })
 #else
 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7118,19 +7154,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7139,19 +7175,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7160,19 +7196,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7181,19 +7217,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7202,19 +7238,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7222,27 +7258,27 @@
 #endif
 
 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -7251,19 +7287,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -7272,19 +7308,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
   __ret; \
 })
 #else
 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7293,19 +7329,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -7313,27 +7349,27 @@
 #endif
 
 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
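/*
 * vext / vextq concatenate two vectors and extract a window starting at a
 * constant lane offset: vext_u8(a, b, 3) yields {a[3]..a[7], b[0]..b[2]}.
 * Illustrative sketch of a one-byte sliding window:
 */
static inline uint8x8_t slide_one_byte(uint8x8_t a, uint8x8_t b) {
  return vext_u8(a, b, 1); /* {a[1]..a[7], b[0]} */
}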
@@ -7348,8 +7384,8 @@
 }
 #else
 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7369,8 +7405,8 @@
 }
 #else
 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x4_t __ret;
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7385,8 +7421,8 @@
 }
 #else
 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7406,8 +7442,8 @@
 }
 #else
 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7427,8 +7463,8 @@
 }
 #else
 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
   return __ret;
 }
@@ -7442,8 +7478,8 @@
 }
 #else
 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7463,8 +7499,8 @@
 }
 #else
 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -7484,8 +7520,8 @@
 }
 #else
 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7505,8 +7541,8 @@
 }
 #else
 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -7526,8 +7562,8 @@
 }
 #else
 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -7547,8 +7583,8 @@
 }
 #else
 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
   return __ret;
 }
@@ -7562,8 +7598,8 @@
 }
 #else
 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
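/*
 * vget_high_<type> returns the upper half of a 128-bit vector as a 64-bit
 * vector; the big-endian variants above reverse the input, select lanes
 * N/2..N-1, and reverse back. Assumed usage example:
 */
static inline uint32x2_t upper_pair(uint32x4_t v) {
  return vget_high_u32(v); /* {v[2], v[3]} */
}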
@@ -7577,22 +7613,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7600,22 +7636,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7623,22 +7659,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7646,22 +7682,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7669,22 +7705,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7692,22 +7728,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7715,22 +7751,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7738,22 +7774,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7761,22 +7797,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
   __ret; \
 })
@@ -7784,22 +7820,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
   float32_t __ret; \
+  float32x4_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7807,22 +7843,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7830,22 +7866,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7853,22 +7889,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7876,22 +7912,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7899,51 +7935,51 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
 
 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
@@ -7951,22 +7987,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
   __ret; \
 })
@@ -7974,22 +8010,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
   float32_t __ret; \
+  float32x2_t __s0 = __p0; \
   __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
@@ -7997,51 +8033,51 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
 
 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
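/*
 * vget_lane / vgetq_lane extract a single scalar at a constant lane index;
 * the __noswap_* spellings are internal helpers used where the operand has
 * already been byte-reversed. Hypothetical usage:
 */
static inline uint32_t third_lane(uint32x4_t v) {
  return vgetq_lane_u32(v, 2); /* scalar copy of v[2] */
}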
@@ -8055,8 +8091,8 @@
 }
 #else
 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8071,8 +8107,8 @@
 }
 #else
 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+  poly16x4_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8087,8 +8123,8 @@
 }
 #else
 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+  uint8x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8103,8 +8139,8 @@
 }
 #else
 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8119,8 +8155,8 @@
 }
 #else
 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
   return __ret;
 }
@@ -8134,8 +8170,8 @@
 }
 #else
 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8150,8 +8186,8 @@
 }
 #else
 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8166,8 +8202,8 @@
 }
 #else
 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8182,8 +8218,8 @@
 }
 #else
 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8198,8 +8234,8 @@
 }
 #else
 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8214,8 +8250,8 @@
 }
 #else
 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
   return __ret;
 }
@@ -8229,8 +8265,8 @@
 }
 #else
 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
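/*
 * vget_low_<type> is the counterpart of vget_high: it returns lanes 0..N/2-1
 * of a 128-bit vector. Assumed usage example:
 */
static inline uint8x8_t lower_bytes(uint8x16_t v) {
  return vget_low_u8(v); /* {v[0]..v[7]} */
}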
@@ -8245,9 +8281,9 @@
 }
 #else
 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8262,9 +8298,9 @@
 }
 #else
 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8279,9 +8315,9 @@
 }
 #else
 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8296,9 +8332,9 @@
 }
 #else
 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8313,9 +8349,9 @@
 }
 #else
 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8330,9 +8366,9 @@
 }
 #else
 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8347,9 +8383,9 @@
 }
 #else
 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8364,9 +8400,9 @@
 }
 #else
 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8381,9 +8417,9 @@
 }
 #else
 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8398,9 +8434,9 @@
 }
 #else
 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8415,9 +8451,9 @@
 }
 #else
 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8432,9 +8468,9 @@
 }
 #else
 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8449,9 +8485,9 @@
 }
 #else
 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8466,9 +8502,9 @@
 }
 #else
 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8483,9 +8519,9 @@
 }
 #else
 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8500,9 +8536,9 @@
 }
 #else
 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8517,9 +8553,9 @@
 }
 #else
 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8534,9 +8570,9 @@
 }
 #else
 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8551,9 +8587,9 @@
 }
 #else
 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8568,9 +8604,9 @@
 }
 #else
 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8585,9 +8621,9 @@
 }
 #else
 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -8602,9 +8638,9 @@
 }
 #else
 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -8619,9 +8655,9 @@
 }
 #else
 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -8636,9 +8672,9 @@
 }
 #else
 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -9267,16 +9303,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
+  poly8x8_t __s1 = __p1; \
   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9285,16 +9321,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
+  poly16x4_t __s1 = __p1; \
   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9303,16 +9339,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
+  poly8x16_t __s1 = __p1; \
   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9321,16 +9357,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
+  poly16x8_t __s1 = __p1; \
   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9339,16 +9375,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
   uint8x16_t __ret; \
+  uint8x16_t __s1 = __p1; \
   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9357,16 +9393,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
+  uint32x4_t __s1 = __p1; \
   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9375,16 +9411,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
+  uint64x2_t __s1 = __p1; \
   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9393,16 +9429,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
+  uint16x8_t __s1 = __p1; \
   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9411,16 +9447,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
   int8x16_t __ret; \
+  int8x16_t __s1 = __p1; \
   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9429,16 +9465,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
+  float32x4_t __s1 = __p1; \
   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
   __ret; \
 })
 #else
 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9447,16 +9483,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
+  int32x4_t __s1 = __p1; \
   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9465,16 +9501,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
+  int64x2_t __s1 = __p1; \
   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9483,16 +9519,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
+  int16x8_t __s1 = __p1; \
   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9501,16 +9537,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
   uint8x8_t __ret; \
+  uint8x8_t __s1 = __p1; \
   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9519,16 +9555,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
+  uint32x2_t __s1 = __p1; \
   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9536,23 +9572,23 @@
 #endif
 
 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
   uint64x1_t __ret; \
+  uint64x1_t __s1 = __p1; \
   __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
+  uint16x4_t __s1 = __p1; \
   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -9561,16 +9597,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
   int8x8_t __ret; \
+  int8x8_t __s1 = __p1; \
   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -9579,16 +9615,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
+  float32x2_t __s1 = __p1; \
   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
   __ret; \
 })
 #else
 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9597,16 +9633,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
+  int32x2_t __s1 = __p1; \
   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -9614,23 +9650,23 @@
 #endif
 
 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
   int64x1_t __ret; \
+  int64x1_t __s1 = __p1; \
   __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
+  int16x4_t __s1 = __p1; \
   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -11415,18 +11451,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
   poly8x8x2_t __ret; \
+  poly8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
   __ret; \
 })
 #else
 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __ret; \
   poly8x8x2_t __s1 = __p1; \
   poly8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11437,18 +11473,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
   poly16x4x2_t __ret; \
+  poly16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
   __ret; \
 })
 #else
 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __ret; \
   poly16x4x2_t __s1 = __p1; \
   poly16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11459,18 +11495,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
   poly16x8x2_t __ret; \
+  poly16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
   __ret; \
 })
 #else
 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __ret; \
   poly16x8x2_t __s1 = __p1; \
   poly16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11481,18 +11517,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
   uint32x4x2_t __ret; \
+  uint32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
   __ret; \
 })
 #else
 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __ret; \
   uint32x4x2_t __s1 = __p1; \
   uint32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11503,18 +11539,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
   uint16x8x2_t __ret; \
+  uint16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
   __ret; \
 })
 #else
 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __ret; \
   uint16x8x2_t __s1 = __p1; \
   uint16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11525,18 +11561,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
   float32x4x2_t __ret; \
+  float32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
   __ret; \
 })
 #else
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __ret; \
   float32x4x2_t __s1 = __p1; \
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11547,18 +11583,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
   int32x4x2_t __ret; \
+  int32x4x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
   __ret; \
 })
 #else
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __ret; \
   int32x4x2_t __s1 = __p1; \
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11569,18 +11605,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
   int16x8x2_t __ret; \
+  int16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
   __ret; \
 })
 #else
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __ret; \
   int16x8x2_t __s1 = __p1; \
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11591,18 +11627,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
   uint8x8x2_t __ret; \
+  uint8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
   __ret; \
 })
 #else
 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __ret; \
   uint8x8x2_t __s1 = __p1; \
   uint8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11613,18 +11649,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
   uint32x2x2_t __ret; \
+  uint32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
   __ret; \
 })
 #else
 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __ret; \
   uint32x2x2_t __s1 = __p1; \
   uint32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11635,18 +11671,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
   uint16x4x2_t __ret; \
+  uint16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
   __ret; \
 })
 #else
 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __ret; \
   uint16x4x2_t __s1 = __p1; \
   uint16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -11657,18 +11693,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
   int8x8x2_t __ret; \
+  int8x8x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
   __ret; \
 })
 #else
 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __ret; \
   int8x8x2_t __s1 = __p1; \
   int8x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11679,18 +11715,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
   float32x2x2_t __ret; \
+  float32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
   __ret; \
 })
 #else
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __ret; \
   float32x2x2_t __s1 = __p1; \
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11701,18 +11737,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
   int32x2x2_t __ret; \
+  int32x2x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
   __ret; \
 })
 #else
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __ret; \
   int32x2x2_t __s1 = __p1; \
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -11723,18 +11759,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
   int16x4x2_t __ret; \
+  int16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
   __ret; \
 })
 #else
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __ret; \
   int16x4x2_t __s1 = __p1; \
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12449,19 +12485,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
   poly8x8x3_t __ret; \
+  poly8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
   __ret; \
 })
 #else
 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x3_t __ret; \
   poly8x8x3_t __s1 = __p1; \
   poly8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12473,19 +12509,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
   poly16x4x3_t __ret; \
+  poly16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
   __ret; \
 })
 #else
 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x3_t __ret; \
   poly16x4x3_t __s1 = __p1; \
   poly16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12497,19 +12533,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
   poly16x8x3_t __ret; \
+  poly16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
   __ret; \
 })
 #else
 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x3_t __ret; \
   poly16x8x3_t __s1 = __p1; \
   poly16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12521,19 +12557,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
   uint32x4x3_t __ret; \
+  uint32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
   __ret; \
 })
 #else
 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x3_t __ret; \
   uint32x4x3_t __s1 = __p1; \
   uint32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12545,19 +12581,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
   uint16x8x3_t __ret; \
+  uint16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
   __ret; \
 })
 #else
 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x3_t __ret; \
   uint16x8x3_t __s1 = __p1; \
   uint16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12569,19 +12605,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
   float32x4x3_t __ret; \
+  float32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
   __ret; \
 })
 #else
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x3_t __ret; \
   float32x4x3_t __s1 = __p1; \
   float32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12593,19 +12629,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
   int32x4x3_t __ret; \
+  int32x4x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
   __ret; \
 })
 #else
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x3_t __ret; \
   int32x4x3_t __s1 = __p1; \
   int32x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12617,19 +12653,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
   int16x8x3_t __ret; \
+  int16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
   __ret; \
 })
 #else
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x3_t __ret; \
   int16x8x3_t __s1 = __p1; \
   int16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12641,19 +12677,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
   uint8x8x3_t __ret; \
+  uint8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
   __ret; \
 })
 #else
 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x3_t __ret; \
   uint8x8x3_t __s1 = __p1; \
   uint8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12665,19 +12701,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
   uint32x2x3_t __ret; \
+  uint32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
   __ret; \
 })
 #else
 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x3_t __ret; \
   uint32x2x3_t __s1 = __p1; \
   uint32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12689,19 +12725,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
   uint16x4x3_t __ret; \
+  uint16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
   __ret; \
 })
 #else
 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x3_t __ret; \
   uint16x4x3_t __s1 = __p1; \
   uint16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -12713,19 +12749,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
   int8x8x3_t __ret; \
+  int8x8x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
   __ret; \
 })
 #else
 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x3_t __ret; \
   int8x8x3_t __s1 = __p1; \
   int8x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12737,19 +12773,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
   float32x2x3_t __ret; \
+  float32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
   __ret; \
 })
 #else
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x3_t __ret; \
   float32x2x3_t __s1 = __p1; \
   float32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12761,19 +12797,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
   int32x2x3_t __ret; \
+  int32x2x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
   __ret; \
 })
 #else
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x3_t __ret; \
   int32x2x3_t __s1 = __p1; \
   int32x2x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -12785,19 +12821,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
   int16x4x3_t __ret; \
+  int16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
   __ret; \
 })
 #else
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x3_t __ret; \
   int16x4x3_t __s1 = __p1; \
   int16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13551,20 +13587,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
   poly8x8x4_t __ret; \
+  poly8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
   __ret; \
 })
 #else
 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x4_t __ret; \
   poly8x8x4_t __s1 = __p1; \
   poly8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13577,20 +13613,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
   poly16x4x4_t __ret; \
+  poly16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
   __ret; \
 })
 #else
 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x4_t __ret; \
   poly16x4x4_t __s1 = __p1; \
   poly16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13603,20 +13639,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
   poly16x8x4_t __ret; \
+  poly16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
   __ret; \
 })
 #else
 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x4_t __ret; \
   poly16x8x4_t __s1 = __p1; \
   poly16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13629,20 +13665,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
   uint32x4x4_t __ret; \
+  uint32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
   __ret; \
 })
 #else
 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x4_t __ret; \
   uint32x4x4_t __s1 = __p1; \
   uint32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13655,20 +13691,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
   uint16x8x4_t __ret; \
+  uint16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
   __ret; \
 })
 #else
 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x4_t __ret; \
   uint16x8x4_t __s1 = __p1; \
   uint16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13681,20 +13717,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
   float32x4x4_t __ret; \
+  float32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
   __ret; \
 })
 #else
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x4_t __ret; \
   float32x4x4_t __s1 = __p1; \
   float32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13707,20 +13743,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
   int32x4x4_t __ret; \
+  int32x4x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
   __ret; \
 })
 #else
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x4_t __ret; \
   int32x4x4_t __s1 = __p1; \
   int32x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13733,20 +13769,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
   int16x8x4_t __ret; \
+  int16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
   __ret; \
 })
 #else
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x4_t __ret; \
   int16x8x4_t __s1 = __p1; \
   int16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13759,20 +13795,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
   uint8x8x4_t __ret; \
+  uint8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
   __ret; \
 })
 #else
 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x4_t __ret; \
   uint8x8x4_t __s1 = __p1; \
   uint8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13785,20 +13821,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
   uint32x2x4_t __ret; \
+  uint32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
   __ret; \
 })
 #else
 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x4_t __ret; \
   uint32x2x4_t __s1 = __p1; \
   uint32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13811,20 +13847,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
   uint16x4x4_t __ret; \
+  uint16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
   __ret; \
 })
 #else
 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x4_t __ret; \
   uint16x4x4_t __s1 = __p1; \
   uint16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13837,20 +13873,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
   int8x8x4_t __ret; \
+  int8x8x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
   __ret; \
 })
 #else
 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x4_t __ret; \
   int8x8x4_t __s1 = __p1; \
   int8x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -13863,20 +13899,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
   float32x2x4_t __ret; \
+  float32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
   __ret; \
 })
 #else
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x4_t __ret; \
   float32x2x4_t __s1 = __p1; \
   float32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13889,20 +13925,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
   int32x2x4_t __ret; \
+  int32x2x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
   __ret; \
 })
 #else
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x4_t __ret; \
   int32x2x4_t __s1 = __p1; \
   int32x2x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
@@ -13915,20 +13951,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
   int16x4x4_t __ret; \
+  int16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
   __ret; \
 })
 #else
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x4_t __ret; \
   int16x4x4_t __s1 = __p1; \
   int16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -13947,9 +13983,9 @@
 }
 #else
 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -13964,9 +14000,9 @@
 }
 #else
 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -13981,9 +14017,9 @@
 }
 #else
 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -13998,9 +14034,9 @@
 }
 #else
 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14015,9 +14051,9 @@
 }
 #else
 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14032,9 +14068,9 @@
 }
 #else
 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14049,9 +14085,9 @@
 }
 #else
 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14066,9 +14102,9 @@
 }
 #else
 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14083,9 +14119,9 @@
 }
 #else
 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14100,9 +14136,9 @@
 }
 #else
 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14117,9 +14153,9 @@
 }
 #else
 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14134,9 +14170,9 @@
 }
 #else
 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14151,9 +14187,9 @@
 }
 #else
 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14168,9 +14204,9 @@
 }
 #else
 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14185,9 +14221,9 @@
 }
 #else
 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14202,9 +14238,9 @@
 }
 #else
 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14219,9 +14255,9 @@
 }
 #else
 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14236,9 +14272,9 @@
 }
 #else
 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14253,9 +14289,9 @@
 }
 #else
 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14270,9 +14306,9 @@
 }
 #else
 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14287,9 +14323,9 @@
 }
 #else
 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14304,9 +14340,9 @@
 }
 #else
 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14321,9 +14357,9 @@
 }
 #else
 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14338,9 +14374,9 @@
 }
 #else
 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14355,9 +14391,9 @@
 }
 #else
 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14372,9 +14408,9 @@
 }
 #else
 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14389,9 +14425,9 @@
 }
 #else
 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14406,9 +14442,9 @@
 }
 #else
 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14423,10 +14459,10 @@
 }
 #else
 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14441,10 +14477,10 @@
 }
 #else
 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14459,10 +14495,10 @@
 }
 #else
 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14477,10 +14513,10 @@
 }
 #else
 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14495,10 +14531,10 @@
 }
 #else
 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14513,10 +14549,10 @@
 }
 #else
 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14531,10 +14567,10 @@
 }
 #else
 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14549,10 +14585,10 @@
 }
 #else
 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14567,10 +14603,10 @@
 }
 #else
 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14585,10 +14621,10 @@
 }
 #else
 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14603,10 +14639,10 @@
 }
 #else
 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14621,10 +14657,10 @@
 }
 #else
 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14639,10 +14675,10 @@
 }
 #else
 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -14657,10 +14693,10 @@
 }
 #else
 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14668,246 +14704,246 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint32x4_t __s0_42 = __p0_42; \
-  uint32x4_t __s1_42 = __p1_42; \
-  uint32x2_t __s2_42 = __p2_42; \
-  uint32x4_t __ret_42; \
-  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
-  __ret_42; \
-})
-#else
-#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint32x4_t __s0_43 = __p0_43; \
-  uint32x4_t __s1_43 = __p1_43; \
-  uint32x2_t __s2_43 = __p2_43; \
-  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
-  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
-  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
-  uint32x4_t __ret_43; \
-  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
-  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
-  __ret_43; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x8_t __s0_44 = __p0_44; \
-  uint16x8_t __s1_44 = __p1_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x8_t __ret_44; \
-  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
-  __ret_44; \
-})
-#else
-#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  uint16x8_t __s0_45 = __p0_45; \
-  uint16x8_t __s1_45 = __p1_45; \
-  uint16x4_t __s2_45 = __p2_45; \
-  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
-  uint16x8_t __ret_45; \
-  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
-  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_45; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  float32x4_t __s0_46 = __p0_46; \
-  float32x4_t __s1_46 = __p1_46; \
-  float32x2_t __s2_46 = __p2_46; \
-  float32x4_t __ret_46; \
-  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
+#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
+  uint32x4_t __ret_46; \
+  uint32x4_t __s0_46 = __p0_46; \
+  uint32x4_t __s1_46 = __p1_46; \
+  uint32x2_t __s2_46 = __p2_46; \
+  __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \
   __ret_46; \
 })
 #else
-#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x4_t __s0_47 = __p0_47; \
-  float32x4_t __s1_47 = __p1_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
-  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
-  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
-  float32x4_t __ret_47; \
-  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
+#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
+  uint32x4_t __ret_47; \
+  uint32x4_t __s0_47 = __p0_47; \
+  uint32x4_t __s1_47 = __p1_47; \
+  uint32x2_t __s2_47 = __p2_47; \
+  uint32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
+  uint32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
+  uint32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
+  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \
   __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
   __ret_47; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  int32x4_t __s0_48 = __p0_48; \
-  int32x4_t __s1_48 = __p1_48; \
-  int32x2_t __s2_48 = __p2_48; \
-  int32x4_t __ret_48; \
-  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
+#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
+  uint16x8_t __ret_48; \
+  uint16x8_t __s0_48 = __p0_48; \
+  uint16x8_t __s1_48 = __p1_48; \
+  uint16x4_t __s2_48 = __p2_48; \
+  __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \
   __ret_48; \
 })
 #else
-#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x4_t __s0_49 = __p0_49; \
-  int32x4_t __s1_49 = __p1_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
-  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
-  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
-  int32x4_t __ret_49; \
-  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
-  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
+#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
+  uint16x8_t __ret_49; \
+  uint16x8_t __s0_49 = __p0_49; \
+  uint16x8_t __s1_49 = __p1_49; \
+  uint16x4_t __s2_49 = __p2_49; \
+  uint16x8_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \
+  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \
+  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_49; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int16x8_t __s0_50 = __p0_50; \
-  int16x8_t __s1_50 = __p1_50; \
-  int16x4_t __s2_50 = __p2_50; \
-  int16x8_t __ret_50; \
-  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
+#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
+  float32x4_t __ret_50; \
+  float32x4_t __s0_50 = __p0_50; \
+  float32x4_t __s1_50 = __p1_50; \
+  float32x2_t __s2_50 = __p2_50; \
+  __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \
   __ret_50; \
 })
 #else
-#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int16x8_t __s0_51 = __p0_51; \
-  int16x8_t __s1_51 = __p1_51; \
-  int16x4_t __s2_51 = __p2_51; \
-  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
-  int16x8_t __ret_51; \
-  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
-  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmlaq_lane_f32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
+  float32x4_t __ret_51; \
+  float32x4_t __s0_51 = __p0_51; \
+  float32x4_t __s1_51 = __p1_51; \
+  float32x2_t __s2_51 = __p2_51; \
+  float32x4_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \
+  float32x4_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \
+  float32x2_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \
+  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \
+  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \
   __ret_51; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  uint32x2_t __s0_52 = __p0_52; \
-  uint32x2_t __s1_52 = __p1_52; \
-  uint32x2_t __s2_52 = __p2_52; \
-  uint32x2_t __ret_52; \
-  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
+#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
+  int32x4_t __ret_52; \
+  int32x4_t __s0_52 = __p0_52; \
+  int32x4_t __s1_52 = __p1_52; \
+  int32x2_t __s2_52 = __p2_52; \
+  __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \
   __ret_52; \
 })
 #else
-#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  uint32x2_t __s0_53 = __p0_53; \
-  uint32x2_t __s1_53 = __p1_53; \
-  uint32x2_t __s2_53 = __p2_53; \
-  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
-  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
-  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
-  uint32x2_t __ret_53; \
-  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
+#define vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
+  int32x4_t __ret_53; \
+  int32x4_t __s0_53 = __p0_53; \
+  int32x4_t __s1_53 = __p1_53; \
+  int32x2_t __s2_53 = __p2_53; \
+  int32x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
+  int32x4_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \
+  int32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
+  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \
+  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
   __ret_53; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  uint16x4_t __s0_54 = __p0_54; \
-  uint16x4_t __s1_54 = __p1_54; \
-  uint16x4_t __s2_54 = __p2_54; \
-  uint16x4_t __ret_54; \
-  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
+#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
+  int16x8_t __ret_54; \
+  int16x8_t __s0_54 = __p0_54; \
+  int16x8_t __s1_54 = __p1_54; \
+  int16x4_t __s2_54 = __p2_54; \
+  __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \
   __ret_54; \
 })
 #else
-#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  uint16x4_t __s0_55 = __p0_55; \
-  uint16x4_t __s1_55 = __p1_55; \
-  uint16x4_t __s2_55 = __p2_55; \
-  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
-  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
-  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
-  uint16x4_t __ret_55; \
-  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
+#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
+  int16x8_t __ret_55; \
+  int16x8_t __s0_55 = __p0_55; \
+  int16x8_t __s1_55 = __p1_55; \
+  int16x4_t __s2_55 = __p2_55; \
+  int16x8_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
+  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \
+  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_55; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  float32x2_t __s0_56 = __p0_56; \
-  float32x2_t __s1_56 = __p1_56; \
-  float32x2_t __s2_56 = __p2_56; \
-  float32x2_t __ret_56; \
-  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
+#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
+  uint32x2_t __ret_56; \
+  uint32x2_t __s0_56 = __p0_56; \
+  uint32x2_t __s1_56 = __p1_56; \
+  uint32x2_t __s2_56 = __p2_56; \
+  __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \
   __ret_56; \
 })
 #else
-#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  float32x2_t __s0_57 = __p0_57; \
-  float32x2_t __s1_57 = __p1_57; \
-  float32x2_t __s2_57 = __p2_57; \
-  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
-  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
-  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
-  float32x2_t __ret_57; \
-  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
+#define vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
+  uint32x2_t __ret_57; \
+  uint32x2_t __s0_57 = __p0_57; \
+  uint32x2_t __s1_57 = __p1_57; \
+  uint32x2_t __s2_57 = __p2_57; \
+  uint32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
+  uint32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
+  uint32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
+  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \
   __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
   __ret_57; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  int32x2_t __s0_58 = __p0_58; \
-  int32x2_t __s1_58 = __p1_58; \
-  int32x2_t __s2_58 = __p2_58; \
-  int32x2_t __ret_58; \
-  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
+#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
+  uint16x4_t __ret_58; \
+  uint16x4_t __s0_58 = __p0_58; \
+  uint16x4_t __s1_58 = __p1_58; \
+  uint16x4_t __s2_58 = __p2_58; \
+  __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \
   __ret_58; \
 })
 #else
-#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  int32x2_t __s0_59 = __p0_59; \
-  int32x2_t __s1_59 = __p1_59; \
-  int32x2_t __s2_59 = __p2_59; \
-  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
-  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
-  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
-  int32x2_t __ret_59; \
-  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
+#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
+  uint16x4_t __ret_59; \
+  uint16x4_t __s0_59 = __p0_59; \
+  uint16x4_t __s1_59 = __p1_59; \
+  uint16x4_t __s2_59 = __p2_59; \
+  uint16x4_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \
+  uint16x4_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \
+  uint16x4_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \
+  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \
+  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \
   __ret_59; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  int16x4_t __s0_60 = __p0_60; \
-  int16x4_t __s1_60 = __p1_60; \
-  int16x4_t __s2_60 = __p2_60; \
-  int16x4_t __ret_60; \
-  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
+#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
+  float32x2_t __ret_60; \
+  float32x2_t __s0_60 = __p0_60; \
+  float32x2_t __s1_60 = __p1_60; \
+  float32x2_t __s2_60 = __p2_60; \
+  __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \
   __ret_60; \
 })
 #else
-#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  int16x4_t __s0_61 = __p0_61; \
-  int16x4_t __s1_61 = __p1_61; \
-  int16x4_t __s2_61 = __p2_61; \
-  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
-  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  int16x4_t __ret_61; \
-  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
+#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
+  float32x2_t __ret_61; \
+  float32x2_t __s0_61 = __p0_61; \
+  float32x2_t __s1_61 = __p1_61; \
+  float32x2_t __s2_61 = __p2_61; \
+  float32x2_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \
+  float32x2_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \
+  float32x2_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \
+  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \
+  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \
   __ret_61; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
+  int32x2_t __ret_62; \
+  int32x2_t __s0_62 = __p0_62; \
+  int32x2_t __s1_62 = __p1_62; \
+  int32x2_t __s2_62 = __p2_62; \
+  __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \
+  __ret_62; \
+})
+#else
+#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
+  int32x2_t __ret_63; \
+  int32x2_t __s0_63 = __p0_63; \
+  int32x2_t __s1_63 = __p1_63; \
+  int32x2_t __s2_63 = __p2_63; \
+  int32x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
+  int32x2_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \
+  int32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
+  __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \
+  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
+  __ret_63; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
+  int16x4_t __ret_64; \
+  int16x4_t __s0_64 = __p0_64; \
+  int16x4_t __s1_64 = __p1_64; \
+  int16x4_t __s2_64 = __p2_64; \
+  __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \
+  __ret_64; \
+})
+#else
+#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
+  int16x4_t __ret_65; \
+  int16x4_t __s0_65 = __p0_65; \
+  int16x4_t __s1_65 = __p1_65; \
+  int16x4_t __s2_65 = __p2_65; \
+  int16x4_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \
+  int16x4_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \
+  int16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
+  __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \
+  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \
+  __ret_65; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
   uint32x4_t __ret;
   __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
@@ -14915,9 +14951,9 @@
 }
 #else
 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14932,9 +14968,9 @@
 }
 #else
 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -14949,9 +14985,9 @@
 }
 #else
 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14966,9 +15002,9 @@
 }
 #else
 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -14983,9 +15019,9 @@
 }
 #else
 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15000,9 +15036,9 @@
 }
 #else
 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15017,9 +15053,9 @@
 }
 #else
 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15034,9 +15070,9 @@
 }
 #else
 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15051,9 +15087,9 @@
 }
 #else
 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15068,9 +15104,9 @@
 }
 #else
 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15085,10 +15121,10 @@
 }
 #else
 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15103,10 +15139,10 @@
 }
 #else
 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15121,10 +15157,10 @@
 }
 #else
 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15139,10 +15175,10 @@
 }
 #else
 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15157,10 +15193,10 @@
 }
 #else
 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15175,10 +15211,10 @@
 }
 #else
 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15193,10 +15229,10 @@
 }
 #else
 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15211,10 +15247,10 @@
 }
 #else
 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15229,10 +15265,10 @@
 }
 #else
 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15247,10 +15283,10 @@
 }
 #else
 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15265,10 +15301,10 @@
 }
 #else
 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15283,10 +15319,10 @@
 }
 #else
 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15301,10 +15337,10 @@
 }
 #else
 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15319,10 +15355,10 @@
 }
 #else
 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1 * __rev2;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15330,246 +15366,246 @@
 #endif
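 
 /* vmls* is the multiply-subtract counterpart of vmla*: the product is
  * subtracted from the accumulator, i.e. __ret = __p0 - __p1 * __p2 per
  * element. With uint32x2_t values {10, 20}, {2, 3} and {4, 5}, vmls_u32
  * returns {10 - 2*4, 20 - 3*5} == {2, 5}. The vmls*_lane variants that
  * follow use a single broadcast lane as the second multiplicand.
  */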
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint32x4_t __s0_62 = __p0_62; \
-  uint32x4_t __s1_62 = __p1_62; \
-  uint32x2_t __s2_62 = __p2_62; \
-  uint32x4_t __ret_62; \
-  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
-  __ret_62; \
-})
-#else
-#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint32x4_t __s0_63 = __p0_63; \
-  uint32x4_t __s1_63 = __p1_63; \
-  uint32x2_t __s2_63 = __p2_63; \
-  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
-  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
-  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint32x4_t __ret_63; \
-  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s1_64 = __p1_64; \
-  uint16x4_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
-  __ret_64; \
-})
-#else
-#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s1_65 = __p1_65; \
-  uint16x4_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  float32x4_t __s0_66 = __p0_66; \
-  float32x4_t __s1_66 = __p1_66; \
-  float32x2_t __s2_66 = __p2_66; \
-  float32x4_t __ret_66; \
-  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
+#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
+  uint32x4_t __ret_66; \
+  uint32x4_t __s0_66 = __p0_66; \
+  uint32x4_t __s1_66 = __p1_66; \
+  uint32x2_t __s2_66 = __p2_66; \
+  __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \
   __ret_66; \
 })
 #else
-#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  float32x4_t __s0_67 = __p0_67; \
-  float32x4_t __s1_67 = __p1_67; \
-  float32x2_t __s2_67 = __p2_67; \
-  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
-  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
-  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
-  float32x4_t __ret_67; \
-  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
+#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
+  uint32x4_t __ret_67; \
+  uint32x4_t __s0_67 = __p0_67; \
+  uint32x4_t __s1_67 = __p1_67; \
+  uint32x2_t __s2_67 = __p2_67; \
+  uint32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
+  uint32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
+  uint32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
+  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \
   __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
   __ret_67; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  int32x4_t __s0_68 = __p0_68; \
-  int32x4_t __s1_68 = __p1_68; \
-  int32x2_t __s2_68 = __p2_68; \
-  int32x4_t __ret_68; \
-  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
+#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
+  uint16x8_t __ret_68; \
+  uint16x8_t __s0_68 = __p0_68; \
+  uint16x8_t __s1_68 = __p1_68; \
+  uint16x4_t __s2_68 = __p2_68; \
+  __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \
   __ret_68; \
 })
 #else
-#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  int32x4_t __s0_69 = __p0_69; \
-  int32x4_t __s1_69 = __p1_69; \
-  int32x2_t __s2_69 = __p2_69; \
-  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
-  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
-  int32x4_t __ret_69; \
-  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
+#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
+  uint16x8_t __ret_69; \
+  uint16x8_t __s0_69 = __p0_69; \
+  uint16x8_t __s1_69 = __p1_69; \
+  uint16x4_t __s2_69 = __p2_69; \
+  uint16x8_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
+  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \
+  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_69; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int16x8_t __s0_70 = __p0_70; \
-  int16x8_t __s1_70 = __p1_70; \
-  int16x4_t __s2_70 = __p2_70; \
-  int16x8_t __ret_70; \
-  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
+#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
+  float32x4_t __ret_70; \
+  float32x4_t __s0_70 = __p0_70; \
+  float32x4_t __s1_70 = __p1_70; \
+  float32x2_t __s2_70 = __p2_70; \
+  __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \
   __ret_70; \
 })
 #else
-#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int16x8_t __s0_71 = __p0_71; \
-  int16x8_t __s1_71 = __p1_71; \
-  int16x4_t __s2_71 = __p2_71; \
-  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int16x8_t __ret_71; \
-  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
+  float32x4_t __ret_71; \
+  float32x4_t __s0_71 = __p0_71; \
+  float32x4_t __s1_71 = __p1_71; \
+  float32x2_t __s2_71 = __p2_71; \
+  float32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
+  float32x4_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \
+  float32x2_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \
+  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \
+  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
   __ret_71; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  uint32x2_t __s0_72 = __p0_72; \
-  uint32x2_t __s1_72 = __p1_72; \
-  uint32x2_t __s2_72 = __p2_72; \
-  uint32x2_t __ret_72; \
-  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
+#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
+  int32x4_t __ret_72; \
+  int32x4_t __s0_72 = __p0_72; \
+  int32x4_t __s1_72 = __p1_72; \
+  int32x2_t __s2_72 = __p2_72; \
+  __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, __p3_72); \
   __ret_72; \
 })
 #else
-#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  uint32x2_t __s0_73 = __p0_73; \
-  uint32x2_t __s1_73 = __p1_73; \
-  uint32x2_t __s2_73 = __p2_73; \
-  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
-  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  uint32x2_t __ret_73; \
-  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
+#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
+  int32x4_t __ret_73; \
+  int32x4_t __s0_73 = __p0_73; \
+  int32x4_t __s1_73 = __p1_73; \
+  int32x2_t __s2_73 = __p2_73; \
+  int32x4_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \
+  int32x4_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \
+  int32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
+  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \
+  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \
   __ret_73; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  uint16x4_t __s0_74 = __p0_74; \
-  uint16x4_t __s1_74 = __p1_74; \
-  uint16x4_t __s2_74 = __p2_74; \
-  uint16x4_t __ret_74; \
-  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
+#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
+  int16x8_t __ret_74; \
+  int16x8_t __s0_74 = __p0_74; \
+  int16x8_t __s1_74 = __p1_74; \
+  int16x4_t __s2_74 = __p2_74; \
+  __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \
   __ret_74; \
 })
 #else
-#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  uint16x4_t __s0_75 = __p0_75; \
-  uint16x4_t __s1_75 = __p1_75; \
-  uint16x4_t __s2_75 = __p2_75; \
-  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
-  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
-  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
-  uint16x4_t __ret_75; \
-  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
+#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
+  int16x8_t __ret_75; \
+  int16x8_t __s0_75 = __p0_75; \
+  int16x8_t __s1_75 = __p1_75; \
+  int16x4_t __s2_75 = __p2_75; \
+  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
+  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \
+  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_75; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  float32x2_t __s0_76 = __p0_76; \
-  float32x2_t __s1_76 = __p1_76; \
-  float32x2_t __s2_76 = __p2_76; \
-  float32x2_t __ret_76; \
-  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
+#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
+  uint32x2_t __ret_76; \
+  uint32x2_t __s0_76 = __p0_76; \
+  uint32x2_t __s1_76 = __p1_76; \
+  uint32x2_t __s2_76 = __p2_76; \
+  __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \
   __ret_76; \
 })
 #else
-#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  float32x2_t __s0_77 = __p0_77; \
-  float32x2_t __s1_77 = __p1_77; \
-  float32x2_t __s2_77 = __p2_77; \
-  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
-  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
-  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
-  float32x2_t __ret_77; \
-  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
+#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
+  uint32x2_t __ret_77; \
+  uint32x2_t __s0_77 = __p0_77; \
+  uint32x2_t __s1_77 = __p1_77; \
+  uint32x2_t __s2_77 = __p2_77; \
+  uint32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
+  uint32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
+  uint32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
+  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \
   __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
   __ret_77; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  int32x2_t __s0_78 = __p0_78; \
-  int32x2_t __s1_78 = __p1_78; \
-  int32x2_t __s2_78 = __p2_78; \
-  int32x2_t __ret_78; \
-  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
+#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
+  uint16x4_t __ret_78; \
+  uint16x4_t __s0_78 = __p0_78; \
+  uint16x4_t __s1_78 = __p1_78; \
+  uint16x4_t __s2_78 = __p2_78; \
+  __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \
   __ret_78; \
 })
 #else
-#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  int32x2_t __s0_79 = __p0_79; \
-  int32x2_t __s1_79 = __p1_79; \
-  int32x2_t __s2_79 = __p2_79; \
-  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
-  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
-  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
-  int32x2_t __ret_79; \
-  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
+#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
+  uint16x4_t __ret_79; \
+  uint16x4_t __s0_79 = __p0_79; \
+  uint16x4_t __s1_79 = __p1_79; \
+  uint16x4_t __s2_79 = __p2_79; \
+  uint16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
+  uint16x4_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \
+  uint16x4_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \
+  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \
+  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
   __ret_79; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  int16x4_t __s0_80 = __p0_80; \
-  int16x4_t __s1_80 = __p1_80; \
-  int16x4_t __s2_80 = __p2_80; \
-  int16x4_t __ret_80; \
-  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
+#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
+  float32x2_t __ret_80; \
+  float32x2_t __s0_80 = __p0_80; \
+  float32x2_t __s1_80 = __p1_80; \
+  float32x2_t __s2_80 = __p2_80; \
+  __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \
   __ret_80; \
 })
 #else
-#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  int16x4_t __s0_81 = __p0_81; \
-  int16x4_t __s1_81 = __p1_81; \
-  int16x4_t __s2_81 = __p2_81; \
-  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
-  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
-  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
-  int16x4_t __ret_81; \
-  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
+#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+  float32x2_t __ret_81; \
+  float32x2_t __s0_81 = __p0_81; \
+  float32x2_t __s1_81 = __p1_81; \
+  float32x2_t __s2_81 = __p2_81; \
+  float32x2_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \
+  float32x2_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \
+  float32x2_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \
+  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \
+  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \
   __ret_81; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
+  int32x2_t __ret_82; \
+  int32x2_t __s0_82 = __p0_82; \
+  int32x2_t __s1_82 = __p1_82; \
+  int32x2_t __s2_82 = __p2_82; \
+  __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \
+  __ret_82; \
+})
+#else
+#define vmls_lane_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
+  int32x2_t __ret_83; \
+  int32x2_t __s0_83 = __p0_83; \
+  int32x2_t __s1_83 = __p1_83; \
+  int32x2_t __s2_83 = __p2_83; \
+  int32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
+  int32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
+  int32x2_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \
+  __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \
+  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
+  __ret_83; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
+  int16x4_t __ret_84; \
+  int16x4_t __s0_84 = __p0_84; \
+  int16x4_t __s1_84 = __p1_84; \
+  int16x4_t __s2_84 = __p2_84; \
+  __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \
+  __ret_84; \
+})
+#else
+#define vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
+  int16x4_t __ret_85; \
+  int16x4_t __s0_85 = __p0_85; \
+  int16x4_t __s1_85 = __p1_85; \
+  int16x4_t __s2_85 = __p2_85; \
+  int16x4_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \
+  int16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
+  int16x4_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \
+  __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_s16(__rev2_85, __p3_85); \
+  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \
+  __ret_85; \
+})
+#endif
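+
+/* The numeric suffixes on the macro locals (__s0_84, __ret_85, and so on)
+ * appear to come from a running counter in the generator; they keep each
+ * statement expression's temporaries unique, so one of these macros can be
+ * nested inside another's arguments without shadowing its locals.
+ */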
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
   uint32x4_t __ret;
   __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
@@ -15577,9 +15613,9 @@
 }
 #else
 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15594,9 +15630,9 @@
 }
 #else
 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15611,9 +15647,9 @@
 }
 #else
 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15628,9 +15664,9 @@
 }
 #else
 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15645,9 +15681,9 @@
 }
 #else
 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -15662,9 +15698,9 @@
 }
 #else
 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15679,9 +15715,9 @@
 }
 #else
 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15696,9 +15732,9 @@
 }
 #else
 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15713,9 +15749,9 @@
 }
 #else
 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -15730,9 +15766,9 @@
 }
 #else
 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -15891,15 +15927,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x8_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -16033,15 +16069,15 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret; \
 })
 #else
 #define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
   float16x4_t __ret; \
+  float16_t __s0 = __p0; \
   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -16091,8 +16127,8 @@
 }
 #else
 __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16112,8 +16148,8 @@
 }
 #else
 __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16133,8 +16169,8 @@
 }
 #else
 __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16154,8 +16190,8 @@
 }
 #else
 __ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16175,8 +16211,8 @@
 }
 #else
 __ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16196,8 +16232,8 @@
 }
 #else
 __ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16217,8 +16253,8 @@
 }
 #else
 __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16238,8 +16274,8 @@
 }
 #else
 __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16259,8 +16295,8 @@
 }
 #else
 __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16280,8 +16316,8 @@
 }
 #else
 __ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16301,8 +16337,8 @@
 }
 #else
 __ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16322,8 +16358,8 @@
 }
 #else
 __ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
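 /* vmovl_* widens each element to twice its width (zero-extending unsigned
  * values, sign-extending signed ones); vmovn_* narrows by keeping only the
  * low half of each element, with no saturation. For illustration, vmovn_u16
  * maps a lane holding 0x0102 to 0x02, whereas the saturating vqmovn_u16
  * would map it to 0xff.
  */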
@@ -16343,9 +16379,9 @@
 }
 #else
 __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16360,9 +16396,9 @@
 }
 #else
 __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16377,9 +16413,9 @@
 }
 #else
 __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16394,9 +16430,9 @@
 }
 #else
 __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16411,9 +16447,9 @@
 }
 #else
 __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16428,9 +16464,9 @@
 }
 #else
 __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16445,9 +16481,9 @@
 }
 #else
 __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16462,9 +16498,9 @@
 }
 #else
 __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16479,9 +16515,9 @@
 }
 #else
 __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16496,9 +16532,9 @@
 }
 #else
 __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16513,9 +16549,9 @@
 }
 #else
 __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16530,9 +16566,9 @@
 }
 #else
 __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16547,9 +16583,9 @@
 }
 #else
 __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16564,9 +16600,9 @@
 }
 #else
 __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 * __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16581,9 +16617,9 @@
 }
 #else
 __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16598,9 +16634,9 @@
 }
 #else
 __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16608,216 +16644,216 @@
 #endif
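 
 /* Unlike the integer and float multiplies above, which lower to the vector
  * `*` operator, the poly8 forms go through __builtin_neon_vmul_v because
  * polynomial multiplication is carry-less (coefficients in GF(2)). For
  * example, 0x03 * 0x03 is 0x05 as poly8 math, since (x+1)*(x+1) = x^2 + 1,
  * rather than the integer result 0x09.
  */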
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
-  uint32x4_t __s0_82 = __p0_82; \
-  uint32x2_t __s1_82 = __p1_82; \
-  uint32x4_t __ret_82; \
-  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
-  __ret_82; \
-})
-#else
-#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
-  uint32x4_t __s0_83 = __p0_83; \
-  uint32x2_t __s1_83 = __p1_83; \
-  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
-  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
-  uint32x4_t __ret_83; \
-  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
-  uint16x8_t __s0_84 = __p0_84; \
-  uint16x4_t __s1_84 = __p1_84; \
-  uint16x8_t __ret_84; \
-  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
-  __ret_84; \
-})
-#else
-#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
-  uint16x8_t __s0_85 = __p0_85; \
-  uint16x4_t __s1_85 = __p1_85; \
-  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
-  uint16x8_t __ret_85; \
-  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
-  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
-  float32x4_t __s0_86 = __p0_86; \
-  float32x2_t __s1_86 = __p1_86; \
-  float32x4_t __ret_86; \
-  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
+#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
+  uint32x4_t __ret_86; \
+  uint32x4_t __s0_86 = __p0_86; \
+  uint32x2_t __s1_86 = __p1_86; \
+  __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \
   __ret_86; \
 })
 #else
-#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
-  float32x4_t __s0_87 = __p0_87; \
-  float32x2_t __s1_87 = __p1_87; \
-  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
-  float32x4_t __ret_87; \
-  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
+#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
+  uint32x4_t __ret_87; \
+  uint32x4_t __s0_87 = __p0_87; \
+  uint32x2_t __s1_87 = __p1_87; \
+  uint32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
+  uint32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
+  __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \
   __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
   __ret_87; \
 })
 #endif
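 
 /* Functionally the *_lane multiplies reduce to broadcast-and-multiply. A
  * sketch in terms of the public intrinsics (a, v and the lane index 1 are
  * illustrative):
  *
  *   vmulq_lane_u32(a, v, 1)
  *     == vmulq_u32(a, vdupq_n_u32(vget_lane_u32(v, 1)))
  *
  * i.e. lane 1 of the 64-bit vector v is broadcast across all four 32-bit
  * lanes and multiplied elementwise; in both spellings the lane index must
  * be a compile-time constant.
  */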
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
-  int32x4_t __s0_88 = __p0_88; \
-  int32x2_t __s1_88 = __p1_88; \
-  int32x4_t __ret_88; \
-  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
+#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \
+  uint16x8_t __ret_88; \
+  uint16x8_t __s0_88 = __p0_88; \
+  uint16x4_t __s1_88 = __p1_88; \
+  __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \
   __ret_88; \
 })
 #else
-#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
-  int32x4_t __s0_89 = __p0_89; \
-  int32x2_t __s1_89 = __p1_89; \
-  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
-  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
-  int32x4_t __ret_89; \
-  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
+#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \
+  uint16x8_t __ret_89; \
+  uint16x8_t __s0_89 = __p0_89; \
+  uint16x4_t __s1_89 = __p1_89; \
+  uint16x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \
+  __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \
+  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_89; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
-  int16x8_t __s0_90 = __p0_90; \
-  int16x4_t __s1_90 = __p1_90; \
-  int16x8_t __ret_90; \
-  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
+#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \
+  float32x4_t __ret_90; \
+  float32x4_t __s0_90 = __p0_90; \
+  float32x2_t __s1_90 = __p1_90; \
+  __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \
   __ret_90; \
 })
 #else
-#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
-  int16x8_t __s0_91 = __p0_91; \
-  int16x4_t __s1_91 = __p1_91; \
-  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
-  int16x8_t __ret_91; \
-  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \
+  float32x4_t __ret_91; \
+  float32x4_t __s0_91 = __p0_91; \
+  float32x2_t __s1_91 = __p1_91; \
+  float32x4_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \
+  float32x2_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \
+  __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \
+  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \
   __ret_91; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
-  uint32x2_t __s0_92 = __p0_92; \
-  uint32x2_t __s1_92 = __p1_92; \
-  uint32x2_t __ret_92; \
-  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
+#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
+  int32x4_t __ret_92; \
+  int32x4_t __s0_92 = __p0_92; \
+  int32x2_t __s1_92 = __p1_92; \
+  __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \
   __ret_92; \
 })
 #else
-#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
-  uint32x2_t __s0_93 = __p0_93; \
-  uint32x2_t __s1_93 = __p1_93; \
-  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
-  uint32x2_t __ret_93; \
-  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
+#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
+  int32x4_t __ret_93; \
+  int32x4_t __s0_93 = __p0_93; \
+  int32x2_t __s1_93 = __p1_93; \
+  int32x4_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \
+  int32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
+  __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \
+  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \
   __ret_93; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
-  uint16x4_t __s0_94 = __p0_94; \
-  uint16x4_t __s1_94 = __p1_94; \
-  uint16x4_t __ret_94; \
-  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
+#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
+  int16x8_t __ret_94; \
+  int16x8_t __s0_94 = __p0_94; \
+  int16x4_t __s1_94 = __p1_94; \
+  __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \
   __ret_94; \
 })
 #else
-#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
-  uint16x4_t __s0_95 = __p0_95; \
-  uint16x4_t __s1_95 = __p1_95; \
-  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
-  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
-  uint16x4_t __ret_95; \
-  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
-  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
+#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
+  int16x8_t __ret_95; \
+  int16x8_t __s0_95 = __p0_95; \
+  int16x4_t __s1_95 = __p1_95; \
+  int16x8_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
+  __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \
+  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_95; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
-  float32x2_t __s0_96 = __p0_96; \
-  float32x2_t __s1_96 = __p1_96; \
-  float32x2_t __ret_96; \
-  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
+#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
+  uint32x2_t __ret_96; \
+  uint32x2_t __s0_96 = __p0_96; \
+  uint32x2_t __s1_96 = __p1_96; \
+  __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \
   __ret_96; \
 })
 #else
-#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
-  float32x2_t __s0_97 = __p0_97; \
-  float32x2_t __s1_97 = __p1_97; \
-  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
-  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
-  float32x2_t __ret_97; \
-  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
+#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
+  uint32x2_t __ret_97; \
+  uint32x2_t __s0_97 = __p0_97; \
+  uint32x2_t __s1_97 = __p1_97; \
+  uint32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
+  uint32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
+  __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \
   __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
   __ret_97; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
-  int32x2_t __s0_98 = __p0_98; \
-  int32x2_t __s1_98 = __p1_98; \
-  int32x2_t __ret_98; \
-  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
+#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \
+  uint16x4_t __ret_98; \
+  uint16x4_t __s0_98 = __p0_98; \
+  uint16x4_t __s1_98 = __p1_98; \
+  __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \
   __ret_98; \
 })
 #else
-#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
-  int32x2_t __s0_99 = __p0_99; \
-  int32x2_t __s1_99 = __p1_99; \
-  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
-  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
-  int32x2_t __ret_99; \
-  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
-  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
+#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \
+  uint16x4_t __ret_99; \
+  uint16x4_t __s0_99 = __p0_99; \
+  uint16x4_t __s1_99 = __p1_99; \
+  uint16x4_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \
+  uint16x4_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \
+  __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \
+  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \
   __ret_99; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
-  int16x4_t __s0_100 = __p0_100; \
-  int16x4_t __s1_100 = __p1_100; \
-  int16x4_t __ret_100; \
-  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
+#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \
+  float32x2_t __ret_100; \
+  float32x2_t __s0_100 = __p0_100; \
+  float32x2_t __s1_100 = __p1_100; \
+  __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \
   __ret_100; \
 })
 #else
-#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
-  int16x4_t __s0_101 = __p0_101; \
-  int16x4_t __s1_101 = __p1_101; \
-  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
-  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
-  int16x4_t __ret_101; \
-  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
-  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
+#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \
+  float32x2_t __ret_101; \
+  float32x2_t __s0_101 = __p0_101; \
+  float32x2_t __s1_101 = __p1_101; \
+  float32x2_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \
+  float32x2_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \
+  __ret_101 = __rev0_101 * __noswap_splat_lane_f32(__rev1_101, __p2_101); \
+  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \
   __ret_101; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
+  int32x2_t __ret_102; \
+  int32x2_t __s0_102 = __p0_102; \
+  int32x2_t __s1_102 = __p1_102; \
+  __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \
+  __ret_102; \
+})
+#else
+#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
+  int32x2_t __ret_103; \
+  int32x2_t __s0_103 = __p0_103; \
+  int32x2_t __s1_103 = __p1_103; \
+  int32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
+  int32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
+  __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \
+  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
+  __ret_103; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
+  int16x4_t __ret_104; \
+  int16x4_t __s0_104 = __p0_104; \
+  int16x4_t __s1_104 = __p1_104; \
+  __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \
+  __ret_104; \
+})
+#else
+#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
+  int16x4_t __ret_105; \
+  int16x4_t __s0_105 = __p0_105; \
+  int16x4_t __s1_105 = __p1_105; \
+  int16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
+  int16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
+  __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \
+  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
+  __ret_105; \
+})
+#endif
+
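The by-lane multiply macros above all share one pattern: the little-endian branch splats the requested lane and multiplies directly, while the big-endian branch shuffles both operands into little-endian lane order, uses the __noswap_ splat helpers, and shuffles the result back, so lane numbering behaves the same on either endianness. A minimal caller sketch, with an invented helper name:

#include <arm_neon.h>

/* Hypothetical helper: multiply every element of a by lane 2 of v. */
int16x4_t scale_by_lane(int16x4_t a, int16x4_t v) {
  return vmul_lane_s16(a, v, 2);  /* { a0*v2, a1*v2, a2*v2, a3*v2 } */
}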
+#ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
   uint32x4_t __ret;
   __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
@@ -16825,8 +16861,8 @@
 }
 #else
 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16841,8 +16877,8 @@
 }
 #else
 __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16857,8 +16893,8 @@
 }
 #else
 __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16873,8 +16909,8 @@
 }
 #else
 __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16889,8 +16925,8 @@
 }
 #else
 __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -16905,8 +16941,8 @@
 }
 #else
 __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (uint32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16921,8 +16957,8 @@
 }
 #else
 __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -16937,8 +16973,8 @@
 }
 #else
 __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (float32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16953,8 +16989,8 @@
 }
 #else
 __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __rev0 * (int32x2_t) {__p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -16969,8 +17005,8 @@
 }
 #else
 __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
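The vmul*_n_* forms above broadcast a scalar into every lane before multiplying; the big-endian bodies differ only in the reverse/restore shuffles wrapped around the same arithmetic. A sketch with an invented helper name:

#include <arm_neon.h>

/* Scale all four float lanes by one scalar. */
float32x4_t scale_all(float32x4_t a, float s) {
  return vmulq_n_f32(a, s);  /* { a0*s, a1*s, a2*s, a3*s } */
}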
@@ -16985,9 +17021,9 @@
 }
 #else
 __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly16x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17007,9 +17043,9 @@
 }
 #else
 __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17029,9 +17065,9 @@
 }
 #else
 __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17051,9 +17087,9 @@
 }
 #else
 __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17073,9 +17109,9 @@
 }
 #else
 __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17095,9 +17131,9 @@
 }
 #else
 __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17117,9 +17153,9 @@
 }
 #else
 __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
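vmull_* is a widening multiply: two 64-bit vectors in, one 128-bit vector out, with each product kept at double the input width so it cannot wrap. Sketch (helper name invented):

#include <arm_neon.h>

/* Each 16-bit pair yields a full 32-bit product. */
int32x4_t widen_mul(int16x4_t a, int16x4_t b) {
  return vmull_s16(a, b);  /* { (int32_t)a0*b0, ..., (int32_t)a3*b3 } */
}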
@@ -17132,90 +17168,90 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
-  uint32x2_t __s0_102 = __p0_102; \
-  uint32x2_t __s1_102 = __p1_102; \
-  uint64x2_t __ret_102; \
-  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
-  __ret_102; \
-})
-#else
-#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
-  uint32x2_t __s0_103 = __p0_103; \
-  uint32x2_t __s1_103 = __p1_103; \
-  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
-  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
-  uint64x2_t __ret_103; \
-  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
-  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
-  __ret_103; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
-  uint16x4_t __s0_104 = __p0_104; \
-  uint16x4_t __s1_104 = __p1_104; \
-  uint32x4_t __ret_104; \
-  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
-  __ret_104; \
-})
-#else
-#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
-  uint16x4_t __s0_105 = __p0_105; \
-  uint16x4_t __s1_105 = __p1_105; \
-  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
-  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
-  uint32x4_t __ret_105; \
-  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
-  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
-  __ret_105; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
-  int32x2_t __s0_106 = __p0_106; \
-  int32x2_t __s1_106 = __p1_106; \
-  int64x2_t __ret_106; \
-  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
+#define vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
+  uint64x2_t __ret_106; \
+  uint32x2_t __s0_106 = __p0_106; \
+  uint32x2_t __s1_106 = __p1_106; \
+  __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \
   __ret_106; \
 })
 #else
-#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
-  int32x2_t __s0_107 = __p0_107; \
-  int32x2_t __s1_107 = __p1_107; \
-  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  int64x2_t __ret_107; \
-  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
+#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
+  uint64x2_t __ret_107; \
+  uint32x2_t __s0_107 = __p0_107; \
+  uint32x2_t __s1_107 = __p1_107; \
+  uint32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
+  uint32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
+  __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \
   __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
   __ret_107; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
-  int16x4_t __s0_108 = __p0_108; \
-  int16x4_t __s1_108 = __p1_108; \
-  int32x4_t __ret_108; \
-  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
+#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
+  uint32x4_t __ret_108; \
+  uint16x4_t __s0_108 = __p0_108; \
+  uint16x4_t __s1_108 = __p1_108; \
+  __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \
   __ret_108; \
 })
 #else
-#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
-  int16x4_t __s0_109 = __p0_109; \
-  int16x4_t __s1_109 = __p1_109; \
-  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
-  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
-  int32x4_t __ret_109; \
-  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
+#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
+  uint32x4_t __ret_109; \
+  uint16x4_t __s0_109 = __p0_109; \
+  uint16x4_t __s1_109 = __p1_109; \
+  uint16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
+  uint16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
+  __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \
   __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
   __ret_109; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \
+  int64x2_t __ret_110; \
+  int32x2_t __s0_110 = __p0_110; \
+  int32x2_t __s1_110 = __p1_110; \
+  __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \
+  __ret_110; \
+})
+#else
+#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \
+  int64x2_t __ret_111; \
+  int32x2_t __s0_111 = __p0_111; \
+  int32x2_t __s1_111 = __p1_111; \
+  int32x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
+  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
+  __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \
+  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
+  __ret_111; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \
+  int32x4_t __ret_112; \
+  int16x4_t __s0_112 = __p0_112; \
+  int16x4_t __s1_112 = __p1_112; \
+  __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \
+  __ret_112; \
+})
+#else
+#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \
+  int32x4_t __ret_113; \
+  int16x4_t __s0_113 = __p0_113; \
+  int16x4_t __s1_113 = __p1_113; \
+  int16x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
+  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
+  __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \
+  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
+  __ret_113; \
+})
+#endif
+
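vmull_lane_* combines the two ideas above: a widening multiply against a single broadcast lane of the second operand. Sketch (helper name invented):

#include <arm_neon.h>

/* Widening multiply of a by lane 1 of v. */
int64x2_t widen_mul_by_lane(int32x2_t a, int32x2_t v) {
  return vmull_lane_s32(a, v, 1);  /* { (int64_t)a0*v1, (int64_t)a1*v1 } */
}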
+#ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
   __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
@@ -17223,8 +17259,8 @@
 }
 #else
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17244,8 +17280,8 @@
 }
 #else
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17265,8 +17301,8 @@
 }
 #else
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17286,8 +17322,8 @@
 }
 #else
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
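vmull_n_* widens while multiplying every lane by a broadcast scalar. A sketch under the same conventions:

#include <arm_neon.h>

/* Widening multiply by a scalar. */
uint32x4_t widen_scale(uint16x4_t a, uint16_t s) {
  return vmull_n_u16(a, s);  /* { (uint32_t)a0*s, ..., (uint32_t)a3*s } */
}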
@@ -17307,8 +17343,8 @@
 }
 #else
 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17323,8 +17359,8 @@
 }
 #else
 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17339,8 +17375,8 @@
 }
 #else
 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17355,8 +17391,8 @@
 }
 #else
 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17371,8 +17407,8 @@
 }
 #else
 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17387,8 +17423,8 @@
 }
 #else
 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17403,8 +17439,8 @@
 }
 #else
 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17419,8 +17455,8 @@
 }
 #else
 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17435,8 +17471,8 @@
 }
 #else
 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17451,8 +17487,8 @@
 }
 #else
 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17467,8 +17503,8 @@
 }
 #else
 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17483,8 +17519,8 @@
 }
 #else
 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17499,8 +17535,8 @@
 }
 #else
 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17515,8 +17551,8 @@
 }
 #else
 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = ~__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
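The vmvn family above is lane-wise bitwise NOT; the endian branches differ only in the surrounding shuffles. Sketch (helper name invented):

#include <arm_neon.h>

/* Invert every bit of a 16-byte vector. */
uint8x16_t invert_bits(uint8x16_t a) {
  return vmvnq_u8(a);  /* ~a in every lane */
}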
@@ -17531,8 +17567,8 @@
 }
 #else
 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17547,8 +17583,8 @@
 }
 #else
 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17563,8 +17599,8 @@
 }
 #else
 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17579,8 +17615,8 @@
 }
 #else
 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17595,8 +17631,8 @@
 }
 #else
 __ai int8x8_t vneg_s8(int8x8_t __p0) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17611,8 +17647,8 @@
 }
 #else
 __ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17627,8 +17663,8 @@
 }
 #else
 __ai int32x2_t vneg_s32(int32x2_t __p0) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17643,8 +17679,8 @@
 }
 #else
 __ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = -__rev0;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
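vneg* is lane-wise arithmetic negation, defined for signed integer and float element types. Sketch:

#include <arm_neon.h>

/* Negate all four float lanes. */
float32x4_t negate_all(float32x4_t a) {
  return vnegq_f32(a);  /* { -a0, -a1, -a2, -a3 } */
}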
@@ -17659,9 +17695,9 @@
 }
 #else
 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17676,9 +17712,9 @@
 }
 #else
 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17693,9 +17729,9 @@
 }
 #else
 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17710,9 +17746,9 @@
 }
 #else
 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17727,9 +17763,9 @@
 }
 #else
 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17744,9 +17780,9 @@
 }
 #else
 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17761,9 +17797,9 @@
 }
 #else
 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17778,9 +17814,9 @@
 }
 #else
 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17795,9 +17831,9 @@
 }
 #else
 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17812,9 +17848,9 @@
 }
 #else
 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17834,9 +17870,9 @@
 }
 #else
 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17851,9 +17887,9 @@
 }
 #else
 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17868,9 +17904,9 @@
 }
 #else
 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17890,9 +17926,9 @@
 }
 #else
 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 | ~__rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
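vorn* computes a | ~b per lane, as the bodies above show. Sketch (helper name invented):

#include <arm_neon.h>

/* Keep the bits of a and additionally set every bit that is clear in mask. */
uint32x4_t or_not(uint32x4_t a, uint32x4_t mask) {
  return vornq_u32(a, mask);  /* a | ~mask */
}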
@@ -17907,9 +17943,9 @@
 }
 #else
 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17924,9 +17960,9 @@
 }
 #else
 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -17941,9 +17977,9 @@
 }
 #else
 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -17958,9 +17994,9 @@
 }
 #else
 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17975,9 +18011,9 @@
 }
 #else
 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -17992,9 +18028,9 @@
 }
 #else
 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18009,9 +18045,9 @@
 }
 #else
 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18026,9 +18062,9 @@
 }
 #else
 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18043,9 +18079,9 @@
 }
 #else
 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18060,9 +18096,9 @@
 }
 #else
 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18082,9 +18118,9 @@
 }
 #else
 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18099,9 +18135,9 @@
 }
 #else
 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18116,9 +18152,9 @@
 }
 #else
 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18138,9 +18174,9 @@
 }
 #else
 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 | __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
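vorr* is a plain lane-wise bitwise OR. Sketch:

#include <arm_neon.h>

/* OR a bit mask into every byte lane. */
uint8x8_t set_mask_bits(uint8x8_t a, uint8x8_t mask) {
  return vorr_u8(a, mask);  /* a | mask */
}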
@@ -18155,9 +18191,9 @@
 }
 #else
 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18172,9 +18208,9 @@
 }
 #else
 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18189,9 +18225,9 @@
 }
 #else
 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18206,9 +18242,9 @@
 }
 #else
 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18223,9 +18259,9 @@
 }
 #else
 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18240,9 +18276,9 @@
 }
 #else
 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18257,9 +18293,9 @@
 }
 #else
 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18274,8 +18310,8 @@
 }
 #else
 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x1_t __ret;
+  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
   return __ret;
 }
@@ -18289,9 +18325,9 @@
 }
 #else
 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18306,9 +18342,9 @@
 }
 #else
 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18323,8 +18359,8 @@
 }
 #else
 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x1_t __ret;
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
   return __ret;
 }
@@ -18338,9 +18374,9 @@
 }
 #else
 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
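vpadal* (pairwise add and accumulate long) sums adjacent element pairs of the second operand at double width and adds them into the accumulator, a common step in byte-sum reductions. Sketch (helper name invented):

#include <arm_neon.h>

/* acc[i] += bytes[2i] + bytes[2i+1], widened to 16 bits. */
uint16x8_t accumulate_bytes(uint16x8_t acc, uint8x16_t bytes) {
  return vpadalq_u8(acc, bytes);
}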
@@ -18355,9 +18391,9 @@
 }
 #else
 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18372,9 +18408,9 @@
 }
 #else
 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18389,9 +18425,9 @@
 }
 #else
 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18406,9 +18442,9 @@
 }
 #else
 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18423,9 +18459,9 @@
 }
 #else
 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18440,9 +18476,9 @@
 }
 #else
 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18457,9 +18493,9 @@
 }
 #else
 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
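vpadd* sums adjacent elements within each operand and concatenates the two halves. Sketch:

#include <arm_neon.h>

/* Horizontal pair sums of two 2-lane vectors. */
float32x2_t pairwise_sum(float32x2_t a, float32x2_t b) {
  return vpadd_f32(a, b);  /* { a0+a1, b0+b1 } */
}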
@@ -18474,8 +18510,8 @@
 }
 #else
 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18490,8 +18526,8 @@
 }
 #else
 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18506,8 +18542,8 @@
 }
 #else
 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18522,8 +18558,8 @@
 }
 #else
 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18538,8 +18574,8 @@
 }
 #else
 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18554,8 +18590,8 @@
 }
 #else
 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18570,8 +18606,8 @@
 }
 #else
 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18586,8 +18622,8 @@
 }
 #else
 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x1_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
   return __ret;
 }
@@ -18601,8 +18637,8 @@
 }
 #else
 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x2_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18617,8 +18653,8 @@
 }
 #else
 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x4_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18633,8 +18669,8 @@
 }
 #else
 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x1_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
   return __ret;
 }
@@ -18648,8 +18684,8 @@
 }
 #else
 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x2_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
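vpaddl* (pairwise add long) sums adjacent pairs at double width, halving the lane count; the 64x1 variants above skip the result shuffle because a one-lane vector has nothing to reverse. Sketch:

#include <arm_neon.h>

/* Adjacent byte pairs summed into 16-bit lanes. */
uint16x4_t pairwise_widen_sum(uint8x8_t a) {
  return vpaddl_u8(a);  /* { a0+a1, a2+a3, a4+a5, a6+a7 } */
}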
@@ -18664,9 +18700,9 @@
 }
 #else
 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18681,9 +18717,9 @@
 }
 #else
 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18698,9 +18734,9 @@
 }
 #else
 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18715,9 +18751,9 @@
 }
 #else
 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18732,9 +18768,9 @@
 }
 #else
 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18749,9 +18785,9 @@
 }
 #else
 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18766,9 +18802,9 @@
 }
 #else
 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18783,9 +18819,9 @@
 }
 #else
 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18800,9 +18836,9 @@
 }
 #else
 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18817,9 +18853,9 @@
 }
 #else
 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18834,9 +18870,9 @@
 }
 #else
 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18851,9 +18887,9 @@
 }
 #else
 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18868,9 +18904,9 @@
 }
 #else
 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18885,9 +18921,9 @@
 }
 #else
 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18902,8 +18938,8 @@
 }
 #else
 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18918,8 +18954,8 @@
 }
 #else
 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18934,8 +18970,8 @@
 }
 #else
 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18950,8 +18986,8 @@
 }
 #else
 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -18966,8 +19002,8 @@
 }
 #else
 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -18982,8 +19018,8 @@
 }
 #else
 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -18998,9 +19034,9 @@
 }
 #else
 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19015,9 +19051,9 @@
 }
 #else
 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19032,9 +19068,9 @@
 }
 #else
 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19049,9 +19085,9 @@
 }
 #else
 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19066,9 +19102,9 @@
 }
 #else
 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19083,18 +19119,13 @@
 }
 #else
 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
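
Note the recurring shape of these hunks: in every big-endian wrapper the only edit to the function body is declaration order, with `__ret` now declared first, ahead of the `__rev` lane-reversal temporaries. Since `__ret` is declared without an initializer, the reordering is purely cosmetic. Separately, this hunk and the ones below delete four `__noswap_` helpers (`__noswap_vqaddq_s32`, `__noswap_vqaddq_s16`, `__noswap_vqadd_s32`, `__noswap_vqadd_s16`). A `__noswap_` variant passes its operands straight to the builtin with no lane reversal; it exists so that other big-endian wrappers, whose inputs are already reversed, can reuse the operation without reversing twice. The deleted helper had this shape (a sketch mirroring the removed lines above; type code 34 is the generator's tag for int32x4_t):

  // No-reversal form: callers hand in lanes that are already reversed,
  // so the operands reach the builtin untouched.
  __ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
    int32x4_t __ret;
    __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
    return __ret;
  }

Presumably these four are dropped because the regenerated header no longer calls them; the `__noswap_vqdmlal_*` and `__noswap_vqdmull_*` helpers that the lane macros further down still reference remain in place.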
 
 #ifdef __LITTLE_ENDIAN__
@@ -19105,9 +19136,9 @@
 }
 #else
 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19122,18 +19153,13 @@
 }
 #else
 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19144,9 +19170,9 @@
 }
 #else
 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19161,9 +19187,9 @@
 }
 #else
 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19183,9 +19209,9 @@
 }
 #else
 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19200,9 +19226,9 @@
 }
 #else
 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19217,18 +19243,13 @@
 }
 #else
 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -19244,18 +19265,13 @@
 }
 #else
 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19266,10 +19282,10 @@
 }
 #else
 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19289,10 +19305,10 @@
 }
 #else
 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19305,50 +19321,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  int64x2_t __s0_110 = __p0_110; \
-  int32x2_t __s1_110 = __p1_110; \
-  int32x2_t __s2_110 = __p2_110; \
-  int64x2_t __ret_110; \
-  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
-  __ret_110; \
+#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
+  int64x2_t __ret_114; \
+  int64x2_t __s0_114 = __p0_114; \
+  int32x2_t __s1_114 = __p1_114; \
+  int32x2_t __s2_114 = __p2_114; \
+  __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
+  __ret_114; \
 })
 #else
-#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  int64x2_t __s0_111 = __p0_111; \
-  int32x2_t __s1_111 = __p1_111; \
-  int32x2_t __s2_111 = __p2_111; \
-  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
-  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
-  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
-  int64x2_t __ret_111; \
-  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
-  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
-  __ret_111; \
+#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
+  int64x2_t __ret_115; \
+  int64x2_t __s0_115 = __p0_115; \
+  int32x2_t __s1_115 = __p1_115; \
+  int32x2_t __s2_115 = __p2_115; \
+  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
+  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
+  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
+  __ret_115 = __noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
+  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
+  __ret_115; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  int32x4_t __s0_112 = __p0_112; \
-  int16x4_t __s1_112 = __p1_112; \
-  int16x4_t __s2_112 = __p2_112; \
-  int32x4_t __ret_112; \
-  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
-  __ret_112; \
+#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
+  int32x4_t __ret_116; \
+  int32x4_t __s0_116 = __p0_116; \
+  int16x4_t __s1_116 = __p1_116; \
+  int16x4_t __s2_116 = __p2_116; \
+  __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
+  __ret_116; \
 })
 #else
-#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  int32x4_t __s0_113 = __p0_113; \
-  int16x4_t __s1_113 = __p1_113; \
-  int16x4_t __s2_113 = __p2_113; \
-  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
-  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
-  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
-  int32x4_t __ret_113; \
-  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
-  __ret_113; \
+#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
+  int32x4_t __ret_117; \
+  int32x4_t __s0_117 = __p0_117; \
+  int16x4_t __s1_117 = __p1_117; \
+  int16x4_t __s2_117 = __p2_117; \
+  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
+  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
+  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
+  __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
+  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
+  __ret_117; \
 })
 #endif
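
The `_110`/`_114`-style numeric suffixes on the locals in these lane macros are unique IDs handed out by the header generator so that nested macro expansions cannot shadow one another's statement-expression locals; the wholesale renumbering here (110 becomes 114, 111 becomes 115, and so on) is a by-product of regeneration, not a behavioral change. Each pair still expands the same way: the little-endian branch calls the intrinsic directly, while the big-endian branch reverses every vector operand, calls the `__noswap_` variant, and reverses the result. A hedged usage sketch (the values and the function name are illustrative):

  #include <arm_neon.h>

  // acc[i] = sat(acc[i] + 2 * b[i] * c[1]), widening each product to 64 bits.
  int64x2_t mac_lane1(int64x2_t acc, int32x2_t b, int32x2_t c) {
    return vqdmlal_lane_s32(acc, b, c, 1);  // lane index must be a constant expression
  }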
 
@@ -19360,9 +19376,9 @@
 }
 #else
 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19382,9 +19398,9 @@
 }
 #else
 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19404,10 +19420,10 @@
 }
 #else
 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19427,10 +19443,10 @@
 }
 #else
 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19443,50 +19459,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  int64x2_t __s0_114 = __p0_114; \
-  int32x2_t __s1_114 = __p1_114; \
-  int32x2_t __s2_114 = __p2_114; \
-  int64x2_t __ret_114; \
-  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
-  __ret_114; \
+#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
+  int64x2_t __ret_118; \
+  int64x2_t __s0_118 = __p0_118; \
+  int32x2_t __s1_118 = __p1_118; \
+  int32x2_t __s2_118 = __p2_118; \
+  __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \
+  __ret_118; \
 })
 #else
-#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  int64x2_t __s0_115 = __p0_115; \
-  int32x2_t __s1_115 = __p1_115; \
-  int32x2_t __s2_115 = __p2_115; \
-  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
-  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
-  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
-  int64x2_t __ret_115; \
-  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
-  __ret_115; \
+#define vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
+  int64x2_t __ret_119; \
+  int64x2_t __s0_119 = __p0_119; \
+  int32x2_t __s1_119 = __p1_119; \
+  int32x2_t __s2_119 = __p2_119; \
+  int64x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
+  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
+  int32x2_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \
+  __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, __noswap_splat_lane_s32(__rev2_119, __p3_119)); \
+  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
+  __ret_119; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  int32x4_t __s0_116 = __p0_116; \
-  int16x4_t __s1_116 = __p1_116; \
-  int16x4_t __s2_116 = __p2_116; \
-  int32x4_t __ret_116; \
-  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
-  __ret_116; \
+#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \
+  int32x4_t __ret_120; \
+  int32x4_t __s0_120 = __p0_120; \
+  int16x4_t __s1_120 = __p1_120; \
+  int16x4_t __s2_120 = __p2_120; \
+  __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \
+  __ret_120; \
 })
 #else
-#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  int32x4_t __s0_117 = __p0_117; \
-  int16x4_t __s1_117 = __p1_117; \
-  int16x4_t __s2_117 = __p2_117; \
-  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
-  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
-  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
-  int32x4_t __ret_117; \
-  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
-  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
-  __ret_117; \
+#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \
+  int32x4_t __ret_121; \
+  int32x4_t __s0_121 = __p0_121; \
+  int16x4_t __s1_121 = __p1_121; \
+  int16x4_t __s2_121 = __p2_121; \
+  int32x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
+  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
+  int16x4_t __rev2_121;  __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \
+  __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \
+  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
+  __ret_121; \
 })
 #endif
 
@@ -19498,9 +19514,9 @@
 }
 #else
 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19520,9 +19536,9 @@
 }
 #else
 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19542,9 +19558,9 @@
 }
 #else
 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19564,9 +19580,9 @@
 }
 #else
 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19586,9 +19602,9 @@
 }
 #else
 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19608,9 +19624,9 @@
 }
 #else
 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19630,8 +19646,8 @@
 }
 #else
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19646,8 +19662,8 @@
 }
 #else
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19662,8 +19678,8 @@
 }
 #else
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19678,8 +19694,8 @@
 }
 #else
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19694,9 +19710,9 @@
 }
 #else
 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19716,9 +19732,9 @@
 }
 #else
 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19731,44 +19747,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
-  int32x2_t __s0_118 = __p0_118; \
-  int32x2_t __s1_118 = __p1_118; \
-  int64x2_t __ret_118; \
-  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
-  __ret_118; \
+#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \
+  int64x2_t __ret_122; \
+  int32x2_t __s0_122 = __p0_122; \
+  int32x2_t __s1_122 = __p1_122; \
+  __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \
+  __ret_122; \
 })
 #else
-#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
-  int32x2_t __s0_119 = __p0_119; \
-  int32x2_t __s1_119 = __p1_119; \
-  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  int64x2_t __ret_119; \
-  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
+#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \
+  int64x2_t __ret_123; \
+  int32x2_t __s0_123 = __p0_123; \
+  int32x2_t __s1_123 = __p1_123; \
+  int32x2_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \
+  int32x2_t __rev1_123;  __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \
+  __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \
+  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \
+  __ret_123; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
-  int16x4_t __s0_120 = __p0_120; \
-  int16x4_t __s1_120 = __p1_120; \
-  int32x4_t __ret_120; \
-  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
-  __ret_120; \
+#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \
+  int32x4_t __ret_124; \
+  int16x4_t __s0_124 = __p0_124; \
+  int16x4_t __s1_124 = __p1_124; \
+  __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \
+  __ret_124; \
 })
 #else
-#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
-  int16x4_t __s0_121 = __p0_121; \
-  int16x4_t __s1_121 = __p1_121; \
-  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
-  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
-  int32x4_t __ret_121; \
-  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
-  __ret_121; \
+#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \
+  int32x4_t __ret_125; \
+  int16x4_t __s0_125 = __p0_125; \
+  int16x4_t __s1_125 = __p1_125; \
+  int16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
+  int16x4_t __rev1_125;  __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \
+  __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \
+  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
+  __ret_125; \
 })
 #endif
 
@@ -19780,8 +19796,8 @@
 }
 #else
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19801,8 +19817,8 @@
 }
 #else
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19822,8 +19838,8 @@
 }
 #else
 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19843,8 +19859,8 @@
 }
 #else
 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19864,8 +19880,8 @@
 }
 #else
 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19885,8 +19901,8 @@
 }
 #else
 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19906,8 +19922,8 @@
 }
 #else
 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19927,8 +19943,8 @@
 }
 #else
 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -19948,8 +19964,8 @@
 }
 #else
 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -19969,8 +19985,8 @@
 }
 #else
 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -19990,8 +20006,8 @@
 }
 #else
 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20011,8 +20027,8 @@
 }
 #else
 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20027,8 +20043,8 @@
 }
 #else
 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20043,8 +20059,8 @@
 }
 #else
 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20059,8 +20075,8 @@
 }
 #else
 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20075,8 +20091,8 @@
 }
 #else
 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20091,8 +20107,8 @@
 }
 #else
 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20107,9 +20123,9 @@
 }
 #else
 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20129,9 +20145,9 @@
 }
 #else
 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20151,9 +20167,9 @@
 }
 #else
 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20173,9 +20189,9 @@
 }
 #else
 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20195,8 +20211,8 @@
 }
 #else
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20211,8 +20227,8 @@
 }
 #else
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20227,8 +20243,8 @@
 }
 #else
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20243,8 +20259,8 @@
 }
 #else
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20259,9 +20275,9 @@
 }
 #else
 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20276,9 +20292,9 @@
 }
 #else
 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20293,9 +20309,9 @@
 }
 #else
 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20310,9 +20326,9 @@
 }
 #else
 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20327,9 +20343,9 @@
 }
 #else
 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20344,9 +20360,9 @@
 }
 #else
 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20361,9 +20377,9 @@
 }
 #else
 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20378,9 +20394,9 @@
 }
 #else
 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20395,9 +20411,9 @@
 }
 #else
 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20412,9 +20428,9 @@
 }
 #else
 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20434,9 +20450,9 @@
 }
 #else
 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20451,9 +20467,9 @@
 }
 #else
 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20468,9 +20484,9 @@
 }
 #else
 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20490,9 +20506,9 @@
 }
 #else
 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20501,23 +20517,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -20525,23 +20541,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -20549,23 +20565,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -20573,23 +20589,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -20597,23 +20613,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -20621,23 +20637,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -20645,23 +20661,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -20669,23 +20685,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -20693,23 +20709,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -20723,9 +20739,9 @@
 }
 #else
 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20740,9 +20756,9 @@
 }
 #else
 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20757,9 +20773,9 @@
 }
 #else
 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20774,9 +20790,9 @@
 }
 #else
 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20791,9 +20807,9 @@
 }
 #else
 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20808,9 +20824,9 @@
 }
 #else
 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20825,9 +20841,9 @@
 }
 #else
 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20842,9 +20858,9 @@
 }
 #else
 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20859,9 +20875,9 @@
 }
 #else
 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20876,9 +20892,9 @@
 }
 #else
 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20898,9 +20914,9 @@
 }
 #else
 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20915,9 +20931,9 @@
 }
 #else
 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -20932,9 +20948,9 @@
 }
 #else
 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -20954,9 +20970,9 @@
 }
 #else
 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -20965,16 +20981,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -20983,16 +20999,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21001,16 +21017,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21019,16 +21035,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21037,16 +21053,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21055,16 +21071,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21073,16 +21089,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21091,16 +21107,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21109,16 +21125,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21127,16 +21143,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21144,23 +21160,23 @@
 #endif
 
 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21169,16 +21185,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21187,16 +21203,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21204,23 +21220,23 @@
 #endif
 
 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21229,16 +21245,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21247,16 +21263,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21265,16 +21281,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21283,16 +21299,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21301,16 +21317,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -21319,16 +21335,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -21336,23 +21352,23 @@
 #endif
 
 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -21361,23 +21377,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -21385,23 +21401,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -21409,23 +21425,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -21433,23 +21449,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -21457,23 +21473,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -21481,23 +21497,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -21505,23 +21521,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -21529,23 +21545,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -21553,23 +21569,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -21583,9 +21599,9 @@
 }
 #else
 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21600,9 +21616,9 @@
 }
 #else
 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21617,9 +21633,9 @@
 }
 #else
 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21634,9 +21650,9 @@
 }
 #else
 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21651,9 +21667,9 @@
 }
 #else
 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21668,18 +21684,13 @@
 }
 #else
 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21690,9 +21701,9 @@
 }
 #else
 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21707,18 +21718,13 @@
 }
 #else
 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21729,9 +21735,9 @@
 }
 #else
 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21746,9 +21752,9 @@
 }
 #else
 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21768,9 +21774,9 @@
 }
 #else
 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21785,9 +21791,9 @@
 }
 #else
 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21802,18 +21808,13 @@
 }
 #else
 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -21829,18 +21830,13 @@
 }
 #else
 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21851,9 +21847,9 @@
 }
 #else
 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21873,9 +21869,9 @@
 }
 #else
 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21895,9 +21891,9 @@
 }
 #else
 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21917,9 +21913,9 @@
 }
 #else
 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21939,9 +21935,9 @@
 }
 #else
 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -21961,9 +21957,9 @@
 }
 #else
 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -21983,8 +21979,8 @@
 }
 #else
 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -21999,8 +21995,8 @@
 }
 #else
 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22015,8 +22011,8 @@
 }
 #else
 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22031,8 +22027,8 @@
 }
 #else
 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22047,9 +22043,9 @@
 }
 #else
 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22064,9 +22060,9 @@
 }
 #else
 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22081,8 +22077,8 @@
 }
 #else
 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22097,8 +22093,8 @@
 }
 #else
 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22113,8 +22109,8 @@
 }
 #else
 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22129,8 +22125,8 @@
 }
 #else
 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22145,8 +22141,8 @@
 }
 #else
 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22161,8 +22157,8 @@
 }
 #else
 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22177,8 +22173,8 @@
 }
 #else
 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22193,8 +22189,8 @@
 }
 #else
 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22209,8 +22205,8 @@
 }
 #else
 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22225,8 +22221,8 @@
 }
 #else
 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22241,8 +22237,8 @@
 }
 #else
 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22257,8 +22253,8 @@
 }
 #else
 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22273,8 +22269,8 @@
 }
 #else
 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22289,8 +22285,8 @@
 }
 #else
 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22305,8 +22301,8 @@
 }
 #else
 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22321,8 +22317,8 @@
 }
 #else
 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22337,8 +22333,8 @@
 }
 #else
 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22353,8 +22349,8 @@
 }
 #else
 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22369,8 +22365,8 @@
 }
 #else
 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22385,8 +22381,8 @@
 }
 #else
 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __ret;
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22401,8 +22397,8 @@
 }
 #else
 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22417,8 +22413,8 @@
 }
 #else
 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __ret;
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22433,8 +22429,8 @@
 }
 #else
 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22449,8 +22445,8 @@
 }
 #else
 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22465,8 +22461,8 @@
 }
 #else
 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22481,8 +22477,8 @@
 }
 #else
 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22497,8 +22493,8 @@
 }
 #else
 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22513,8 +22509,8 @@
 }
 #else
 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22529,8 +22525,8 @@
 }
 #else
 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22545,8 +22541,8 @@
 }
 #else
 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22561,8 +22557,8 @@
 }
 #else
 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22577,8 +22573,8 @@
 }
 #else
 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22593,8 +22589,8 @@
 }
 #else
 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22609,8 +22605,8 @@
 }
 #else
 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22625,8 +22621,8 @@
 }
 #else
 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22641,8 +22637,8 @@
 }
 #else
 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22657,9 +22653,9 @@
 }
 #else
 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22674,9 +22670,9 @@
 }
 #else
 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22691,9 +22687,9 @@
 }
 #else
 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22708,9 +22704,9 @@
 }
 #else
 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22725,9 +22721,9 @@
 }
 #else
 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22742,9 +22738,9 @@
 }
 #else
 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22759,9 +22755,9 @@
 }
 #else
 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22776,9 +22772,9 @@
 }
 #else
 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22793,9 +22789,9 @@
 }
 #else
 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22810,9 +22806,9 @@
 }
 #else
 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22827,9 +22823,9 @@
 }
 #else
 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22844,9 +22840,9 @@
 }
 #else
 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22861,9 +22857,9 @@
 }
 #else
 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22878,9 +22874,9 @@
 }
 #else
 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22895,9 +22891,9 @@
 }
 #else
 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22912,9 +22908,9 @@
 }
 #else
 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22929,9 +22925,9 @@
 }
 #else
 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22946,9 +22942,9 @@
 }
 #else
 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -22963,9 +22959,9 @@
 }
 #else
 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -22980,9 +22976,9 @@
 }
 #else
 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -22997,9 +22993,9 @@
 }
 #else
 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23014,9 +23010,9 @@
 }
 #else
 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23036,9 +23032,9 @@
 }
 #else
 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23053,9 +23049,9 @@
 }
 #else
 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23070,9 +23066,9 @@
 }
 #else
 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23092,9 +23088,9 @@
 }
 #else
 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23103,16 +23099,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23121,16 +23117,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23139,16 +23135,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23157,16 +23153,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23175,16 +23171,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23193,16 +23189,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23211,16 +23207,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23229,16 +23225,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23247,16 +23243,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23265,16 +23261,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23282,23 +23278,23 @@
 #endif
 
 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23307,16 +23303,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23325,16 +23321,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23342,23 +23338,23 @@
 #endif
 
 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23367,23 +23363,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -23391,23 +23387,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -23415,23 +23411,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -23439,23 +23435,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -23463,23 +23459,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -23487,23 +23483,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -23517,8 +23513,8 @@
 }
 #else
 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23533,8 +23529,8 @@
 }
 #else
 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23549,8 +23545,8 @@
 }
 #else
 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23565,8 +23561,8 @@
 }
 #else
 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23581,9 +23577,9 @@
 }
 #else
 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23598,9 +23594,9 @@
 }
 #else
 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23609,19 +23605,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23630,19 +23626,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23651,19 +23647,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23672,19 +23668,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23693,19 +23689,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23714,19 +23710,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23735,19 +23731,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23756,19 +23752,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23777,19 +23773,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23798,19 +23794,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23818,27 +23814,27 @@
 #endif
 
 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23847,19 +23843,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -23868,19 +23864,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -23888,27 +23884,27 @@
 #endif
 
 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -23923,9 +23919,9 @@
 }
 #else
 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -23945,9 +23941,9 @@
 }
 #else
 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -23967,9 +23963,9 @@
 }
 #else
 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -23989,9 +23985,9 @@
 }
 #else
 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24011,9 +24007,9 @@
 }
 #else
 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24033,9 +24029,9 @@
 }
 #else
 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24049,26 +24045,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24076,26 +24072,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24103,26 +24099,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24130,26 +24126,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24157,26 +24153,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24184,26 +24180,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24211,26 +24207,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24238,26 +24234,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24265,26 +24261,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
   __ret; \
 })
@@ -24292,26 +24288,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __ret; \
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24319,26 +24315,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24346,26 +24342,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24373,26 +24369,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24400,26 +24396,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24427,60 +24423,60 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
 
 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24488,26 +24484,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
   __ret; \
 })
@@ -24515,26 +24511,26 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __ret; \
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
@@ -24542,60 +24538,60 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
 
 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
@@ -24609,9 +24605,9 @@
 }
 #else
 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24626,9 +24622,9 @@
 }
 #else
 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24643,9 +24639,9 @@
 }
 #else
 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24660,9 +24656,9 @@
 }
 #else
 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24677,9 +24673,9 @@
 }
 #else
 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24694,9 +24690,9 @@
 }
 #else
 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24711,9 +24707,9 @@
 }
 #else
 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24728,9 +24724,9 @@
 }
 #else
 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24745,9 +24741,9 @@
 }
 #else
 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24762,9 +24758,9 @@
 }
 #else
 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24784,9 +24780,9 @@
 }
 #else
 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24801,9 +24797,9 @@
 }
 #else
 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -24818,9 +24814,9 @@
 }
 #else
 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -24840,9 +24836,9 @@
 }
 #else
 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -24851,16 +24847,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24869,16 +24865,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -24887,16 +24883,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -24905,16 +24901,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24923,16 +24919,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24941,16 +24937,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -24959,16 +24955,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -24977,16 +24973,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -24995,16 +24991,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25013,16 +25009,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25030,23 +25026,23 @@
 #endif
 
 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25055,16 +25051,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25073,16 +25069,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25090,23 +25086,23 @@
 #endif
 
 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25115,23 +25111,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
   __ret; \
 })
@@ -25139,23 +25135,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
   __ret; \
 })
@@ -25163,23 +25159,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
   __ret; \
 })
@@ -25187,23 +25183,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
   __ret; \
 })
@@ -25211,23 +25207,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
   __ret; \
 })
@@ -25235,23 +25231,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
   __ret; \
 })
@@ -25259,16 +25255,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
   uint8x16_t __ret; \
+  uint8x16_t __s0 = __p0; \
   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
   __ret; \
 })
 #else
 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25277,16 +25273,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint32x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
   __ret; \
 })
 #else
 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25295,16 +25291,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
   __ret; \
 })
 #else
 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25313,16 +25309,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
   __ret; \
 })
 #else
 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25331,16 +25327,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
   int8x16_t __ret; \
+  int8x16_t __s0 = __p0; \
   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
   __ret; \
 })
 #else
 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25349,16 +25345,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
   __ret; \
 })
 #else
 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25367,16 +25363,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
   __ret; \
 })
 #else
 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25385,16 +25381,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
   __ret; \
 })
 #else
 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25403,16 +25399,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint8x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25421,16 +25417,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint32x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25438,23 +25434,23 @@
 #endif
 
 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
   uint64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
   __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25463,16 +25459,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int8x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25481,16 +25477,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25498,23 +25494,23 @@
 #endif
 
 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
   int64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
   __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25523,23 +25519,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
 #else
 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
   uint16x4_t __ret; \
+  uint32x4_t __s0 = __p0; \
   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
   __ret; \
 })
@@ -25547,23 +25543,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
 #else
 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint32x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
   uint32x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
   __ret; \
 })
@@ -25571,23 +25567,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
 #else
 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint8x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
   uint8x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
   __ret; \
 })
@@ -25595,23 +25591,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
 #else
 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
   int16x4_t __ret; \
+  int32x4_t __s0 = __p0; \
   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
   __ret; \
 })
@@ -25619,23 +25615,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
 #else
 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int32x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
   int32x2_t __ret; \
+  int64x2_t __s0 = __p0; \
   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
   __ret; \
 })
@@ -25643,23 +25639,23 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
 #else
 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int8x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
 #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
   int8x8_t __ret; \
+  int16x8_t __s0 = __p0; \
   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
   __ret; \
 })
@@ -25667,19 +25663,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25688,19 +25684,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25709,19 +25705,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25730,19 +25726,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25751,19 +25747,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25772,19 +25768,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25793,19 +25789,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25814,19 +25810,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25835,19 +25831,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25856,19 +25852,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25877,19 +25873,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25898,19 +25894,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25919,19 +25915,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -25940,19 +25936,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -25960,27 +25956,27 @@
 #endif
 
 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -25989,19 +25985,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26010,19 +26006,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26030,27 +26026,27 @@
 #endif
 
 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26059,19 +26055,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26080,19 +26076,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26101,19 +26097,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26122,19 +26118,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26143,19 +26139,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26164,19 +26160,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26185,19 +26181,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26206,19 +26202,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26227,19 +26223,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26248,19 +26244,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26268,27 +26264,27 @@
 #endif
 
 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26297,19 +26293,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26318,19 +26314,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26338,27 +26334,27 @@
 #endif
 
 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26367,19 +26363,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
   __ret; \
 })
 #else
 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __ret; \
   poly8x8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26388,19 +26384,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
   __ret; \
 })
 #else
 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __ret; \
   poly16x4_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26409,19 +26405,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
   __ret; \
 })
 #else
 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __ret; \
   poly8x16_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26430,19 +26426,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
   __ret; \
 })
 #else
 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __ret; \
   poly16x8_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26451,19 +26447,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
   __ret; \
 })
 #else
 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __ret; \
   uint8x16_t __s0 = __p0; \
   uint8x16_t __s1 = __p1; \
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26472,19 +26468,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
   __ret; \
 })
 #else
 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26493,19 +26489,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26514,19 +26510,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
   __ret; \
 })
 #else
 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __ret; \
   uint16x8_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26535,19 +26531,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
   __ret; \
 })
 #else
 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __ret; \
   int8x16_t __s0 = __p0; \
   int8x16_t __s1 = __p1; \
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26556,19 +26552,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
   __ret; \
 })
 #else
 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
   int32x4_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26577,19 +26573,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
   __ret; \
 })
 #else
 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __ret; \
   int64x2_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26598,19 +26594,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
   __ret; \
 })
 #else
 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
   int16x8_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26619,19 +26615,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
   __ret; \
 })
 #else
 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __ret; \
   uint8x8_t __s0 = __p0; \
   uint8x8_t __s1 = __p1; \
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26640,19 +26636,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
   __ret; \
 })
 #else
 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __ret; \
   uint32x2_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26660,27 +26656,27 @@
 #endif
 
 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __ret; \
   uint64x1_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
   __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
   __ret; \
 })
 #else
 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __ret; \
   uint16x4_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -26689,19 +26685,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
   __ret; \
 })
 #else
 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __ret; \
   int8x8_t __s0 = __p0; \
   int8x8_t __s1 = __p1; \
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -26710,19 +26706,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
   int32x2_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -26730,27 +26726,27 @@
 #endif
 
 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __ret; \
   int64x1_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
   __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
   int16x4_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -29893,9 +29889,9 @@
 }
 #else
 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29910,9 +29906,9 @@
 }
 #else
 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -29927,9 +29923,9 @@
 }
 #else
 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -29944,9 +29940,9 @@
 }
 #else
 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29961,9 +29957,9 @@
 }
 #else
 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -29978,9 +29974,9 @@
 }
 #else
 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -29995,9 +29991,9 @@
 }
 #else
 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30012,9 +30008,9 @@
 }
 #else
 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30029,9 +30025,9 @@
 }
 #else
 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30046,9 +30042,9 @@
 }
 #else
 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30063,9 +30059,9 @@
 }
 #else
 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30085,9 +30081,9 @@
 }
 #else
 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30102,9 +30098,9 @@
 }
 #else
 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30119,9 +30115,9 @@
 }
 #else
 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30136,9 +30132,9 @@
 }
 #else
 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30158,9 +30154,9 @@
 }
 #else
 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30175,9 +30171,9 @@
 }
 #else
 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30197,9 +30193,9 @@
 }
 #else
 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30219,9 +30215,9 @@
 }
 #else
 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30241,9 +30237,9 @@
 }
 #else
 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30263,9 +30259,9 @@
 }
 #else
 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30285,9 +30281,9 @@
 }
 #else
 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30307,9 +30303,9 @@
 }
 #else
 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30324,9 +30320,9 @@
 }
 #else
 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30341,9 +30337,9 @@
 }
 #else
 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30358,9 +30354,9 @@
 }
 #else
 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30375,9 +30371,9 @@
 }
 #else
 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30392,9 +30388,9 @@
 }
 #else
 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30409,9 +30405,9 @@
 }
 #else
 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30426,9 +30422,9 @@
 }
 #else
 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30443,9 +30439,9 @@
 }
 #else
 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30460,9 +30456,9 @@
 }
 #else
 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30477,9 +30473,9 @@
 }
 #else
 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -30494,9 +30490,9 @@
 }
 #else
 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -30511,9 +30507,9 @@
 }
 #else
 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30528,9 +30524,9 @@
 }
 #else
 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30545,9 +30541,9 @@
 }
 #else
 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30562,11 +30558,11 @@
 }
 #else
 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30581,11 +30577,11 @@
 }
 #else
 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30600,11 +30596,11 @@
 }
 #else
 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x2_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30619,12 +30615,12 @@
 }
 #else
 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30639,12 +30635,12 @@
 }
 #else
 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30659,12 +30655,12 @@
 }
 #else
 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x3_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30679,13 +30675,13 @@
 }
 #else
 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30700,13 +30696,13 @@
 }
 #else
 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30721,13 +30717,13 @@
 }
 #else
 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8x4_t __rev0;
   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30742,10 +30738,10 @@
 }
 #else
 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30760,10 +30756,10 @@
 }
 #else
 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30778,10 +30774,10 @@
 }
 #else
 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30796,12 +30792,12 @@
 }
 #else
 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30816,12 +30812,12 @@
 }
 #else
 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30836,12 +30832,12 @@
 }
 #else
 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x2_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30856,13 +30852,13 @@
 }
 #else
 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30877,13 +30873,13 @@
 }
 #else
 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30898,13 +30894,13 @@
 }
 #else
 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x3_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30919,6 +30915,7 @@
 }
 #else
 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30926,7 +30923,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30941,6 +30937,7 @@
 }
 #else
 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30948,7 +30945,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30963,6 +30959,7 @@
 }
 #else
 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8x4_t __rev1;
   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30970,7 +30967,6 @@
   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -30985,9 +30981,9 @@
 }
 #else
 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31004,9 +31000,9 @@
 }
 #else
 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31023,9 +31019,9 @@
 }
 #else
 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31042,9 +31038,9 @@
 }
 #else
 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31061,9 +31057,9 @@
 }
 #else
 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31080,9 +31076,9 @@
 }
 #else
 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31099,9 +31095,9 @@
 }
 #else
 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31118,9 +31114,9 @@
 }
 #else
 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31137,9 +31133,9 @@
 }
 #else
 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31156,9 +31152,9 @@
 }
 #else
 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31175,9 +31171,9 @@
 }
 #else
 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31194,9 +31190,9 @@
 }
 #else
 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31213,9 +31209,9 @@
 }
 #else
 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31232,9 +31228,9 @@
 }
 #else
 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31251,9 +31247,9 @@
 }
 #else
 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31270,9 +31266,9 @@
 }
 #else
 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31289,9 +31285,9 @@
 }
 #else
 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31308,9 +31304,9 @@
 }
 #else
 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31327,9 +31323,9 @@
 }
 #else
 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  uint8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31344,9 +31340,9 @@
 }
 #else
 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  uint16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31361,9 +31357,9 @@
 }
 #else
 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  uint8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31378,9 +31374,9 @@
 }
 #else
 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  uint16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31395,9 +31391,9 @@
 }
 #else
 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31412,9 +31408,9 @@
 }
 #else
 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31429,9 +31425,9 @@
 }
 #else
 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31446,9 +31442,9 @@
 }
 #else
 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31463,9 +31459,9 @@
 }
 #else
 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31480,9 +31476,9 @@
 }
 #else
 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31497,9 +31493,9 @@
 }
 #else
 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31514,9 +31510,9 @@
 }
 #else
 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -31531,9 +31527,9 @@
 }
 #else
 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31548,9 +31544,9 @@
 }
 #else
 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -31565,9 +31561,9 @@
 }
 #else
 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -31582,9 +31578,9 @@
 }
 #else
 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -31599,9 +31595,9 @@
 }
 #else
 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31618,9 +31614,9 @@
 }
 #else
 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31637,9 +31633,9 @@
 }
 #else
 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31656,9 +31652,9 @@
 }
 #else
 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31675,9 +31671,9 @@
 }
 #else
 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31694,9 +31690,9 @@
 }
 #else
 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31713,9 +31709,9 @@
 }
 #else
 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31732,9 +31728,9 @@
 }
 #else
 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31751,9 +31747,9 @@
 }
 #else
 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31770,9 +31766,9 @@
 }
 #else
 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31789,9 +31785,9 @@
 }
 #else
 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31808,9 +31804,9 @@
 }
 #else
 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31827,9 +31823,9 @@
 }
 #else
 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31846,9 +31842,9 @@
 }
 #else
 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31865,9 +31861,9 @@
 }
 #else
 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31884,9 +31880,9 @@
 }
 #else
 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31903,9 +31899,9 @@
 }
 #else
 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -31922,9 +31918,9 @@
 }
 #else
 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31941,9 +31937,9 @@
 }
 #else
 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8x2_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31960,9 +31956,9 @@
 }
 #else
 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4x2_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -31979,9 +31975,9 @@
 }
 #else
 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16x2_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -31998,9 +31994,9 @@
 }
 #else
 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8x2_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32017,9 +32013,9 @@
 }
 #else
 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16x2_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32036,9 +32032,9 @@
 }
 #else
 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32055,9 +32051,9 @@
 }
 #else
 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8x2_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32074,9 +32070,9 @@
 }
 #else
 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16x2_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32093,9 +32089,9 @@
 }
 #else
 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4x2_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32112,9 +32108,9 @@
 }
 #else
 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32131,9 +32127,9 @@
 }
 #else
 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8x2_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32150,9 +32146,9 @@
 }
 #else
 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32169,9 +32165,9 @@
 }
 #else
 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32188,9 +32184,9 @@
 }
 #else
 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4x2_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32207,9 +32203,9 @@
 }
 #else
 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8x2_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -32226,9 +32222,9 @@
 }
 #else
 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32245,9 +32241,9 @@
 }
 #else
 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
@@ -32264,9 +32260,9 @@
 }
 #else
 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4x2_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
 
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
@@ -32277,124 +32273,20 @@
 
 #if !defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
-  float16x4_t __s0_122 = __p0_122; \
-  float16x8_t __ret_122; \
-  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
-  __ret_122; \
-})
-#else
-#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
-  float16x4_t __s0_123 = __p0_123; \
-  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
-  float16x8_t __ret_123; \
-  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
-  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_123; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
-  float16x4_t __s0_124 = __p0_124; \
-  float16x4_t __ret_124; \
-  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
-  __ret_124; \
-})
-#else
-#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
-  float16x4_t __s0_125 = __p0_125; \
-  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
-  float16x4_t __ret_125; \
-  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
-  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
-  __ret_125; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
 #define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
+  int32x4_t __ret_126; \
   int32x4_t __s0_126 = __p0_126; \
   int32x2_t __s1_126 = __p1_126; \
-  int32x4_t __ret_126; \
   __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
   __ret_126; \
 })
 #else
 #define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
+  int32x4_t __ret_127; \
   int32x4_t __s0_127 = __p0_127; \
   int32x2_t __s1_127 = __p1_127; \
   int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
   int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
-  int32x4_t __ret_127; \
   __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
   __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
   __ret_127; \
@@ -32403,19 +32295,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
+  int16x8_t __ret_128; \
   int16x8_t __s0_128 = __p0_128; \
   int16x4_t __s1_128 = __p1_128; \
-  int16x8_t __ret_128; \
   __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
   __ret_128; \
 })
 #else
 #define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
+  int16x8_t __ret_129; \
   int16x8_t __s0_129 = __p0_129; \
   int16x4_t __s1_129 = __p1_129; \
   int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
-  int16x8_t __ret_129; \
   __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
   __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_129; \
@@ -32424,19 +32316,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
+  int32x2_t __ret_130; \
   int32x2_t __s0_130 = __p0_130; \
   int32x2_t __s1_130 = __p1_130; \
-  int32x2_t __ret_130; \
   __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
   __ret_130; \
 })
 #else
 #define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
+  int32x2_t __ret_131; \
   int32x2_t __s0_131 = __p0_131; \
   int32x2_t __s1_131 = __p1_131; \
   int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
   int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
-  int32x2_t __ret_131; \
   __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
   __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
   __ret_131; \
@@ -32445,19 +32337,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
+  int16x4_t __ret_132; \
   int16x4_t __s0_132 = __p0_132; \
   int16x4_t __s1_132 = __p1_132; \
-  int16x4_t __ret_132; \
   __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
   __ret_132; \
 })
 #else
 #define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
+  int16x4_t __ret_133; \
   int16x4_t __s0_133 = __p0_133; \
   int16x4_t __s1_133 = __p1_133; \
   int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
   int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
-  int16x4_t __ret_133; \
   __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
   __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
   __ret_133; \
@@ -32466,19 +32358,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
+  int32x4_t __ret_134; \
   int32x4_t __s0_134 = __p0_134; \
   int32x2_t __s1_134 = __p1_134; \
-  int32x4_t __ret_134; \
   __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
   __ret_134; \
 })
 #else
 #define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
+  int32x4_t __ret_135; \
   int32x4_t __s0_135 = __p0_135; \
   int32x2_t __s1_135 = __p1_135; \
   int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
   int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
-  int32x4_t __ret_135; \
   __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
   __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
   __ret_135; \
@@ -32487,19 +32379,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
+  int16x8_t __ret_136; \
   int16x8_t __s0_136 = __p0_136; \
   int16x4_t __s1_136 = __p1_136; \
-  int16x8_t __ret_136; \
   __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
   __ret_136; \
 })
 #else
 #define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
+  int16x8_t __ret_137; \
   int16x8_t __s0_137 = __p0_137; \
   int16x4_t __s1_137 = __p1_137; \
   int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
-  int16x8_t __ret_137; \
   __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
   __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_137; \
@@ -32508,19 +32400,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
+  int32x2_t __ret_138; \
   int32x2_t __s0_138 = __p0_138; \
   int32x2_t __s1_138 = __p1_138; \
-  int32x2_t __ret_138; \
   __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
   __ret_138; \
 })
 #else
 #define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
+  int32x2_t __ret_139; \
   int32x2_t __s0_139 = __p0_139; \
   int32x2_t __s1_139 = __p1_139; \
   int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
   int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
-  int32x2_t __ret_139; \
   __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
   __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
   __ret_139; \
@@ -32529,19 +32421,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
+  int16x4_t __ret_140; \
   int16x4_t __s0_140 = __p0_140; \
   int16x4_t __s1_140 = __p1_140; \
-  int16x4_t __ret_140; \
   __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
   __ret_140; \
 })
 #else
 #define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
+  int16x4_t __ret_141; \
   int16x4_t __s0_141 = __p0_141; \
   int16x4_t __s1_141 = __p1_141; \
   int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
   int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
-  int16x4_t __ret_141; \
   __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
   __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
   __ret_141; \
@@ -33878,8 +33770,8 @@
 }
 #else
 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -33899,8 +33791,8 @@
 }
 #else
 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -33974,16 +33866,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
   float16x8_t __ret; \
+  float16x8_t __s1 = __p1; \
   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
   __ret; \
 })
 #else
 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
   float16x8_t __s1 = __p1; \
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
@@ -33992,16 +33884,16 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
   float16x4_t __ret; \
+  float16x4_t __s1 = __p1; \
   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
   __ret; \
 })
 #else
 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
   float16x4_t __s1 = __p1; \
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -34186,18 +34078,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
   float16x8x2_t __ret; \
+  float16x8x2_t __s1 = __p1; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
   __ret; \
 })
 #else
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x2_t __ret; \
   float16x8x2_t __s1 = __p1; \
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34208,18 +34100,18 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
   float16x4x2_t __ret; \
+  float16x4x2_t __s1 = __p1; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
   __ret; \
 })
 #else
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x2_t __ret; \
   float16x4x2_t __s1 = __p1; \
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34302,19 +34194,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
   float16x8x3_t __ret; \
+  float16x8x3_t __s1 = __p1; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
   __ret; \
 })
 #else
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x3_t __ret; \
   float16x8x3_t __s1 = __p1; \
   float16x8x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34326,19 +34218,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
   float16x4x3_t __ret; \
+  float16x4x3_t __s1 = __p1; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
   __ret; \
 })
 #else
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x3_t __ret; \
   float16x4x3_t __s1 = __p1; \
   float16x4x3_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34426,20 +34318,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
   float16x8x4_t __ret; \
+  float16x8x4_t __s1 = __p1; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
   __ret; \
 })
 #else
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8x4_t __ret; \
   float16x8x4_t __s1 = __p1; \
   float16x8x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -34452,20 +34344,20 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
   float16x4x4_t __ret; \
+  float16x4x4_t __s1 = __p1; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
   __ret; \
 })
 #else
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4x4_t __ret; \
   float16x4x4_t __s1 = __p1; \
   float16x4x4_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
@@ -34826,8 +34718,8 @@
 }
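+/* The vcvt{a,m,n,p} conversions round with an explicit mode: to nearest with
+ * ties away from zero (a), toward minus infinity (m), to nearest even (n),
+ * and toward plus infinity (p). */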
 #else
 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34842,8 +34734,8 @@
 }
 #else
 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34858,8 +34750,8 @@
 }
 #else
 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34874,8 +34766,8 @@
 }
 #else
 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34890,8 +34782,8 @@
 }
 #else
 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34906,8 +34798,8 @@
 }
 #else
 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34922,8 +34814,8 @@
 }
 #else
 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34938,8 +34830,8 @@
 }
 #else
 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34954,8 +34846,8 @@
 }
 #else
 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -34970,8 +34862,8 @@
 }
 #else
 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -34986,8 +34878,8 @@
 }
 #else
 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35002,8 +34894,8 @@
 }
 #else
 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35018,8 +34910,8 @@
 }
 #else
 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35034,8 +34926,8 @@
 }
 #else
 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35050,8 +34942,8 @@
 }
 #else
 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35066,8 +34958,8 @@
 }
 #else
 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35084,9 +34976,9 @@
 }
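+/* vaesdq_u8/vaeseq_u8 perform a single AES decryption/encryption round step
+ * (AESD/AESE), and vaesimcq_u8/vaesmcq_u8 the inverse/forward MixColumns
+ * transformation (AESIMC/AESMC) from the Armv8 Crypto extension. */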
 #else
 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35101,9 +34993,9 @@
 }
 #else
 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35118,8 +35010,8 @@
 }
 #else
 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35134,8 +35026,8 @@
 }
 #else
 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35152,8 +35044,8 @@
 }
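+/* The vrnd family rounds a float vector to integral values in floating-point
+ * format: vrnd toward zero, vrnda to nearest with ties away from zero, vrndi
+ * using the current rounding mode, vrndm toward minus infinity, vrndn to
+ * nearest even, vrndp toward plus infinity, and vrndx like vrndi but raising
+ * the Inexact exception when the result differs from the input. */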
 #else
 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35168,8 +35060,8 @@
 }
 #else
 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35184,8 +35076,8 @@
 }
 #else
 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35200,8 +35092,8 @@
 }
 #else
 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35216,8 +35108,8 @@
 }
 #else
 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35232,8 +35124,8 @@
 }
 #else
 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35248,8 +35140,8 @@
 }
 #else
 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35264,8 +35156,8 @@
 }
 #else
 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35280,8 +35172,8 @@
 }
 #else
 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35296,8 +35188,8 @@
 }
 #else
 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35317,8 +35209,8 @@
 }
 #else
 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35333,8 +35225,8 @@
 }
 #else
 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35349,8 +35241,8 @@
 }
 #else
 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35365,8 +35257,8 @@
 }
 #else
 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35383,8 +35275,8 @@
 }
 #else
 __ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35399,8 +35291,8 @@
 }
 #else
 __ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35415,8 +35307,8 @@
 }
 #else
 __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35431,8 +35323,8 @@
 }
 #else
 __ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35447,8 +35339,8 @@
 }
 #else
 __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35463,8 +35355,8 @@
 }
 #else
 __ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35479,8 +35371,8 @@
 }
 #else
 __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35495,8 +35387,8 @@
 }
 #else
 __ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35511,8 +35403,8 @@
 }
 #else
 __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35527,8 +35419,8 @@
 }
 #else
 __ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35543,8 +35435,8 @@
 }
 #else
 __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35559,8 +35451,8 @@
 }
 #else
 __ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35577,9 +35469,9 @@
 }
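+/* vmaxnm/vminnm implement the IEEE 754-2008 maxNum/minNum semantics: when
+ * exactly one operand is a quiet NaN, the numeric operand is returned rather
+ * than the NaN. */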
 #else
 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35594,9 +35486,9 @@
 }
 #else
 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35611,9 +35503,9 @@
 }
 #else
 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35628,9 +35520,9 @@
 }
 #else
 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35647,9 +35539,9 @@
 }
 #else
 __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35664,9 +35556,9 @@
 }
 #else
 __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35681,9 +35573,9 @@
 }
 #else
 __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35698,9 +35590,9 @@
 }
 #else
 __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35717,9 +35609,9 @@
 }
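+/* The vsha1{c,m,p}q_u32, vsha1su*, and vsha256* intrinsics map to the Armv8
+ * Crypto extension's SHA-1 and SHA-256 hash-update instructions. */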
 #else
 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35739,9 +35631,9 @@
 }
 #else
 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35756,9 +35648,9 @@
 }
 #else
 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35773,10 +35665,10 @@
 }
 #else
 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35791,9 +35683,9 @@
 }
 #else
 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35808,10 +35700,10 @@
 }
 #else
 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35826,10 +35718,10 @@
 }
 #else
 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35844,9 +35736,9 @@
 }
 #else
 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35861,10 +35753,10 @@
 }
 #else
 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35872,7 +35764,8900 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
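+/* The vreinterpret(q)_*_bf16 and vreinterpret(q)_bf16_* helpers below are
+ * pure bit-pattern casts: no value conversion takes place, only the vector's
+ * static type changes. A minimal usage sketch, assuming a bf16-capable
+ * target:
+ *   float32x4_t f = vdupq_n_f32(1.0f);
+ *   bfloat16x8_t b = vreinterpretq_bf16_f32(f);  // same 128 bits, new type
+ */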
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
+  poly128_t __ret;
+  __ret = (poly128_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
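+// Note: the vreinterpret(q)_* functions above are pure bit-pattern casts
+// between equally sized vector types; no conversion instruction is emitted.
+// An illustrative use (names are ours, not part of the generated header):
+//   bfloat16x4_t v = vld1_bf16(ptr);             // load 4 bf16 values
+//   uint16x4_t bits = vreinterpret_u16_bf16(v);  // view the raw 16-bit patterns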
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
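+// splat(q)_lane(q)_bf16 are internal helpers used by the public vdup*_lane*
+// intrinsics further below. The trailing integer passed to the generic
+// builtins corresponds to Clang's NeonTypeFlags encoding of the result type:
+// 11 for a 64-bit bfloat16 vector, 43 (11 plus the 0x20 quad bit) for the
+// 128-bit form. On big-endian targets the operands are reversed into
+// little-endian lane order before the builtin and the result is reversed
+// back afterwards; that pattern repeats throughout this header.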
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
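+// vbfdot(q)_f32 computes a 2-way bf16 dot product per 32-bit lane (BFDOT):
+// ret[i] = p0[i] + p1[2i]*p2[2i] + p1[2i+1]*p2[2i+1], with the bf16 products
+// widened to f32. A minimal usage sketch (a, b are assumed bfloat16x8_t):
+//   float32x4_t acc = vdupq_n_f32(0.0f);
+//   acc = vbfdotq_f32(acc, a, b);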
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
+  float32x4_t __ret_142; \
+  float32x4_t __s0_142 = __p0_142; \
+  bfloat16x8_t __s1_142 = __p1_142; \
+  bfloat16x4_t __s2_142 = __p2_142; \
+bfloat16x4_t __reint_142 = __s2_142; \
+float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
+  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
+  __ret_142; \
+})
+#else
+#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
+  float32x4_t __ret_143; \
+  float32x4_t __s0_143 = __p0_143; \
+  bfloat16x8_t __s1_143 = __p1_143; \
+  bfloat16x4_t __s2_143 = __p2_143; \
+  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
+bfloat16x4_t __reint_143 = __rev2_143; \
+float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
+  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
+  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
+  __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
+  float32x2_t __ret_144; \
+  float32x2_t __s0_144 = __p0_144; \
+  bfloat16x4_t __s1_144 = __p1_144; \
+  bfloat16x4_t __s2_144 = __p2_144; \
+bfloat16x4_t __reint_144 = __s2_144; \
+float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
+  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
+  __ret_144; \
+})
+#else
+#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
+  float32x2_t __ret_145; \
+  float32x2_t __s0_145 = __p0_145; \
+  bfloat16x4_t __s1_145 = __p1_145; \
+  bfloat16x4_t __s2_145 = __p2_145; \
+  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
+  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
+bfloat16x4_t __reint_145 = __rev2_145; \
+float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
+  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
+  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
+  __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
+  float32x4_t __ret_146; \
+  float32x4_t __s0_146 = __p0_146; \
+  bfloat16x8_t __s1_146 = __p1_146; \
+  bfloat16x8_t __s2_146 = __p2_146; \
+bfloat16x8_t __reint_146 = __s2_146; \
+float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
+  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
+  __ret_146; \
+})
+#else
+#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
+  float32x4_t __ret_147; \
+  float32x4_t __s0_147 = __p0_147; \
+  bfloat16x8_t __s1_147 = __p1_147; \
+  bfloat16x8_t __s2_147 = __p2_147; \
+  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+bfloat16x8_t __reint_147 = __rev2_147; \
+float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
+  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
+  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
+  __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
+  float32x2_t __ret_148; \
+  float32x2_t __s0_148 = __p0_148; \
+  bfloat16x4_t __s1_148 = __p1_148; \
+  bfloat16x8_t __s2_148 = __p2_148; \
+bfloat16x8_t __reint_148 = __s2_148; \
+float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
+  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
+  __ret_148; \
+})
+#else
+#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
+  float32x2_t __ret_149; \
+  float32x2_t __s0_149 = __p0_149; \
+  bfloat16x4_t __s1_149 = __p1_149; \
+  bfloat16x8_t __s2_149 = __p2_149; \
+  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
+  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
+bfloat16x8_t __reint_149 = __rev2_149; \
+float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
+  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
+  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
+  __ret_149; \
+})
+#endif
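+// The _lane/_laneq variants above select one pair of adjacent bf16 elements
+// from the last operand and broadcast it. The lane index is applied through a
+// float32 splat of the reinterpreted vector because one 32-bit f32 lane
+// covers exactly one bf16 pair.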
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
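+// vbfmlalbq_f32 / vbfmlaltq_f32 widen the even (bottom) or odd (top) bf16
+// lanes of each source to f32, multiply them, and accumulate into __p0
+// (BFMLALB/BFMLALT). vbfmmlaq_f32 performs a 2x4 by 4x2 bf16 matrix multiply
+// accumulated into a 2x2 f32 block per 128-bit register (BFMMLA).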
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#define vcreate_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (bfloat16x4_t)(__promote); \
+  __ret; \
+})
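+// vcreate_bf16 builds a bfloat16x4_t from a 64-bit integer bit pattern. An
+// illustrative value (ours, not from the header): since 0x3F80 is 1.0 in
+// bfloat16, vcreate_bf16(0x3F803F803F803F80ULL) yields four lanes of 1.0.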
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
+  float32x4_t __ret_150;
+bfloat16x4_t __reint_150 = __p0_150;
+int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
+  __ret_150 = *(float32x4_t *) &__reint1_150;
+  return __ret_150;
+}
+#else
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
+  float32x4_t __ret_151;
+  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
+bfloat16x4_t __reint_151 = __rev0_151;
+int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
+  __ret_151 = *(float32x4_t *) &__reint1_151;
+  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
+  return __ret_151;
+}
+__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
+  float32x4_t __ret_152;
+bfloat16x4_t __reint_152 = __p0_152;
+int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
+  __ret_152 = *(float32x4_t *) &__reint1_152;
+  return __ret_152;
+}
+#endif
+
+__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
+  float32_t __ret;
+bfloat16_t __reint = __p0;
+int32_t __reint1 = *(int32_t *) &__reint << 16;
+  __ret = *(float32_t *) &__reint1;
+  return __ret;
+}
+__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
+  bfloat16_t __ret;
+  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
+  return __ret;
+}
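+// Widening bf16 -> f32 (vcvt_f32_bf16, vcvtah_f32_bf16) is exact: a bfloat16
+// is the high 16 bits of an IEEE binary32, so shifting left by 16 recovers an
+// f32. Narrowing (vcvth_bf16_f32) rounds, so a round trip generally drops the
+// low 16 mantissa bits, e.g.:
+//   bfloat16_t h = vcvth_bf16_f32(3.14159f);
+//   float32_t  f = vcvtah_f32_bf16(h);   // ~3.140625, not 3.14159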
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
+  bfloat16x8_t __ret_153; \
+  bfloat16x4_t __s0_153 = __p0_153; \
+  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
+  __ret_153; \
+})
+#else
+#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
+  bfloat16x8_t __ret_154; \
+  bfloat16x4_t __s0_154 = __p0_154; \
+  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
+  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
+  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_154; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
+  bfloat16x4_t __ret_155; \
+  bfloat16x4_t __s0_155 = __p0_155; \
+  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
+  __ret_155; \
+})
+#else
+#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
+  bfloat16x4_t __ret_156; \
+  bfloat16x4_t __s0_156 = __p0_156; \
+  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
+  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
+  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
+  __ret_156; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
+  bfloat16x8_t __ret_157; \
+  bfloat16x8_t __s0_157 = __p0_157; \
+  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
+  __ret_157; \
+})
+#else
+#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
+  bfloat16x8_t __ret_158; \
+  bfloat16x8_t __s0_158 = __p0_158; \
+  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
+  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
+  bfloat16x4_t __ret_159; \
+  bfloat16x8_t __s0_159 = __p0_159; \
+  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
+  __ret_159; \
+})
+#else
+#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
+  bfloat16x4_t __ret_160; \
+  bfloat16x8_t __s0_160 = __p0_160; \
+  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
+  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
+  __ret_160; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#endif
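+// Together, vdup_n/vcombine/vget_low/vget_high provide the basic register
+// plumbing for bf16 vectors. A small sketch (ptr and x are assumed names):
+//   bfloat16x4_t lo = vdup_n_bf16(x);
+//   bfloat16x4_t hi = vget_high_bf16(vld1q_bf16(ptr));
+//   bfloat16x8_t v  = vcombine_bf16(lo, hi);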
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+  __ret; \
+})
+#else
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+  __ret; \
+})
+#else
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
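+// vld1(q)_bf16 and the _x2/_x3/_x4 forms load one to four consecutive,
+// non-interleaved vectors; _dup broadcasts a single element and _lane
+// replaces a single lane of an existing vector. On big-endian targets each
+// loaded vector is shuffled back into architectural lane order, as above.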
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
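+// Unlike the vld1*_x2/x3/x4 forms, vld2/vld3/vld4 de-interleave their input:
+// vld2q_bf16 splits element pairs across val[0] and val[1]. For a stream of
+// {re, im} bf16 pairs (ptr is an assumed name):
+//   bfloat16x8x2_t c = vld2q_bf16(ptr);  // c.val[0] = re lanes, c.val[1] = im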
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
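+// vset(q)_lane_bf16, with vget(q)_lane_bf16 defined earlier, gives scalar
+// access to individual lanes; the lane index must be a compile-time constant.
+// Illustrative (v is an assumed bfloat16x4_t):
+//   bfloat16x4_t v2 = vset_lane_bf16(vcvth_bf16_f32(1.0f), v, 2);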
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
+})
+#else
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
+})
+#else
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+})
+#else
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+})
+#else
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+})
+#endif
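+// The vst1* stores mirror the vld1* loads; on big-endian targets the source
+// vectors are reversed into memory lane order before the underlying builtin.
+// Sketch (ptr, v are assumed names):
+//   vst1q_bf16(ptr, v);          // store 8 bf16 values
+//   vst1q_lane_bf16(ptr, v, 0);  // store only lane 0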
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+})
+#else
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+})
+#else
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+})
+#else
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+})
+#else
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+})
+#else
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+})
+#else
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+})
+#endif
+
+#endif
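+
+/* The next block is the AArch32-only (!__aarch64__) half of the bf16
+ * conversions: vcvt_bf16_f32 and the vcvtq_low/high variants are built on
+ * an __a32_vcvt_bf16_f32 helper instead of the dedicated A64 low/high
+ * builtins that appear in the following block. */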
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#endif
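+/* Pattern note: throughout this header the big-endian (#else) definitions
+ * reverse vector lanes with __builtin_shufflevector before and after the
+ * builtin call, and a __noswap_ twin of each function is emitted so that
+ * other big-endian wrappers whose operands are already lane-reversed can
+ * call it without swapping twice. */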
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __a32_vcvt_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
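+
+/* The next block is the AArch64 half: vcvtq_low_bf16_f32 and
+ * vcvtq_high_bf16_f32 map onto dedicated builtins (the BFCVTN/BFCVTN2
+ * instructions), and the vcopy*_lane* macros splice individual bf16 lanes
+ * between vectors via the vget/vset_lane pairs. */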
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
+  bfloat16x8_t __ret_161; \
+  bfloat16x8_t __s0_161 = __p0_161; \
+  bfloat16x4_t __s2_161 = __p2_161; \
+  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
+  __ret_161; \
+})
+#else
+#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
+  bfloat16x8_t __ret_162; \
+  bfloat16x8_t __s0_162 = __p0_162; \
+  bfloat16x4_t __s2_162 = __p2_162; \
+  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
+  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
+  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_162; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
+  bfloat16x4_t __ret_163; \
+  bfloat16x4_t __s0_163 = __p0_163; \
+  bfloat16x4_t __s2_163 = __p2_163; \
+  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
+  __ret_163; \
+})
+#else
+#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
+  bfloat16x4_t __ret_164; \
+  bfloat16x4_t __s0_164 = __p0_164; \
+  bfloat16x4_t __s2_164 = __p2_164; \
+  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
+  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
+  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
+  __ret_164; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
+  bfloat16x8_t __ret_165; \
+  bfloat16x8_t __s0_165 = __p0_165; \
+  bfloat16x8_t __s2_165 = __p2_165; \
+  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
+  __ret_165; \
+})
+#else
+#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
+  bfloat16x8_t __ret_166; \
+  bfloat16x8_t __s0_166 = __p0_166; \
+  bfloat16x8_t __s2_166 = __p2_166; \
+  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
+  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
+  bfloat16x4_t __ret_167; \
+  bfloat16x4_t __s0_167 = __p0_167; \
+  bfloat16x8_t __s2_167 = __p2_167; \
+  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
+  __ret_167; \
+})
+#else
+#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
+  bfloat16x4_t __ret_168; \
+  bfloat16x4_t __s0_168 = __p0_168; \
+  bfloat16x8_t __s2_168 = __p2_168; \
+  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
+  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
+  __ret_168; \
+})
+#endif
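+/* Usage sketch (illustrative, not part of the generated header): the vcopy
+ * macros move one lane between vectors; both lane indices must be
+ * compile-time constants.
+ *
+ *   bfloat16x8_t dst; bfloat16x4_t src;       // hypothetical values
+ *   dst = vcopyq_lane_bf16(dst, 0, src, 3);   // dst[0] = src[3]
+ */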
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = __a64_vcvtq_low_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
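+
+/* The next block implements the ACLE complex-arithmetic intrinsics for
+ * float32: vectors hold interleaved complex numbers (even lanes real, odd
+ * lanes imaginary).  vcadd_rot90/rot270 add with the second operand rotated
+ * by 90/270 degrees in the complex plane; each vcmla_rotN accumulates one
+ * of the partial products of a complex multiply. */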
+#if defined(__ARM_FEATURE_COMPLEX)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
+  float32x2_t __ret_169; \
+  float32x2_t __s0_169 = __p0_169; \
+  float32x2_t __s1_169 = __p1_169; \
+  float32x2_t __s2_169 = __p2_169; \
+float32x2_t __reint_169 = __s2_169; \
+uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
+  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
+  __ret_169; \
+})
+#else
+#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
+  float32x2_t __ret_170; \
+  float32x2_t __s0_170 = __p0_170; \
+  float32x2_t __s1_170 = __p1_170; \
+  float32x2_t __s2_170 = __p2_170; \
+  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
+  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
+  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
+float32x2_t __reint_170 = __rev2_170; \
+uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
+  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
+  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
+  __ret_170; \
+})
+#endif
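+/* Pattern note: in the _lane_/_laneq_ macros a complex f32 pair occupies
+ * 64 bits, so the selected pair is pulled out with vget(q)_lane_u64 through
+ * a uint64 reinterpret and broadcast to every pair of the third operand
+ * before the underlying vcmla call. */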
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
+  float32x4_t __ret_171; \
+  float32x4_t __s0_171 = __p0_171; \
+  float32x4_t __s1_171 = __p1_171; \
+  float32x2_t __s2_171 = __p2_171; \
+float32x2_t __reint_171 = __s2_171; \
+uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
+  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
+  __ret_171; \
+})
+#else
+#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
+  float32x4_t __ret_172; \
+  float32x4_t __s0_172 = __p0_172; \
+  float32x4_t __s1_172 = __p1_172; \
+  float32x2_t __s2_172 = __p2_172; \
+  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
+  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
+  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
+float32x2_t __reint_172 = __rev2_172; \
+uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
+  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
+  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
+  __ret_172; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
+  float32x2_t __ret_173; \
+  float32x2_t __s0_173 = __p0_173; \
+  float32x2_t __s1_173 = __p1_173; \
+  float32x4_t __s2_173 = __p2_173; \
+float32x4_t __reint_173 = __s2_173; \
+uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
+  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
+  __ret_173; \
+})
+#else
+#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
+  float32x2_t __ret_174; \
+  float32x2_t __s0_174 = __p0_174; \
+  float32x2_t __s1_174 = __p1_174; \
+  float32x4_t __s2_174 = __p2_174; \
+  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
+  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
+  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
+float32x4_t __reint_174 = __rev2_174; \
+uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
+  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
+  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
+  __ret_174; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
+  float32x4_t __ret_175; \
+  float32x4_t __s0_175 = __p0_175; \
+  float32x4_t __s1_175 = __p1_175; \
+  float32x4_t __s2_175 = __p2_175; \
+float32x4_t __reint_175 = __s2_175; \
+uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
+  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
+  __ret_175; \
+})
+#else
+#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
+  float32x4_t __ret_176; \
+  float32x4_t __s0_176 = __p0_176; \
+  float32x4_t __s1_176 = __p1_176; \
+  float32x4_t __s2_176 = __p2_176; \
+  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
+  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
+  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
+float32x4_t __reint_176 = __rev2_176; \
+uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
+  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
+  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
+  __ret_176; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
+  float32x2_t __ret_177; \
+  float32x2_t __s0_177 = __p0_177; \
+  float32x2_t __s1_177 = __p1_177; \
+  float32x2_t __s2_177 = __p2_177; \
+float32x2_t __reint_177 = __s2_177; \
+uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
+  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
+  __ret_177; \
+})
+#else
+#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
+  float32x2_t __ret_178; \
+  float32x2_t __s0_178 = __p0_178; \
+  float32x2_t __s1_178 = __p1_178; \
+  float32x2_t __s2_178 = __p2_178; \
+  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
+  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
+  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
+float32x2_t __reint_178 = __rev2_178; \
+uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
+  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
+  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
+  __ret_178; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
+  float32x4_t __ret_179; \
+  float32x4_t __s0_179 = __p0_179; \
+  float32x4_t __s1_179 = __p1_179; \
+  float32x2_t __s2_179 = __p2_179; \
+float32x2_t __reint_179 = __s2_179; \
+uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
+  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
+  __ret_179; \
+})
+#else
+#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
+  float32x4_t __ret_180; \
+  float32x4_t __s0_180 = __p0_180; \
+  float32x4_t __s1_180 = __p1_180; \
+  float32x2_t __s2_180 = __p2_180; \
+  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
+  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
+  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
+float32x2_t __reint_180 = __rev2_180; \
+uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
+  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
+  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
+  __ret_180; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
+  float32x2_t __ret_181; \
+  float32x2_t __s0_181 = __p0_181; \
+  float32x2_t __s1_181 = __p1_181; \
+  float32x4_t __s2_181 = __p2_181; \
+float32x4_t __reint_181 = __s2_181; \
+uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
+  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
+  __ret_181; \
+})
+#else
+#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
+  float32x2_t __ret_182; \
+  float32x2_t __s0_182 = __p0_182; \
+  float32x2_t __s1_182 = __p1_182; \
+  float32x4_t __s2_182 = __p2_182; \
+  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
+  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
+  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
+float32x4_t __reint_182 = __rev2_182; \
+uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
+  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
+  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
+  __ret_182; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
+  float32x4_t __ret_183; \
+  float32x4_t __s0_183 = __p0_183; \
+  float32x4_t __s1_183 = __p1_183; \
+  float32x4_t __s2_183 = __p2_183; \
+float32x4_t __reint_183 = __s2_183; \
+uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
+  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
+  __ret_183; \
+})
+#else
+#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
+  float32x4_t __ret_184; \
+  float32x4_t __s0_184 = __p0_184; \
+  float32x4_t __s1_184 = __p1_184; \
+  float32x4_t __s2_184 = __p2_184; \
+  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
+  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
+  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
+float32x4_t __reint_184 = __rev2_184; \
+uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
+  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
+  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
+  __ret_184; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
+  float32x2_t __ret_185; \
+  float32x2_t __s0_185 = __p0_185; \
+  float32x2_t __s1_185 = __p1_185; \
+  float32x2_t __s2_185 = __p2_185; \
+float32x2_t __reint_185 = __s2_185; \
+uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
+  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
+  __ret_185; \
+})
+#else
+#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
+  float32x2_t __ret_186; \
+  float32x2_t __s0_186 = __p0_186; \
+  float32x2_t __s1_186 = __p1_186; \
+  float32x2_t __s2_186 = __p2_186; \
+  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
+  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
+  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
+float32x2_t __reint_186 = __rev2_186; \
+uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
+  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
+  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
+  __ret_186; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
+  float32x4_t __ret_187; \
+  float32x4_t __s0_187 = __p0_187; \
+  float32x4_t __s1_187 = __p1_187; \
+  float32x2_t __s2_187 = __p2_187; \
+float32x2_t __reint_187 = __s2_187; \
+uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
+  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
+  __ret_187; \
+})
+#else
+#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
+  float32x4_t __ret_188; \
+  float32x4_t __s0_188 = __p0_188; \
+  float32x4_t __s1_188 = __p1_188; \
+  float32x2_t __s2_188 = __p2_188; \
+  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
+  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
+  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
+float32x2_t __reint_188 = __rev2_188; \
+uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
+  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
+  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
+  __ret_188; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
+  float32x2_t __ret_189; \
+  float32x2_t __s0_189 = __p0_189; \
+  float32x2_t __s1_189 = __p1_189; \
+  float32x4_t __s2_189 = __p2_189; \
+float32x4_t __reint_189 = __s2_189; \
+uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
+  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
+  __ret_189; \
+})
+#else
+#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
+  float32x2_t __ret_190; \
+  float32x2_t __s0_190 = __p0_190; \
+  float32x2_t __s1_190 = __p1_190; \
+  float32x4_t __s2_190 = __p2_190; \
+  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
+  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
+  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
+float32x4_t __reint_190 = __rev2_190; \
+uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
+  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
+  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
+  __ret_190; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
+  float32x4_t __ret_191; \
+  float32x4_t __s0_191 = __p0_191; \
+  float32x4_t __s1_191 = __p1_191; \
+  float32x4_t __s2_191 = __p2_191; \
+float32x4_t __reint_191 = __s2_191; \
+uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
+  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
+  __ret_191; \
+})
+#else
+#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
+  float32x4_t __ret_192; \
+  float32x4_t __s0_192 = __p0_192; \
+  float32x4_t __s1_192 = __p1_192; \
+  float32x4_t __s2_192 = __p2_192; \
+  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
+  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
+  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
+float32x4_t __reint_192 = __rev2_192; \
+uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
+  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
+  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
+  __ret_192; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
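+/* Usage sketch (illustrative, not part of the generated header): a full
+ * complex multiply-accumulate acc += a*b is the sum of the rotation-0 and
+ * rotation-90 partial products.
+ *
+ *   float32x4_t cmla(float32x4_t acc, float32x4_t a, float32x4_t b) {
+ *     acc = vcmlaq_f32(acc, a, b);        // re += ar*br;  im += ar*bi
+ *     acc = vcmlaq_rot90_f32(acc, a, b);  // re += -ai*bi; im += ai*br
+ *     return acc;
+ *   }
+ */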
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
+  float32x2_t __ret_193; \
+  float32x2_t __s0_193 = __p0_193; \
+  float32x2_t __s1_193 = __p1_193; \
+  float32x2_t __s2_193 = __p2_193; \
+float32x2_t __reint_193 = __s2_193; \
+uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
+  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
+  __ret_193; \
+})
+#else
+#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
+  float32x2_t __ret_194; \
+  float32x2_t __s0_194 = __p0_194; \
+  float32x2_t __s1_194 = __p1_194; \
+  float32x2_t __s2_194 = __p2_194; \
+  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
+  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
+  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
+float32x2_t __reint_194 = __rev2_194; \
+uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
+  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
+  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
+  __ret_194; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
+  float32x4_t __ret_195; \
+  float32x4_t __s0_195 = __p0_195; \
+  float32x4_t __s1_195 = __p1_195; \
+  float32x2_t __s2_195 = __p2_195; \
+float32x2_t __reint_195 = __s2_195; \
+uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
+  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
+  __ret_195; \
+})
+#else
+#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
+  float32x4_t __ret_196; \
+  float32x4_t __s0_196 = __p0_196; \
+  float32x4_t __s1_196 = __p1_196; \
+  float32x2_t __s2_196 = __p2_196; \
+  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
+  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
+  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
+float32x2_t __reint_196 = __rev2_196; \
+uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
+  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
+  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
+  __ret_196; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
+  float32x2_t __ret_197; \
+  float32x2_t __s0_197 = __p0_197; \
+  float32x2_t __s1_197 = __p1_197; \
+  float32x4_t __s2_197 = __p2_197; \
+float32x4_t __reint_197 = __s2_197; \
+uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
+  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
+  __ret_197; \
+})
+#else
+#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
+  float32x2_t __ret_198; \
+  float32x2_t __s0_198 = __p0_198; \
+  float32x2_t __s1_198 = __p1_198; \
+  float32x4_t __s2_198 = __p2_198; \
+  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
+  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
+  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
+float32x4_t __reint_198 = __rev2_198; \
+uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
+  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
+  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
+  __ret_198; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
+  float32x4_t __ret_199; \
+  float32x4_t __s0_199 = __p0_199; \
+  float32x4_t __s1_199 = __p1_199; \
+  float32x4_t __s2_199 = __p2_199; \
+float32x4_t __reint_199 = __s2_199; \
+uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
+  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
+  __ret_199; \
+})
+#else
+#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
+  float32x4_t __ret_200; \
+  float32x4_t __s0_200 = __p0_200; \
+  float32x4_t __s1_200 = __p1_200; \
+  float32x4_t __s2_200 = __p2_200; \
+  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
+  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
+  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
+float32x4_t __reint_200 = __rev2_200; \
+uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
+  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
+  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
+  __ret_200; \
+})
+#endif
+
+#endif
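+
+/* The next block repeats the vcadd/vcmla family for float16 vectors; the
+ * structure is identical except that a complex pair is now 32 bits, so the
+ * _lane_ macros broadcast through uint32 reinterprets instead of uint64. */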
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
+  float16x4_t __ret_201; \
+  float16x4_t __s0_201 = __p0_201; \
+  float16x4_t __s1_201 = __p1_201; \
+  float16x4_t __s2_201 = __p2_201; \
+float16x4_t __reint_201 = __s2_201; \
+uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
+  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
+  __ret_201; \
+})
+#else
+#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
+  float16x4_t __ret_202; \
+  float16x4_t __s0_202 = __p0_202; \
+  float16x4_t __s1_202 = __p1_202; \
+  float16x4_t __s2_202 = __p2_202; \
+  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
+  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
+  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
+float16x4_t __reint_202 = __rev2_202; \
+uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
+  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
+  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
+  __ret_202; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
+  float16x8_t __ret_203; \
+  float16x8_t __s0_203 = __p0_203; \
+  float16x8_t __s1_203 = __p1_203; \
+  float16x4_t __s2_203 = __p2_203; \
+float16x4_t __reint_203 = __s2_203; \
+uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
+  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
+  __ret_203; \
+})
+#else
+#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
+  float16x8_t __ret_204; \
+  float16x8_t __s0_204 = __p0_204; \
+  float16x8_t __s1_204 = __p1_204; \
+  float16x4_t __s2_204 = __p2_204; \
+  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
+float16x4_t __reint_204 = __rev2_204; \
+uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
+  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
+  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_204; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
+  float16x4_t __ret_205; \
+  float16x4_t __s0_205 = __p0_205; \
+  float16x4_t __s1_205 = __p1_205; \
+  float16x8_t __s2_205 = __p2_205; \
+float16x8_t __reint_205 = __s2_205; \
+uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
+  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
+  __ret_205; \
+})
+#else
+#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
+  float16x4_t __ret_206; \
+  float16x4_t __s0_206 = __p0_206; \
+  float16x4_t __s1_206 = __p1_206; \
+  float16x8_t __s2_206 = __p2_206; \
+  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
+  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
+  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_206 = __rev2_206; \
+uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
+  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
+  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
+  __ret_206; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
+  float16x8_t __ret_207; \
+  float16x8_t __s0_207 = __p0_207; \
+  float16x8_t __s1_207 = __p1_207; \
+  float16x8_t __s2_207 = __p2_207; \
+float16x8_t __reint_207 = __s2_207; \
+uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
+  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
+  __ret_207; \
+})
+#else
+#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
+  float16x8_t __ret_208; \
+  float16x8_t __s0_208 = __p0_208; \
+  float16x8_t __s1_208 = __p1_208; \
+  float16x8_t __s2_208 = __p2_208; \
+  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_208 = __rev2_208; \
+uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
+  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
+  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_208; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
+  float16x4_t __ret_209; \
+  float16x4_t __s0_209 = __p0_209; \
+  float16x4_t __s1_209 = __p1_209; \
+  float16x4_t __s2_209 = __p2_209; \
+float16x4_t __reint_209 = __s2_209; \
+uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
+  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
+  __ret_209; \
+})
+#else
+#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
+  float16x4_t __ret_210; \
+  float16x4_t __s0_210 = __p0_210; \
+  float16x4_t __s1_210 = __p1_210; \
+  float16x4_t __s2_210 = __p2_210; \
+  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
+  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
+  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
+float16x4_t __reint_210 = __rev2_210; \
+uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
+  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
+  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
+  __ret_210; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
+  float16x8_t __ret_211; \
+  float16x8_t __s0_211 = __p0_211; \
+  float16x8_t __s1_211 = __p1_211; \
+  float16x4_t __s2_211 = __p2_211; \
+float16x4_t __reint_211 = __s2_211; \
+uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
+  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
+  __ret_211; \
+})
+#else
+#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
+  float16x8_t __ret_212; \
+  float16x8_t __s0_212 = __p0_212; \
+  float16x8_t __s1_212 = __p1_212; \
+  float16x4_t __s2_212 = __p2_212; \
+  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
+float16x4_t __reint_212 = __rev2_212; \
+uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
+  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
+  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_212; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
+  float16x4_t __ret_213; \
+  float16x4_t __s0_213 = __p0_213; \
+  float16x4_t __s1_213 = __p1_213; \
+  float16x8_t __s2_213 = __p2_213; \
+float16x8_t __reint_213 = __s2_213; \
+uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
+  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
+  __ret_213; \
+})
+#else
+#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
+  float16x4_t __ret_214; \
+  float16x4_t __s0_214 = __p0_214; \
+  float16x4_t __s1_214 = __p1_214; \
+  float16x8_t __s2_214 = __p2_214; \
+  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
+  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
+  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_214 = __rev2_214; \
+uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
+  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
+  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
+  __ret_214; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
+  float16x8_t __ret_215; \
+  float16x8_t __s0_215 = __p0_215; \
+  float16x8_t __s1_215 = __p1_215; \
+  float16x8_t __s2_215 = __p2_215; \
+float16x8_t __reint_215 = __s2_215; \
+uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
+  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
+  __ret_215; \
+})
+#else
+#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
+  float16x8_t __ret_216; \
+  float16x8_t __s0_216 = __p0_216; \
+  float16x8_t __s1_216 = __p1_216; \
+  float16x8_t __s2_216 = __p2_216; \
+  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_216 = __rev2_216; \
+uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
+  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
+  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_216; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
+  float16x4_t __ret_217; \
+  float16x4_t __s0_217 = __p0_217; \
+  float16x4_t __s1_217 = __p1_217; \
+  float16x4_t __s2_217 = __p2_217; \
+float16x4_t __reint_217 = __s2_217; \
+uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
+  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
+  __ret_217; \
+})
+#else
+#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
+  float16x4_t __ret_218; \
+  float16x4_t __s0_218 = __p0_218; \
+  float16x4_t __s1_218 = __p1_218; \
+  float16x4_t __s2_218 = __p2_218; \
+  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
+  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
+  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
+float16x4_t __reint_218 = __rev2_218; \
+uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
+  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
+  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
+  __ret_218; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
+  float16x8_t __ret_219; \
+  float16x8_t __s0_219 = __p0_219; \
+  float16x8_t __s1_219 = __p1_219; \
+  float16x4_t __s2_219 = __p2_219; \
+float16x4_t __reint_219 = __s2_219; \
+uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
+  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
+  __ret_219; \
+})
+#else
+#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
+  float16x8_t __ret_220; \
+  float16x8_t __s0_220 = __p0_220; \
+  float16x8_t __s1_220 = __p1_220; \
+  float16x4_t __s2_220 = __p2_220; \
+  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
+float16x4_t __reint_220 = __rev2_220; \
+uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
+  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
+  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_220; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
+  float16x4_t __ret_221; \
+  float16x4_t __s0_221 = __p0_221; \
+  float16x4_t __s1_221 = __p1_221; \
+  float16x8_t __s2_221 = __p2_221; \
+float16x8_t __reint_221 = __s2_221; \
+uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
+  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
+  __ret_221; \
+})
+#else
+#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
+  float16x4_t __ret_222; \
+  float16x4_t __s0_222 = __p0_222; \
+  float16x4_t __s1_222 = __p1_222; \
+  float16x8_t __s2_222 = __p2_222; \
+  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
+  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
+  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_222 = __rev2_222; \
+uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
+  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
+  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
+  __ret_222; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
+  float16x8_t __ret_223; \
+  float16x8_t __s0_223 = __p0_223; \
+  float16x8_t __s1_223 = __p1_223; \
+  float16x8_t __s2_223 = __p2_223; \
+float16x8_t __reint_223 = __s2_223; \
+uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
+  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
+  __ret_223; \
+})
+#else
+#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
+  float16x8_t __ret_224; \
+  float16x8_t __s0_224 = __p0_224; \
+  float16x8_t __s1_224 = __p1_224; \
+  float16x8_t __s2_224 = __p2_224; \
+  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_224 = __rev2_224; \
+uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
+  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
+  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_224; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
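+/*
+ * Editorial note with a hedged usage sketch (illustrative only, not part
+ * of the generated header): per the FCMLA instruction these map to, each
+ * rotNN form accumulates half of the partial products of a complex
+ * multiply, rotated by NN degrees in the complex plane.  Chaining the
+ * rot0 and rot90 forms on one accumulator yields a full complex
+ * multiply-accumulate, and rot180 chained with rot270 yields its
+ * negation:
+ *
+ *   // acc += a * b, with float16 complex numbers stored as
+ *   // interleaved (real, imag) pairs
+ *   static inline float16x4_t cmla_f16(float16x4_t acc,
+ *                                      float16x4_t a, float16x4_t b) {
+ *     acc = vcmla_f16(acc, a, b);        // partial products, rotation 0
+ *     acc = vcmla_rot90_f16(acc, a, b);  // remaining products, rot 90
+ *     return acc;
+ *   }
+ */
+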
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
+  float16x4_t __ret_225; \
+  float16x4_t __s0_225 = __p0_225; \
+  float16x4_t __s1_225 = __p1_225; \
+  float16x4_t __s2_225 = __p2_225; \
+float16x4_t __reint_225 = __s2_225; \
+uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
+  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
+  __ret_225; \
+})
+#else
+#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
+  float16x4_t __ret_226; \
+  float16x4_t __s0_226 = __p0_226; \
+  float16x4_t __s1_226 = __p1_226; \
+  float16x4_t __s2_226 = __p2_226; \
+  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
+  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
+  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
+float16x4_t __reint_226 = __rev2_226; \
+uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
+  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
+  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
+  __ret_226; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
+  float16x8_t __ret_227; \
+  float16x8_t __s0_227 = __p0_227; \
+  float16x8_t __s1_227 = __p1_227; \
+  float16x4_t __s2_227 = __p2_227; \
+float16x4_t __reint_227 = __s2_227; \
+uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
+  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
+  __ret_227; \
+})
+#else
+#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
+  float16x8_t __ret_228; \
+  float16x8_t __s0_228 = __p0_228; \
+  float16x8_t __s1_228 = __p1_228; \
+  float16x4_t __s2_228 = __p2_228; \
+  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
+float16x4_t __reint_228 = __rev2_228; \
+uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
+  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
+  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_228; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
+  float16x4_t __ret_229; \
+  float16x4_t __s0_229 = __p0_229; \
+  float16x4_t __s1_229 = __p1_229; \
+  float16x8_t __s2_229 = __p2_229; \
+float16x8_t __reint_229 = __s2_229; \
+uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
+  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
+  __ret_229; \
+})
+#else
+#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
+  float16x4_t __ret_230; \
+  float16x4_t __s0_230 = __p0_230; \
+  float16x4_t __s1_230 = __p1_230; \
+  float16x8_t __s2_230 = __p2_230; \
+  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
+  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
+  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_230 = __rev2_230; \
+uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
+  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
+  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
+  __ret_230; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
+  float16x8_t __ret_231; \
+  float16x8_t __s0_231 = __p0_231; \
+  float16x8_t __s1_231 = __p1_231; \
+  float16x8_t __s2_231 = __p2_231; \
+float16x8_t __reint_231 = __s2_231; \
+uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
+  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
+  __ret_231; \
+})
+#else
+#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
+  float16x8_t __ret_232; \
+  float16x8_t __s0_232 = __p0_232; \
+  float16x8_t __s1_232 = __p1_232; \
+  float16x8_t __s2_232 = __p2_232; \
+  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_232 = __rev2_232; \
+uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
+  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
+  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_232; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
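+/*
+ * Editorial note (illustrative, not part of the generated header): the
+ * vcadd*_rot90/_rot270 intrinsics map to FCADD, which adds the second
+ * operand rotated by 90 or 270 degrees in the complex plane.  For each
+ * (real, imag) pair this amounts to
+ *
+ *   rot90:  re = a.re - b.im,  im = a.im + b.re   (a + i*b)
+ *   rot270: re = a.re + b.im,  im = a.im - b.re   (a - i*b)
+ */
+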
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
+  float64x1_t __ret_233; \
+  float64x1_t __s0_233 = __p0_233; \
+  float64x1_t __s1_233 = __p1_233; \
+  float64x1_t __s2_233 = __p2_233; \
+float64x1_t __reint_233 = __s2_233; \
+uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
+  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
+  __ret_233; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
+  float64x2_t __ret_234; \
+  float64x2_t __s0_234 = __p0_234; \
+  float64x2_t __s1_234 = __p1_234; \
+  float64x1_t __s2_234 = __p2_234; \
+float64x1_t __reint_234 = __s2_234; \
+uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
+  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
+  __ret_234; \
+})
+#else
+#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
+  float64x2_t __ret_235; \
+  float64x2_t __s0_235 = __p0_235; \
+  float64x2_t __s1_235 = __p1_235; \
+  float64x1_t __s2_235 = __p2_235; \
+  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
+  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
+float64x1_t __reint_235 = __s2_235; \
+uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
+  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
+  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
+  __ret_235; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
+  float64x1_t __ret_236; \
+  float64x1_t __s0_236 = __p0_236; \
+  float64x1_t __s1_236 = __p1_236; \
+  float64x2_t __s2_236 = __p2_236; \
+float64x2_t __reint_236 = __s2_236; \
+uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
+  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
+  __ret_236; \
+})
+#else
+#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
+  float64x1_t __ret_237; \
+  float64x1_t __s0_237 = __p0_237; \
+  float64x1_t __s1_237 = __p1_237; \
+  float64x2_t __s2_237 = __p2_237; \
+  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
+float64x2_t __reint_237 = __rev2_237; \
+uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
+  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
+  __ret_237; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
+  float64x2_t __ret_238; \
+  float64x2_t __s0_238 = __p0_238; \
+  float64x2_t __s1_238 = __p1_238; \
+  float64x2_t __s2_238 = __p2_238; \
+float64x2_t __reint_238 = __s2_238; \
+uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
+  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
+  __ret_238; \
+})
+#else
+#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
+  float64x2_t __ret_239; \
+  float64x2_t __s0_239 = __p0_239; \
+  float64x2_t __s1_239 = __p1_239; \
+  float64x2_t __s2_239 = __p2_239; \
+  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
+  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
+  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
+float64x2_t __reint_239 = __rev2_239; \
+uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
+  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
+  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
+  __ret_239; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
+  float64x1_t __ret_240; \
+  float64x1_t __s0_240 = __p0_240; \
+  float64x1_t __s1_240 = __p1_240; \
+  float64x1_t __s2_240 = __p2_240; \
+float64x1_t __reint_240 = __s2_240; \
+uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
+  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
+  __ret_240; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
+  float64x2_t __ret_241; \
+  float64x2_t __s0_241 = __p0_241; \
+  float64x2_t __s1_241 = __p1_241; \
+  float64x1_t __s2_241 = __p2_241; \
+float64x1_t __reint_241 = __s2_241; \
+uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
+  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
+  __ret_241; \
+})
+#else
+#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
+  float64x2_t __ret_242; \
+  float64x2_t __s0_242 = __p0_242; \
+  float64x2_t __s1_242 = __p1_242; \
+  float64x1_t __s2_242 = __p2_242; \
+  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
+  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
+float64x1_t __reint_242 = __s2_242; \
+uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
+  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
+  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
+  __ret_242; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
+  float64x1_t __ret_243; \
+  float64x1_t __s0_243 = __p0_243; \
+  float64x1_t __s1_243 = __p1_243; \
+  float64x2_t __s2_243 = __p2_243; \
+float64x2_t __reint_243 = __s2_243; \
+uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
+  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
+  __ret_243; \
+})
+#else
+#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
+  float64x1_t __ret_244; \
+  float64x1_t __s0_244 = __p0_244; \
+  float64x1_t __s1_244 = __p1_244; \
+  float64x2_t __s2_244 = __p2_244; \
+  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
+float64x2_t __reint_244 = __rev2_244; \
+uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
+  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
+  __ret_244; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
+  float64x2_t __ret_245; \
+  float64x2_t __s0_245 = __p0_245; \
+  float64x2_t __s1_245 = __p1_245; \
+  float64x2_t __s2_245 = __p2_245; \
+float64x2_t __reint_245 = __s2_245; \
+uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
+  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
+  __ret_245; \
+})
+#else
+#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
+  float64x2_t __ret_246; \
+  float64x2_t __s0_246 = __p0_246; \
+  float64x2_t __s1_246 = __p1_246; \
+  float64x2_t __s2_246 = __p2_246; \
+  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
+  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
+  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
+float64x2_t __reint_246 = __rev2_246; \
+uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
+  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
+  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
+  __ret_246; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
+  float64x1_t __ret_247; \
+  float64x1_t __s0_247 = __p0_247; \
+  float64x1_t __s1_247 = __p1_247; \
+  float64x1_t __s2_247 = __p2_247; \
+float64x1_t __reint_247 = __s2_247; \
+uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
+  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
+  __ret_247; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
+  float64x2_t __ret_248; \
+  float64x2_t __s0_248 = __p0_248; \
+  float64x2_t __s1_248 = __p1_248; \
+  float64x1_t __s2_248 = __p2_248; \
+float64x1_t __reint_248 = __s2_248; \
+uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
+  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
+  __ret_248; \
+})
+#else
+#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
+  float64x2_t __ret_249; \
+  float64x2_t __s0_249 = __p0_249; \
+  float64x2_t __s1_249 = __p1_249; \
+  float64x1_t __s2_249 = __p2_249; \
+  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
+  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
+float64x1_t __reint_249 = __s2_249; \
+uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
+  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
+  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
+  __ret_249; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
+  float64x1_t __ret_250; \
+  float64x1_t __s0_250 = __p0_250; \
+  float64x1_t __s1_250 = __p1_250; \
+  float64x2_t __s2_250 = __p2_250; \
+float64x2_t __reint_250 = __s2_250; \
+uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
+  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
+  __ret_250; \
+})
+#else
+#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
+  float64x1_t __ret_251; \
+  float64x1_t __s0_251 = __p0_251; \
+  float64x1_t __s1_251 = __p1_251; \
+  float64x2_t __s2_251 = __p2_251; \
+  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
+float64x2_t __reint_251 = __rev2_251; \
+uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
+  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
+  __ret_251; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
+  float64x2_t __ret_252; \
+  float64x2_t __s0_252 = __p0_252; \
+  float64x2_t __s1_252 = __p1_252; \
+  float64x2_t __s2_252 = __p2_252; \
+float64x2_t __reint_252 = __s2_252; \
+uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
+  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
+  __ret_252; \
+})
+#else
+#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
+  float64x2_t __ret_253; \
+  float64x2_t __s0_253 = __p0_253; \
+  float64x2_t __s1_253 = __p1_253; \
+  float64x2_t __s2_253 = __p2_253; \
+  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
+  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
+  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
+float64x2_t __reint_253 = __rev2_253; \
+uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
+  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
+  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
+  __ret_253; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+  float64x1_t __ret_254; \
+  float64x1_t __s0_254 = __p0_254; \
+  float64x1_t __s1_254 = __p1_254; \
+  float64x1_t __s2_254 = __p2_254; \
+float64x1_t __reint_254 = __s2_254; \
+uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
+  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
+  __ret_254; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+  float64x2_t __ret_255; \
+  float64x2_t __s0_255 = __p0_255; \
+  float64x2_t __s1_255 = __p1_255; \
+  float64x1_t __s2_255 = __p2_255; \
+float64x1_t __reint_255 = __s2_255; \
+uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
+  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
+  __ret_255; \
+})
+#else
+#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+  float64x2_t __ret_256; \
+  float64x2_t __s0_256 = __p0_256; \
+  float64x2_t __s1_256 = __p1_256; \
+  float64x1_t __s2_256 = __p2_256; \
+  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
+  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
+float64x1_t __reint_256 = __s2_256; \
+uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
+  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
+  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
+  __ret_256; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+  float64x1_t __ret_257; \
+  float64x1_t __s0_257 = __p0_257; \
+  float64x1_t __s1_257 = __p1_257; \
+  float64x2_t __s2_257 = __p2_257; \
+float64x2_t __reint_257 = __s2_257; \
+uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
+  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
+  __ret_257; \
+})
+#else
+#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+  float64x1_t __ret_258; \
+  float64x1_t __s0_258 = __p0_258; \
+  float64x1_t __s1_258 = __p1_258; \
+  float64x2_t __s2_258 = __p2_258; \
+  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
+float64x2_t __reint_258 = __rev2_258; \
+uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
+  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
+  __ret_258; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+  float64x2_t __ret_259; \
+  float64x2_t __s0_259 = __p0_259; \
+  float64x2_t __s1_259 = __p1_259; \
+  float64x2_t __s2_259 = __p2_259; \
+float64x2_t __reint_259 = __s2_259; \
+uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
+  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
+  __ret_259; \
+})
+#else
+#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+  float64x2_t __ret_260; \
+  float64x2_t __s0_260 = __p0_260; \
+  float64x2_t __s1_260 = __p1_260; \
+  float64x2_t __s2_260 = __p2_260; \
+  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
+  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
+  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
+float64x2_t __reint_260 = __rev2_260; \
+uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
+  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
+  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
+  __ret_260; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_DOTPROD)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+  uint32x4_t __ret_261; \
+  uint32x4_t __s0_261 = __p0_261; \
+  uint8x16_t __s1_261 = __p1_261; \
+  uint8x8_t __s2_261 = __p2_261; \
+  uint8x8_t __reint_261 = __s2_261; \
+  uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
+  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
+  __ret_261; \
+})
+#else
+#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+  uint32x4_t __ret_262; \
+  uint32x4_t __s0_262 = __p0_262; \
+  uint8x16_t __s1_262 = __p1_262; \
+  uint8x8_t __s2_262 = __p2_262; \
+  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
+  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_262 = __rev2_262; \
+  uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
+  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
+  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
+  __ret_262; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+  int32x4_t __ret_263; \
+  int32x4_t __s0_263 = __p0_263; \
+  int8x16_t __s1_263 = __p1_263; \
+  int8x8_t __s2_263 = __p2_263; \
+  int8x8_t __reint_263 = __s2_263; \
+  int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
+  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
+  __ret_263; \
+})
+#else
+#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+  int32x4_t __ret_264; \
+  int32x4_t __s0_264 = __p0_264; \
+  int8x16_t __s1_264 = __p1_264; \
+  int8x8_t __s2_264 = __p2_264; \
+  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
+  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_264 = __rev2_264; \
+  int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
+  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
+  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
+  __ret_264; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+  uint32x2_t __ret_265; \
+  uint32x2_t __s0_265 = __p0_265; \
+  uint8x8_t __s1_265 = __p1_265; \
+  uint8x8_t __s2_265 = __p2_265; \
+  uint8x8_t __reint_265 = __s2_265; \
+  uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
+  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
+  __ret_265; \
+})
+#else
+#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+  uint32x2_t __ret_266; \
+  uint32x2_t __s0_266 = __p0_266; \
+  uint8x8_t __s1_266 = __p1_266; \
+  uint8x8_t __s2_266 = __p2_266; \
+  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
+  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_266 = __rev2_266; \
+  uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
+  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
+  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
+  __ret_266; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+  int32x2_t __ret_267; \
+  int32x2_t __s0_267 = __p0_267; \
+  int8x8_t __s1_267 = __p1_267; \
+  int8x8_t __s2_267 = __p2_267; \
+  int8x8_t __reint_267 = __s2_267; \
+  int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
+  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
+  __ret_267; \
+})
+#else
+#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+  int32x2_t __ret_268; \
+  int32x2_t __s0_268 = __p0_268; \
+  int8x8_t __s1_268 = __p1_268; \
+  int8x8_t __s2_268 = __p2_268; \
+  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
+  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_268 = __rev2_268; \
+  int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
+  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
+  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
+  __ret_268; \
+})
+#endif
+
+#endif
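+/* Editor's note (illustrative addition, not part of the generated header):
+   with vdotq_u32 each 32-bit accumulator lane i gains the dot product of the
+   corresponding four byte lanes, acc[i] += sum of a[4*i+j]*b[4*i+j] for
+   j = 0..3.  The helper name below is hypothetical. */
+#if defined(__ARM_FEATURE_DOTPROD)
+__ai uint32x4_t __example_udot_accumulate(uint32x4_t __acc, uint8x16_t __a, uint8x16_t __b) {
+  return vdotq_u32(__acc, __a, __b);
+}
+#endif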
+#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+  uint32x4_t __ret_269; \
+  uint32x4_t __s0_269 = __p0_269; \
+  uint8x16_t __s1_269 = __p1_269; \
+  uint8x16_t __s2_269 = __p2_269; \
+  uint8x16_t __reint_269 = __s2_269; \
+  uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
+  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
+  __ret_269; \
+})
+#else
+#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+  uint32x4_t __ret_270; \
+  uint32x4_t __s0_270 = __p0_270; \
+  uint8x16_t __s1_270 = __p1_270; \
+  uint8x16_t __s2_270 = __p2_270; \
+  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
+  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __reint_270 = __rev2_270; \
+  uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
+  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
+  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
+  __ret_270; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+  int32x4_t __ret_271; \
+  int32x4_t __s0_271 = __p0_271; \
+  int8x16_t __s1_271 = __p1_271; \
+  int8x16_t __s2_271 = __p2_271; \
+  int8x16_t __reint_271 = __s2_271; \
+  int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
+  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
+  __ret_271; \
+})
+#else
+#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+  int32x4_t __ret_272; \
+  int32x4_t __s0_272 = __p0_272; \
+  int8x16_t __s1_272 = __p1_272; \
+  int8x16_t __s2_272 = __p2_272; \
+  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
+  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __reint_272 = __rev2_272; \
+  int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
+  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
+  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
+  __ret_272; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+  uint32x2_t __ret_273; \
+  uint32x2_t __s0_273 = __p0_273; \
+  uint8x8_t __s1_273 = __p1_273; \
+  uint8x16_t __s2_273 = __p2_273; \
+  uint8x16_t __reint_273 = __s2_273; \
+  uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
+  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
+  __ret_273; \
+})
+#else
+#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+  uint32x2_t __ret_274; \
+  uint32x2_t __s0_274 = __p0_274; \
+  uint8x8_t __s1_274 = __p1_274; \
+  uint8x16_t __s2_274 = __p2_274; \
+  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
+  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __reint_274 = __rev2_274; \
+  uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
+  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
+  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
+  __ret_274; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+  int32x2_t __ret_275; \
+  int32x2_t __s0_275 = __p0_275; \
+  int8x8_t __s1_275 = __p1_275; \
+  int8x16_t __s2_275 = __p2_275; \
+  int8x16_t __reint_275 = __s2_275; \
+  int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
+  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
+  __ret_275; \
+})
+#else
+#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+  int32x2_t __ret_276; \
+  int32x2_t __s0_276 = __p0_276; \
+  int8x8_t __s1_276 = __p1_276; \
+  int8x16_t __s2_276 = __p2_276; \
+  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
+  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __reint_276 = __rev2_276; \
+  int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
+  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
+  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
+  __ret_276; \
+})
+#endif
+
+#endif
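+/* Editor's note (illustrative addition, not part of the generated header):
+   the _lane/_laneq dot-product forms above broadcast one four-byte group of
+   the final operand to every accumulator lane; the index must be a constant,
+   0..1 for _lane and 0..3 for _laneq.  Hypothetical helper: */
+#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
+__ai int32x4_t __example_sdot_by_group(int32x4_t __acc, int8x16_t __a, int8x16_t __b) {
+  /* every lane of __acc accumulates against byte group 1 of __b */
+  return vdotq_laneq_s32(__acc, __a, __b, 1);
+}
+#endif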
+#if defined(__ARM_FEATURE_FMA)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
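+/* Editor's note (illustrative addition, not part of the generated header):
+   vfmaq_f32 computes acc + a*b with a single rounding, and the vfms wrappers
+   above negate the first multiplicand, so vfmsq_f32(a, b, c) is a - b*c,
+   still fused.  Hypothetical helper: */
+#if defined(__ARM_FEATURE_FMA)
+__ai float32x4_t __example_fused_subtract(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
+  return vfmsq_f32(__a, __b, __c);  /* one rounding step, not two */
+}
+#endif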
+#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#endif
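+/* Editor's note (illustrative addition, not part of the generated header):
+   the vfmlal/vfmlsl intrinsics above widen f16 operands to f32 and fuse the
+   multiply into the f32 accumulation; _low consumes lanes 0..3 and _high
+   lanes 4..7 of each float16x8_t, so the pair together processes a full
+   vector.  Hypothetical helper: */
+#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
+__ai float32x4_t __example_fmlal_all_lanes(float32x4_t __acc, float16x8_t __a, float16x8_t __b) {
+  __acc = vfmlalq_low_f16(__acc, __a, __b);
+  return vfmlalq_high_f16(__acc, __a, __b);
+}
+#endif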
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
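+/* Editor's note (illustrative addition, not part of the generated header):
+   the f16 comparisons return all-ones or all-zeros 16-bit lanes, which makes
+   them usable directly as the selector of vbslq_f16 defined earlier in this
+   section.  Hypothetical helper building a branch-free elementwise maximum
+   (lanes where __a >= __b take __a, all other lanes take __b): */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+__ai float16x8_t __example_select_greater(float16x8_t __a, float16x8_t __b) {
+  return vbslq_f16(vcgeq_f16(__a, __b), __a, __b);
+}
+#endif
+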
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint16x4_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
+  float16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
+  float16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
+  float16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
+  float16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
+  int16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
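+// vcvt_s16_f16 / vcvt_u16_f16: float16 -> integer conversions rounding toward
+// zero (truncation), the default NEON FCVTZS/FCVTZU behaviour.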
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
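+// vcvta_*: float16 -> integer, rounding to nearest with ties away from zero.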
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
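+// vcvtm_*: float16 -> integer, rounding toward minus infinity (floor).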
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
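+// vcvtn_*: float16 -> integer, rounding to nearest with ties to even.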
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
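+// vcvtp_*: float16 -> integer, rounding toward plus infinity (ceiling).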
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
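+// vext_*: concatenate __p0 and __p1 and extract a vector starting at element
+// index __p2 of the pair.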
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
+  __ret; \
+})
+#else
+#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
+  __ret; \
+})
+#else
+#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
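+// vfma_*: fused multiply-accumulate, __p0 + __p1 * __p2 with a single
+// rounding. The __noswap_ variants skip the endian lane reversal so that
+// other big-endian wrappers can call them on already-reversed operands.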
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
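+// vfms_*: fused multiply-subtract, expressed as vfma with __p1 negated.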
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = vfmaq_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = vfma_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
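+// vmul_* family: element-wise multiply, plus _lane forms that broadcast one
+// lane of __p1 via splat, and _n forms that broadcast a scalar __p1 across
+// all lanes.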
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
+  float16x8_t __ret_277; \
+  float16x8_t __s0_277 = __p0_277; \
+  float16x4_t __s1_277 = __p1_277; \
+  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
+  __ret_277; \
+})
+#else
+#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
+  float16x8_t __ret_278; \
+  float16x8_t __s0_278 = __p0_278; \
+  float16x4_t __s1_278 = __p1_278; \
+  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
+  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
+  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_278; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
+  float16x4_t __ret_279; \
+  float16x4_t __s0_279 = __p0_279; \
+  float16x4_t __s1_279 = __p1_279; \
+  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
+  __ret_279; \
+})
+#else
+#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
+  float16x4_t __ret_280; \
+  float16x4_t __s0_280 = __p0_280; \
+  float16x4_t __s1_280 = __p1_280; \
+  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
+  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
+  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
+  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
+  __ret_280; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
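+// vpadd/vpmax/vpmin: pairwise operations combining adjacent element pairs
+// drawn from the two source vectors.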
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
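+// vrecpe/vrecps: reciprocal estimate and the matching Newton-Raphson step
+// (vrecps computes 2 - __p0 * __p1), used together to refine 1/x.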
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
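+// vrev64_*: reverse the elements within each 64-bit half of the vector.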
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+  return __ret;
+}
+#else
+__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrev64_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  return __ret;
+}
+#else
+__ai float16x4_t vrev64_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
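+// vrsqrte/vrsqrts: reciprocal square-root estimate and its Newton-Raphson
+// step (vrsqrts computes (3 - __p0 * __p1) / 2), for refining 1/sqrt(x).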
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
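+// vtrn/vuzp/vzip: transpose, unzip and zip operations returning a pair of
+// vectors; the builtins write both results through the &__ret out-pointer.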
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8x2_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4x2_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
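+// AArch64-only half-precision operations: true vector division, scalar lane
+// extraction (vduph_*), lane-indexed fused multiply-accumulate, and
+// across-vector reductions, none of which exist on 32-bit ARM NEON.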
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
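+// vfmah_lane/vfmaq_lane/vfma_lane: fused multiply-accumulate against a single
+// selected lane of the third operand; _lane indexes a 64-bit vector, _laneq a
+// 128-bit one. The __noswap_ helpers again avoid double lane reversal on
+// big-endian targets.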
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
+  __ret; \
+})
+#else
+#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x8_t __s2 = __p2; \
+  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
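+// vfms*_lane/_laneq/_n: multiply-subtract forms, again lowered to the
+// corresponding vfma with the multiplicand negated.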
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
+  float16_t __ret_281; \
+  float16_t __s0_281 = __p0_281; \
+  float16_t __s1_281 = __p1_281; \
+  float16x4_t __s2_281 = __p2_281; \
+  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
+  __ret_281; \
+})
+#else
+#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
+  float16_t __ret_282; \
+  float16_t __s0_282 = __p0_282; \
+  float16_t __s1_282 = __p1_282; \
+  float16x4_t __s2_282 = __p2_282; \
+  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
+  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
+  __ret_282; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
+  float16x8_t __ret_283; \
+  float16x8_t __s0_283 = __p0_283; \
+  float16x8_t __s1_283 = __p1_283; \
+  float16x4_t __s2_283 = __p2_283; \
+  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
+  __ret_283; \
+})
+#else
+#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
+  float16x8_t __ret_284; \
+  float16x8_t __s0_284 = __p0_284; \
+  float16x8_t __s1_284 = __p1_284; \
+  float16x4_t __s2_284 = __p2_284; \
+  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
+  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
+  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_284; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
+  float16x4_t __ret_285; \
+  float16x4_t __s0_285 = __p0_285; \
+  float16x4_t __s1_285 = __p1_285; \
+  float16x4_t __s2_285 = __p2_285; \
+  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
+  __ret_285; \
+})
+#else
+#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
+  float16x4_t __ret_286; \
+  float16x4_t __s0_286 = __p0_286; \
+  float16x4_t __s1_286 = __p1_286; \
+  float16x4_t __s2_286 = __p2_286; \
+  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
+  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
+  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
+  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
+  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
+  __ret_286; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
+  float16_t __ret_287; \
+  float16_t __s0_287 = __p0_287; \
+  float16_t __s1_287 = __p1_287; \
+  float16x8_t __s2_287 = __p2_287; \
+  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
+  __ret_287; \
+})
+#else
+#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
+  float16_t __ret_288; \
+  float16_t __s0_288 = __p0_288; \
+  float16_t __s1_288 = __p1_288; \
+  float16x8_t __s2_288 = __p2_288; \
+  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
+  __ret_288; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
+  float16x8_t __ret_289; \
+  float16x8_t __s0_289 = __p0_289; \
+  float16x8_t __s1_289 = __p1_289; \
+  float16x8_t __s2_289 = __p2_289; \
+  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
+  __ret_289; \
+})
+#else
+#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
+  float16x8_t __ret_290; \
+  float16x8_t __s0_290 = __p0_290; \
+  float16x8_t __s1_290 = __p1_290; \
+  float16x8_t __s2_290 = __p2_290; \
+  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
+  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_290; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
+  float16x4_t __ret_291; \
+  float16x4_t __s0_291 = __p0_291; \
+  float16x4_t __s1_291 = __p1_291; \
+  float16x8_t __s2_291 = __p2_291; \
+  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
+  __ret_291; \
+})
+#else
+#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
+  float16x4_t __ret_292; \
+  float16x4_t __s0_292 = __p0_292; \
+  float16x4_t __s1_292 = __p1_292; \
+  float16x8_t __s2_292 = __p2_292; \
+  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
+  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
+  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
+  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
+  __ret_292; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret; \
+})
+#else
+#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16_t __s2 = __p2; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
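+// vmaxnmv/vminnmv and vmaxv/vminv: horizontal reductions across all lanes;
+// the *nm* forms follow IEEE 754-2008 maxNum/minNum semantics, preferring a
+// number over a quiet NaN.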
+#ifdef __LITTLE_ENDIAN__
+#define vmaxnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmaxv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vmaxv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vminnmvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vminnmv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
+  __ret; \
+})
+#else
+#define vminvq_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vminv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
+  __ret; \
+})
+#else
+#define vminv_f16(__p0) __extension__ ({ \
+  float16_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
+  __ret; \
+})
+#endif
+
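+// Multiply-by-lane: the selected lane of the second operand is broadcast
+// with splatq_laneq_f16/splat_laneq_f16 and then multiplied element-wise,
+// allowing a single multiply-by-element instruction to be selected.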
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
+  float16x8_t __ret_293; \
+  float16x8_t __s0_293 = __p0_293; \
+  float16x8_t __s1_293 = __p1_293; \
+  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
+  __ret_293; \
+})
+#else
+#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
+  float16x8_t __ret_294; \
+  float16x8_t __s0_294 = __p0_294; \
+  float16x8_t __s1_294 = __p1_294; \
+  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
+  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
+  float16x4_t __ret_295; \
+  float16x4_t __s0_295 = __p0_295; \
+  float16x8_t __s1_295 = __p1_295; \
+  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
+  __ret_295; \
+})
+#else
+#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
+  float16x4_t __ret_296; \
+  float16x4_t __s0_296 = __p0_296; \
+  float16x8_t __s1_296 = __p1_296; \
+  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
+  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
+  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
+  __ret_296; \
+})
+#endif
+
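+// vmulx* implement the FMULX "multiply extended" operation: identical to
+// an ordinary multiply except that +/-0 times +/-infinity returns +/-2.0
+// instead of NaN.  The trailing literal passed to these builtins (40
+// here, 8 for the 64-bit form) encodes the NEON type: the low four bits
+// select the element type, bit 4 marks an unsigned type, and bit 5 a
+// 128-bit vector.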
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#endif
+
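+// Scalar-by-lane forms (the 'h' suffix denotes a half-precision scalar):
+// only the vector supplying the lane has to be reversed on big-endian
+// targets; the scalar operand and result are unaffected.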
+#ifdef __LITTLE_ENDIAN__
+#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x4_t __s1 = __p1; \
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
+  float16x8_t __ret_297; \
+  float16x8_t __s0_297 = __p0_297; \
+  float16x4_t __s1_297 = __p1_297; \
+  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
+  __ret_297; \
+})
+#else
+#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
+  float16x8_t __ret_298; \
+  float16x8_t __s0_298 = __p0_298; \
+  float16x4_t __s1_298 = __p1_298; \
+  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
+  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
+  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_298; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
+  float16x4_t __ret_299; \
+  float16x4_t __s0_299 = __p0_299; \
+  float16x4_t __s1_299 = __p1_299; \
+  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
+  __ret_299; \
+})
+#else
+#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
+  float16x4_t __ret_300; \
+  float16x4_t __s0_300 = __p0_300; \
+  float16x4_t __s1_300 = __p1_300; \
+  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
+  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
+  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
+  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
+  __ret_300; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
+  float16_t __ret; \
+  float16_t __s0 = __p0; \
+  float16x8_t __s1 = __p1; \
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
+  float16x8_t __ret_301; \
+  float16x8_t __s0_301 = __p0_301; \
+  float16x8_t __s1_301 = __p1_301; \
+  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
+  __ret_301; \
+})
+#else
+#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
+  float16x8_t __ret_302; \
+  float16x8_t __s0_302 = __p0_302; \
+  float16x8_t __s1_302 = __p1_302; \
+  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
+  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
+  float16x4_t __ret_303; \
+  float16x4_t __s0_303 = __p0_303; \
+  float16x8_t __s1_303 = __p1_303; \
+  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
+  __ret_303; \
+})
+#else
+#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
+  float16x4_t __ret_304; \
+  float16x4_t __s0_304 = __p0_304; \
+  float16x8_t __s1_304 = __p1_304; \
+  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
+  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
+  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
+  __ret_304; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
+  __ret; \
+})
+#else
+#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
+  __ret; \
+})
+#else
+#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
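+// Pairwise operations: each result lane combines two adjacent lanes of
+// the concatenated sources, e.g. vpaddq_f16(a, b) yields
+//   {a0+a1, a2+a3, a4+a5, a6+a7, b0+b1, b2+b3, b4+b5, b6+b7}.
+// The *nm variants again use the NaN-ignoring maxNum/minNum semantics.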
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
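+// vrndi* round each element to an integral value using the current FPCR
+// rounding mode (FRINTI); vsqrt* are the element-wise square roots.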
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vrndi_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vrndi_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
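+// Permutes are emitted directly as shuffle masks rather than builtins.
+// For two float16x8_t inputs a and b:
+//   vtrn1q_f16(a, b) -> {a0,b0,a2,b2,a4,b4,a6,b6}   (even-lane transpose)
+//   vuzp1q_f16(a, b) -> {a0,a2,a4,a6,b0,b2,b4,b6}   (even de-interleave)
+//   vzip1q_f16(a, b) -> {a0,b0,a1,b1,a2,b2,a3,b3}   (low-half interleave)
+// and the *2 forms produce the complementary odd/high selections.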
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+  return __ret;
+}
+#else
+__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+  return __ret;
+}
+#else
+__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+  return __ret;
+}
+#else
+__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+  return __ret;
+}
+#else
+__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+  return __ret;
+}
+#else
+__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+  return __ret;
+}
+#else
+__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+  return __ret;
+}
+#else
+__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+  return __ret;
+}
+#else
+__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
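+// 8-bit integer matrix multiply (FEAT_I8MM).  vmmlaq_u32/vmmlaq_s32
+// treat the first 8-bit source as a 2x8 matrix and the second as an 8x2
+// matrix, accumulating the widened 2x2 product row-major into the four
+// 32-bit lanes of __p0 (UMMLA/SMMLA); vusmmlaq_s32 is the mixed
+// unsigned-by-signed form (USMMLA).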
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
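+// Mixed-sign dot products (USDOT): each 32-bit accumulator lane gains
+// the sum of four unsigned-by-signed byte products, i.e.
+//   ret[i] = __p0[i] + sum(j = 0..3) __p1[4*i+j] * __p2[4*i+j].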
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
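+// For the lane variants the int8x8_t lane source is reinterpreted as
+// int32x2_t so that a whole group of four bytes can be broadcast with
+// splat(q)_lane_s32, then cast back to bytes for the dot product.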
+#ifdef __LITTLE_ENDIAN__
+#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
+  int32x4_t __ret_305; \
+  int32x4_t __s0_305 = __p0_305; \
+  uint8x16_t __s1_305 = __p1_305; \
+  int8x8_t __s2_305 = __p2_305; \
+  int8x8_t __reint_305 = __s2_305; \
+  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
+  __ret_305; \
+})
+#else
+#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
+  int32x4_t __ret_306; \
+  int32x4_t __s0_306 = __p0_306; \
+  uint8x16_t __s1_306 = __p1_306; \
+  int8x8_t __s2_306 = __p2_306; \
+  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
+  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_306 = __rev2_306; \
+  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
+  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
+  __ret_306; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
+  int32x2_t __ret_307; \
+  int32x2_t __s0_307 = __p0_307; \
+  uint8x8_t __s1_307 = __p1_307; \
+  int8x8_t __s2_307 = __p2_307; \
+  int8x8_t __reint_307 = __s2_307; \
+  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
+  __ret_307; \
+})
+#else
+#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
+  int32x2_t __ret_308; \
+  int32x2_t __s0_308 = __p0_308; \
+  uint8x8_t __s1_308 = __p1_308; \
+  int8x8_t __s2_308 = __p2_308; \
+  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
+  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __reint_308 = __rev2_308; \
+  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
+  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
+  __ret_308; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_QRDMX)
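+// Rounding doubling multiply-accumulate (ARMv8.1-A FEAT_RDM).  Each lane
+// of vqrdmlah* computes sat(__p0 + rounding-high-half(2 * __p1 * __p2)),
+// i.e. SQRDMLAH; vqrdmlsh* subtract the product term instead (SQRDMLSH).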
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
+  int32x4_t __ret_309; \
+  int32x4_t __s0_309 = __p0_309; \
+  int32x4_t __s1_309 = __p1_309; \
+  int32x2_t __s2_309 = __p2_309; \
+  __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \
+  __ret_309; \
+})
+#else
+#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
+  int32x4_t __ret_310; \
+  int32x4_t __s0_310 = __p0_310; \
+  int32x4_t __s1_310 = __p1_310; \
+  int32x2_t __s2_310 = __p2_310; \
+  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
+  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
+  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
+  __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \
+  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
+  __ret_310; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
+  int16x8_t __ret_311; \
+  int16x8_t __s0_311 = __p0_311; \
+  int16x8_t __s1_311 = __p1_311; \
+  int16x4_t __s2_311 = __p2_311; \
+  __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \
+  __ret_311; \
+})
+#else
+#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
+  int16x8_t __ret_312; \
+  int16x8_t __s0_312 = __p0_312; \
+  int16x8_t __s1_312 = __p1_312; \
+  int16x4_t __s2_312 = __p2_312; \
+  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
+  __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \
+  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_312; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
+  int32x2_t __ret_313; \
+  int32x2_t __s0_313 = __p0_313; \
+  int32x2_t __s1_313 = __p1_313; \
+  int32x2_t __s2_313 = __p2_313; \
+  __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \
+  __ret_313; \
+})
+#else
+#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
+  int32x2_t __ret_314; \
+  int32x2_t __s0_314 = __p0_314; \
+  int32x2_t __s1_314 = __p1_314; \
+  int32x2_t __s2_314 = __p2_314; \
+  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
+  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
+  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
+  __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \
+  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
+  __ret_314; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
+  int16x4_t __ret_315; \
+  int16x4_t __s0_315 = __p0_315; \
+  int16x4_t __s1_315 = __p1_315; \
+  int16x4_t __s2_315 = __p2_315; \
+  __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \
+  __ret_315; \
+})
+#else
+#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
+  int16x4_t __ret_316; \
+  int16x4_t __s0_316 = __p0_316; \
+  int16x4_t __s1_316 = __p1_316; \
+  int16x4_t __s2_316 = __p2_316; \
+  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
+  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
+  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
+  __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \
+  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
+  __ret_316; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
+  int32x4_t __ret_317; \
+  int32x4_t __s0_317 = __p0_317; \
+  int32x4_t __s1_317 = __p1_317; \
+  int32x2_t __s2_317 = __p2_317; \
+  __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \
+  __ret_317; \
+})
+#else
+#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
+  int32x4_t __ret_318; \
+  int32x4_t __s0_318 = __p0_318; \
+  int32x4_t __s1_318 = __p1_318; \
+  int32x2_t __s2_318 = __p2_318; \
+  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
+  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
+  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
+  __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \
+  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
+  __ret_318; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
+  int16x8_t __ret_319; \
+  int16x8_t __s0_319 = __p0_319; \
+  int16x8_t __s1_319 = __p1_319; \
+  int16x4_t __s2_319 = __p2_319; \
+  __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \
+  __ret_319; \
+})
+#else
+#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
+  int16x8_t __ret_320; \
+  int16x8_t __s0_320 = __p0_320; \
+  int16x8_t __s1_320 = __p1_320; \
+  int16x4_t __s2_320 = __p2_320; \
+  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
+  __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \
+  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_320; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
+  int32x2_t __ret_321; \
+  int32x2_t __s0_321 = __p0_321; \
+  int32x2_t __s1_321 = __p1_321; \
+  int32x2_t __s2_321 = __p2_321; \
+  __ret_321 = vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \
+  __ret_321; \
+})
+#else
+#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
+  int32x2_t __ret_322; \
+  int32x2_t __s0_322 = __p0_322; \
+  int32x2_t __s1_322 = __p1_322; \
+  int32x2_t __s2_322 = __p2_322; \
+  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
+  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
+  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
+  __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \
+  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
+  __ret_322; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
+  int16x4_t __ret_323; \
+  int16x4_t __s0_323 = __p0_323; \
+  int16x4_t __s1_323 = __p1_323; \
+  int16x4_t __s2_323 = __p2_323; \
+  __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \
+  __ret_323; \
+})
+#else
+#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
+  int16x4_t __ret_324; \
+  int16x4_t __s0_324 = __p0_324; \
+  int16x4_t __s1_324 = __p1_324; \
+  int16x4_t __s2_324 = __p2_324; \
+  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
+  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
+  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
+  __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \
+  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
+  __ret_324; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
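+// Scalar SQRDMLAH/SQRDMLSH forms are AArch64-only; the *_lane_/*_laneq_
+// wrappers extract one element with vget(q)_lane and reuse them, so on
+// big-endian targets only the lane-source vector is reversed.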
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int32_t __ret_325; \
+  int32_t __s0_325 = __p0_325; \
+  int32_t __s1_325 = __p1_325; \
+  int32x2_t __s2_325 = __p2_325; \
+  __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \
+  __ret_325; \
+})
+#else
+#define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int32_t __ret_326; \
+  int32_t __s0_326 = __p0_326; \
+  int32_t __s1_326 = __p1_326; \
+  int32x2_t __s2_326 = __p2_326; \
+  int32x2_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \
+  __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \
+  __ret_326; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  int16_t __ret_327; \
+  int16_t __s0_327 = __p0_327; \
+  int16_t __s1_327 = __p1_327; \
+  int16x4_t __s2_327 = __p2_327; \
+  __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \
+  __ret_327; \
+})
+#else
+#define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  int16_t __ret_328; \
+  int16_t __s0_328 = __p0_328; \
+  int16_t __s1_328 = __p1_328; \
+  int16x4_t __s2_328 = __p2_328; \
+  int16x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \
+  __ret_328; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32_t __ret_329; \
+  int32_t __s0_329 = __p0_329; \
+  int32_t __s1_329 = __p1_329; \
+  int32x4_t __s2_329 = __p2_329; \
+  __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \
+  __ret_329; \
+})
+#else
+#define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32_t __ret_330; \
+  int32_t __s0_330 = __p0_330; \
+  int32_t __s1_330 = __p1_330; \
+  int32x4_t __s2_330 = __p2_330; \
+  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
+  __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \
+  __ret_330; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int16_t __ret_331; \
+  int16_t __s0_331 = __p0_331; \
+  int16_t __s1_331 = __p1_331; \
+  int16x8_t __s2_331 = __p2_331; \
+  __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \
+  __ret_331; \
+})
+#else
+#define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int16_t __ret_332; \
+  int16_t __s0_332 = __p0_332; \
+  int16_t __s1_332 = __p1_332; \
+  int16x8_t __s2_332 = __p2_332; \
+  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \
+  __ret_332; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+  int32x4_t __ret_333; \
+  int32x4_t __s0_333 = __p0_333; \
+  int32x4_t __s1_333 = __p1_333; \
+  int32x4_t __s2_333 = __p2_333; \
+  __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \
+  __ret_333; \
+})
+#else
+#define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+  int32x4_t __ret_334; \
+  int32x4_t __s0_334 = __p0_334; \
+  int32x4_t __s1_334 = __p1_334; \
+  int32x4_t __s2_334 = __p2_334; \
+  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
+  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
+  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
+  __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \
+  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
+  __ret_334; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
+  int16x8_t __ret_335; \
+  int16x8_t __s0_335 = __p0_335; \
+  int16x8_t __s1_335 = __p1_335; \
+  int16x8_t __s2_335 = __p2_335; \
+  __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \
+  __ret_335; \
+})
+#else
+#define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
+  int16x8_t __ret_336; \
+  int16x8_t __s0_336 = __p0_336; \
+  int16x8_t __s1_336 = __p1_336; \
+  int16x8_t __s2_336 = __p2_336; \
+  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \
+  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_336; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
+  int32x2_t __ret_337; \
+  int32x2_t __s0_337 = __p0_337; \
+  int32x2_t __s1_337 = __p1_337; \
+  int32x4_t __s2_337 = __p2_337; \
+  __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \
+  __ret_337; \
+})
+#else
+#define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
+  int32x2_t __ret_338; \
+  int32x2_t __s0_338 = __p0_338; \
+  int32x2_t __s1_338 = __p1_338; \
+  int32x4_t __s2_338 = __p2_338; \
+  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
+  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
+  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
+  __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \
+  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
+  __ret_338; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
+  int16x4_t __ret_339; \
+  int16x4_t __s0_339 = __p0_339; \
+  int16x4_t __s1_339 = __p1_339; \
+  int16x8_t __s2_339 = __p2_339; \
+  __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \
+  __ret_339; \
+})
+#else
+#define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
+  int16x4_t __ret_340; \
+  int16x4_t __s0_340 = __p0_340; \
+  int16x4_t __s1_340 = __p1_340; \
+  int16x8_t __s2_340 = __p2_340; \
+  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
+  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
+  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \
+  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
+  __ret_340; \
+})
+#endif
+
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
+  int32_t __ret_341; \
+  int32_t __s0_341 = __p0_341; \
+  int32_t __s1_341 = __p1_341; \
+  int32x2_t __s2_341 = __p2_341; \
+  __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \
+  __ret_341; \
+})
+#else
+#define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
+  int32_t __ret_342; \
+  int32_t __s0_342 = __p0_342; \
+  int32_t __s1_342 = __p1_342; \
+  int32x2_t __s2_342 = __p2_342; \
+  int32x2_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \
+  __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \
+  __ret_342; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
+  int16_t __ret_343; \
+  int16_t __s0_343 = __p0_343; \
+  int16_t __s1_343 = __p1_343; \
+  int16x4_t __s2_343 = __p2_343; \
+  __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \
+  __ret_343; \
+})
+#else
+#define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
+  int16_t __ret_344; \
+  int16_t __s0_344 = __p0_344; \
+  int16_t __s1_344 = __p1_344; \
+  int16x4_t __s2_344 = __p2_344; \
+  int16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
+  __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \
+  __ret_344; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
+  int32_t __ret_345; \
+  int32_t __s0_345 = __p0_345; \
+  int32_t __s1_345 = __p1_345; \
+  int32x4_t __s2_345 = __p2_345; \
+  __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \
+  __ret_345; \
+})
+#else
+#define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
+  int32_t __ret_346; \
+  int32_t __s0_346 = __p0_346; \
+  int32_t __s1_346 = __p1_346; \
+  int32x4_t __s2_346 = __p2_346; \
+  int32x4_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \
+  __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \
+  __ret_346; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
+  int16_t __ret_347; \
+  int16_t __s0_347 = __p0_347; \
+  int16_t __s1_347 = __p1_347; \
+  int16x8_t __s2_347 = __p2_347; \
+  __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \
+  __ret_347; \
+})
+#else
+#define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
+  int16_t __ret_348; \
+  int16_t __s0_348 = __p0_348; \
+  int16_t __s1_348 = __p1_348; \
+  int16x8_t __s2_348 = __p2_348; \
+  int16x8_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \
+  __ret_348; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
+  int32x4_t __ret_349; \
+  int32x4_t __s0_349 = __p0_349; \
+  int32x4_t __s1_349 = __p1_349; \
+  int32x4_t __s2_349 = __p2_349; \
+  __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \
+  __ret_349; \
+})
+#else
+#define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
+  int32x4_t __ret_350; \
+  int32x4_t __s0_350 = __p0_350; \
+  int32x4_t __s1_350 = __p1_350; \
+  int32x4_t __s2_350 = __p2_350; \
+  int32x4_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \
+  int32x4_t __rev1_350;  __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \
+  int32x4_t __rev2_350;  __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \
+  __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, __noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
+  int16x8_t __ret_351; \
+  int16x8_t __s0_351 = __p0_351; \
+  int16x8_t __s1_351 = __p1_351; \
+  int16x8_t __s2_351 = __p2_351; \
+  __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \
+  __ret_351; \
+})
+#else
+#define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
+  int16x8_t __ret_352; \
+  int16x8_t __s0_352 = __p0_352; \
+  int16x8_t __s1_352 = __p1_352; \
+  int16x8_t __s2_352 = __p2_352; \
+  int16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_352;  __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
+  int32x2_t __ret_353; \
+  int32x2_t __s0_353 = __p0_353; \
+  int32x2_t __s1_353 = __p1_353; \
+  int32x4_t __s2_353 = __p2_353; \
+  __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \
+  __ret_353; \
+})
+#else
+#define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
+  int32x2_t __ret_354; \
+  int32x2_t __s0_354 = __p0_354; \
+  int32x2_t __s1_354 = __p1_354; \
+  int32x4_t __s2_354 = __p2_354; \
+  int32x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  int32x2_t __rev1_354;  __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \
+  int32x4_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \
+  __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
+  int16x4_t __ret_355; \
+  int16x4_t __s0_355 = __p0_355; \
+  int16x4_t __s1_355 = __p1_355; \
+  int16x8_t __s2_355 = __p2_355; \
+  __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \
+  __ret_355; \
+})
+#else
+#define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
+  int16x4_t __ret_356; \
+  int16x4_t __s0_356 = __p0_356; \
+  int16x4_t __s1_356 = __p1_356; \
+  int16x8_t __s2_356 = __p2_356; \
+  int16x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
+  int16x4_t __rev1_356;  __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \
+  int16x8_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
+  __ret_356; \
+})
+#endif
+
+#endif
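+
+/* Illustrative sketch (assumes __ARM_FEATURE_QRDMX and a little-endian
+   build; not part of the generated intrinsics): vqrdmlah/vqrdmlsh are the
+   ARMv8.1 saturating rounding doubling multiply accumulate/subtract
+   operations, roughly sat(acc +/- ((2*a*b + (1 << (esize-1))) >> esize)),
+   and the _lane/_laneq forms select a single multiplier lane. */
+#if 0
+static inline int16_t qrdmlah_example(int16_t acc, int16_t x, int16x4_t m) {
+  /* accumulate the rounded doubling product of x and m[1] into acc */
+  return vqrdmlahh_lane_s16(acc, x, m, 1);
+}
+#endif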
+#if defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint8x16_t __ret;
@@ -35881,10 +44666,10 @@
 }
 #else
 __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35899,10 +44684,10 @@
 }
 #else
 __ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35917,10 +44702,10 @@
 }
 #else
 __ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -35935,10 +44720,10 @@
 }
 #else
 __ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35953,10 +44738,10 @@
 }
 #else
 __ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -35971,10 +44756,10 @@
 }
 #else
 __ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -35989,10 +44774,10 @@
 }
 #else
 __ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36007,10 +44792,10 @@
 }
 #else
 __ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36025,10 +44810,10 @@
 }
 #else
 __ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36043,10 +44828,10 @@
 }
 #else
 __ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36061,10 +44846,10 @@
 }
 #else
 __ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36079,10 +44864,10 @@
 }
 #else
 __ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36097,10 +44882,10 @@
 }
 #else
 __ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36115,10 +44900,10 @@
 }
 #else
 __ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36133,10 +44918,10 @@
 }
 #else
 __ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36151,10 +44936,10 @@
 }
 #else
 __ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -36169,9 +44954,9 @@
 }
 #else
 __ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36180,19 +44965,19 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
   __ret; \
 })
 #else
 #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __ret; \
   uint64x2_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
   __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -36200,7 +44985,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
   uint64x2_t __ret;
@@ -36209,10 +44994,10 @@
 }
 #else
 __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36227,10 +45012,10 @@
 }
 #else
 __ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36245,9 +45030,9 @@
 }
 #else
 __ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36262,10 +45047,10 @@
 }
 #else
 __ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36273,7 +45058,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
@@ -36282,10 +45067,10 @@
 }
 #else
 __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36300,10 +45085,10 @@
 }
 #else
 __ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36318,10 +45103,10 @@
 }
 #else
 __ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36330,22 +45115,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36354,22 +45139,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36378,22 +45163,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36402,22 +45187,22 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
   __ret; \
 })
 #else
 #define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  uint32x4_t __ret; \
   uint32x4_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __s2 = __p2; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
   __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
@@ -36425,7 +45210,7 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
@@ -36434,9 +45219,9 @@
 }
 #else
 __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36451,9 +45236,9 @@
 }
 #else
 __ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -36461,7 +45246,3705 @@
 #endif
 
 #endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
+#if defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
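+
+/* Note on the pattern above, repeated throughout this header: each
+   intrinsic has a second, big-endian definition because on big-endian
+   AArch64 the in-register lane order is the reverse of what the NEON
+   builtins expect. That variant reverses every input with
+   __builtin_shufflevector, calls the builtin, and reverses the result
+   back; the __noswap_ helpers are pre-reversed forms used when one
+   intrinsic is composed from another, so lanes are not swapped twice. */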
+
+__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vabs_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+__ai int64x1_t vabs_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int64_t vabsd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
+  return __ret;
+}
+__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
+  poly128_t __ret;
+  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
+  return __ret;
+}
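+
+/* Note (an interpretation, not stated in this header): poly128_t
+   addition is carry-less polynomial addition over GF(2), i.e. a bitwise
+   XOR of the two 128-bit operands. */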
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
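+
+/* Illustrative sketch (assumes a little-endian build; not part of the
+   generated intrinsics): the vaddhn_high_* forms narrow (p1 + p2) by
+   keeping the high half of each sum and place that narrow vector in the
+   high half of the result, with __p0 filling the low half. */
+#if 0
+static inline uint16x8_t addhn_high_example(uint16x4_t lo,
+                                            uint32x4_t a, uint32x4_t b) {
+  /* lanes 0..3 = lo, lanes 4..7 = (uint16_t)((a + b) >> 16) */
+  return vaddhn_high_u32(lo, a, b);
+}
+#endif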
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+  uint16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+  uint64_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+  uint32_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+  int16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+  int64_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+  int32_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+  uint16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+  uint64_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+  uint32_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+  int16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+  int64_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+  int32_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+  int64_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
+  return __ret;
+}
+#endif
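+
+/* Illustrative sketch (assumes a little-endian build; not part of the
+   generated intrinsics): vaddv/vaddvq sum all lanes of a vector into one
+   scalar of the element type, while the vaddlv/vaddlvq forms widen each
+   lane first so the sum is less prone to wrapping (e.g. uint8x8_t
+   reduces to a uint16_t). */
+#if 0
+static inline uint16_t sum_bytes_example(uint8x8_t v) {
+  /* {1,2,3,4,5,6,7,8} reduces to 36 in a widened 16-bit result */
+  return vaddlv_u8(v);
+}
+#endif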
+
+__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
+  return __ret;
+}
+#else
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+  poly64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
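+
+/* Illustrative sketch (not part of the generated intrinsics): vbsl
+   ("bitwise select") computes (mask & a) | (~mask & b) bit by bit, so an
+   all-ones/all-zeros compare mask selects between two vectors lane-wise. */
+#if 0
+static inline float64x2_t max_example(float64x2_t a, float64x2_t b) {
+  uint64x2_t mask = vcgtq_f64(a, b); /* all-ones lanes where a > b */
+  return vbslq_f64(mask, a, b);      /* a where mask is set, else b */
+}
+#endif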
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
+  return __ret;
+}
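+// vcage*/vcagt*/vcale*/vcalt* are absolute-value comparisons: each result
+// lane is all-ones when |__p0| >= |__p1| (age), > (agt), <= (ale), or <
+// (alt) holds, and all-zeros otherwise; the scalar _d/_s forms return a
+// single uint64_t/uint32_t mask for one float64/float32 comparison.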
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
+  return __ret;
+}
+__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 == __p1);
+  return __ret;
+}
+__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
+  return __ret;
+}
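+// The vceqz* family compares each lane against zero. The trailing integer
+// literal handed to __builtin_neon_*_v (16..19 for the 64-bit vectors here,
+// 48..51 for the 128-bit ones) is clang's internal NeonTypeFlags encoding of
+// the element type and width; it is an implementation detail of the builtin,
+// not a user-tunable parameter.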
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+  uint8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+  uint8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+  uint64x2_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+  uint32x2_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+  uint16x4_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vceqzd_u64(uint64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
+  return __ret;
+}
+__ai uint64_t vceqzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vceqzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vceqzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 >= __p1);
+  return __ret;
+}
+__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcgezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcgezd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcgezs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 > __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 > __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 > __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcgtzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcgtzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcgtzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vclezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vclezd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vclezs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcltzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcltzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcltzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
+  return __ret;
+}
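+// vcombine_* concatenates two 64-bit halves into one 128-bit vector: __p0
+// supplies the low lane(s) and __p1 the high lane(s). Illustrative use, with
+// hypothetical names:
+//   float64x2_t v = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0));
+//   /* lane 0 == 1.0, lane 1 == 2.0 */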
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
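+// The vcopyq_lane_* macros below copy lane __p3 of the narrow vector __p2
+// into lane __p1 of the wide vector __p0, composing vget_lane with
+// vsetq_lane (their __noswap_ forms on big-endian, where the operands have
+// already been lane-reversed). The numeric suffixes on the locals
+// (__s0_357, __ret_358, ...) are unique per expansion so that nested macro
+// uses cannot capture one another's temporaries. A hypothetical call:
+//   uint32x4_t dst = vdupq_n_u32(0);
+//   uint32x2_t src = vdup_n_u32(7);
+//   dst = vcopyq_lane_u32(dst, 3, src, 0);  /* writes 7 into dst lane 3 */
+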
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
+  poly8x16_t __ret_357; \
+  poly8x16_t __s0_357 = __p0_357; \
+  poly8x8_t __s2_357 = __p2_357; \
+  __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \
+  __ret_357; \
+})
+#else
+#define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
+  poly8x16_t __ret_358; \
+  poly8x16_t __s0_358 = __p0_358; \
+  poly8x8_t __s2_358 = __p2_358; \
+  poly8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
+  poly16x8_t __ret_359; \
+  poly16x8_t __s0_359 = __p0_359; \
+  poly16x4_t __s2_359 = __p2_359; \
+  __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \
+  __ret_359; \
+})
+#else
+#define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
+  poly16x8_t __ret_360; \
+  poly16x8_t __s0_360 = __p0_360; \
+  poly16x4_t __s2_360 = __p2_360; \
+  poly16x8_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_360;  __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \
+  __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_360; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
+  uint8x16_t __ret_361; \
+  uint8x16_t __s0_361 = __p0_361; \
+  uint8x8_t __s2_361 = __p2_361; \
+  __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \
+  __ret_361; \
+})
+#else
+#define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
+  uint8x16_t __ret_362; \
+  uint8x16_t __s0_362 = __p0_362; \
+  uint8x8_t __s2_362 = __p2_362; \
+  uint8x16_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_362; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
+  uint32x4_t __ret_363; \
+  uint32x4_t __s0_363 = __p0_363; \
+  uint32x2_t __s2_363 = __p2_363; \
+  __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \
+  __ret_363; \
+})
+#else
+#define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
+  uint32x4_t __ret_364; \
+  uint32x4_t __s0_364 = __p0_364; \
+  uint32x2_t __s2_364 = __p2_364; \
+  uint32x4_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \
+  uint32x2_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \
+  __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \
+  __ret_364; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
+  uint64x2_t __ret_365; \
+  uint64x2_t __s0_365 = __p0_365; \
+  uint64x1_t __s2_365 = __p2_365; \
+  __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \
+  __ret_365; \
+})
+#else
+#define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
+  uint64x2_t __ret_366; \
+  uint64x2_t __s0_366 = __p0_366; \
+  uint64x1_t __s2_366 = __p2_366; \
+  uint64x2_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \
+  __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \
+  __ret_366; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
+  uint16x8_t __ret_367; \
+  uint16x8_t __s0_367 = __p0_367; \
+  uint16x4_t __s2_367 = __p2_367; \
+  __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \
+  __ret_367; \
+})
+#else
+#define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
+  uint16x8_t __ret_368; \
+  uint16x8_t __s0_368 = __p0_368; \
+  uint16x4_t __s2_368 = __p2_368; \
+  uint16x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \
+  __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \
+  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_368; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
+  int8x16_t __ret_369; \
+  int8x16_t __s0_369 = __p0_369; \
+  int8x8_t __s2_369 = __p2_369; \
+  __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \
+  __ret_369; \
+})
+#else
+#define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
+  int8x16_t __ret_370; \
+  int8x16_t __s0_370 = __p0_370; \
+  int8x8_t __s2_370 = __p2_370; \
+  int8x16_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_370; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
+  float32x4_t __ret_371; \
+  float32x4_t __s0_371 = __p0_371; \
+  float32x2_t __s2_371 = __p2_371; \
+  __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \
+  __ret_371; \
+})
+#else
+#define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
+  float32x4_t __ret_372; \
+  float32x4_t __s0_372 = __p0_372; \
+  float32x2_t __s2_372 = __p2_372; \
+  float32x4_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \
+  float32x2_t __rev2_372;  __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \
+  __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \
+  __ret_372; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
+  int32x4_t __ret_373; \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x2_t __s2_373 = __p2_373; \
+  __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \
+  __ret_373; \
+})
+#else
+#define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
+  int32x4_t __ret_374; \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x2_t __s2_374 = __p2_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x2_t __rev2_374;  __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \
+  __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
+  __ret_374; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
+  int64x2_t __ret_375; \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x1_t __s2_375 = __p2_375; \
+  __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \
+  __ret_375; \
+})
+#else
+#define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
+  int64x2_t __ret_376; \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x1_t __s2_376 = __p2_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
+  __ret_376; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
+  int16x8_t __ret_377; \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x4_t __s2_377 = __p2_377; \
+  __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \
+  __ret_377; \
+})
+#else
+#define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
+  int16x8_t __ret_378; \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x4_t __s2_378 = __p2_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_378;  __rev2_378 = __builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \
+  __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_378; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
+  poly8x8_t __ret_379; \
+  poly8x8_t __s0_379 = __p0_379; \
+  poly8x8_t __s2_379 = __p2_379; \
+  __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \
+  __ret_379; \
+})
+#else
+#define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
+  poly8x8_t __ret_380; \
+  poly8x8_t __s0_380 = __p0_380; \
+  poly8x8_t __s2_380 = __p2_380; \
+  poly8x8_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_380;  __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
+  poly16x4_t __ret_381; \
+  poly16x4_t __s0_381 = __p0_381; \
+  poly16x4_t __s2_381 = __p2_381; \
+  __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \
+  __ret_381; \
+})
+#else
+#define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
+  poly16x4_t __ret_382; \
+  poly16x4_t __s0_382 = __p0_382; \
+  poly16x4_t __s2_382 = __p2_382; \
+  poly16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  poly16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
+  __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
+  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
+  __ret_382; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
+  uint8x8_t __ret_383; \
+  uint8x8_t __s0_383 = __p0_383; \
+  uint8x8_t __s2_383 = __p2_383; \
+  __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \
+  __ret_383; \
+})
+#else
+#define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
+  uint8x8_t __ret_384; \
+  uint8x8_t __s0_384 = __p0_384; \
+  uint8x8_t __s2_384 = __p2_384; \
+  uint8x8_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
+  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_384; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
+  uint32x2_t __ret_385; \
+  uint32x2_t __s0_385 = __p0_385; \
+  uint32x2_t __s2_385 = __p2_385; \
+  __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \
+  __ret_385; \
+})
+#else
+#define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
+  uint32x2_t __ret_386; \
+  uint32x2_t __s0_386 = __p0_386; \
+  uint32x2_t __s2_386 = __p2_386; \
+  uint32x2_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \
+  uint32x2_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 1, 0); \
+  __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \
+  __ret_386; \
+})
+#endif
+
+#define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
+  uint64x1_t __ret_387; \
+  uint64x1_t __s0_387 = __p0_387; \
+  uint64x1_t __s2_387 = __p2_387; \
+  __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \
+  __ret_387; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
+  uint16x4_t __ret_388; \
+  uint16x4_t __s0_388 = __p0_388; \
+  uint16x4_t __s2_388 = __p2_388; \
+  __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \
+  __ret_388; \
+})
+#else
+#define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
+  uint16x4_t __ret_389; \
+  uint16x4_t __s0_389 = __p0_389; \
+  uint16x4_t __s2_389 = __p2_389; \
+  uint16x4_t __rev0_389;  __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \
+  uint16x4_t __rev2_389;  __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \
+  __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \
+  __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 2, 1, 0); \
+  __ret_389; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
+  int8x8_t __ret_390; \
+  int8x8_t __s0_390 = __p0_390; \
+  int8x8_t __s2_390 = __p2_390; \
+  __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \
+  __ret_390; \
+})
+#else
+#define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
+  int8x8_t __ret_391; \
+  int8x8_t __s0_391 = __p0_391; \
+  int8x8_t __s2_391 = __p2_391; \
+  int8x8_t __rev0_391;  __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_391;  __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \
+  __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_391; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
+  float32x2_t __ret_392; \
+  float32x2_t __s0_392 = __p0_392; \
+  float32x2_t __s2_392 = __p2_392; \
+  __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \
+  __ret_392; \
+})
+#else
+#define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
+  float32x2_t __ret_393; \
+  float32x2_t __s0_393 = __p0_393; \
+  float32x2_t __s2_393 = __p2_393; \
+  float32x2_t __rev0_393;  __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \
+  float32x2_t __rev2_393;  __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \
+  __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \
+  __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \
+  __ret_393; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
+  int32x2_t __ret_394; \
+  int32x2_t __s0_394 = __p0_394; \
+  int32x2_t __s2_394 = __p2_394; \
+  __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \
+  __ret_394; \
+})
+#else
+#define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
+  int32x2_t __ret_395; \
+  int32x2_t __s0_395 = __p0_395; \
+  int32x2_t __s2_395 = __p2_395; \
+  int32x2_t __rev0_395;  __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \
+  int32x2_t __rev2_395;  __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \
+  __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \
+  __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \
+  __ret_395; \
+})
+#endif
+
+#define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
+  int64x1_t __ret_396; \
+  int64x1_t __s0_396 = __p0_396; \
+  int64x1_t __s2_396 = __p2_396; \
+  __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \
+  __ret_396; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
+  int16x4_t __ret_397; \
+  int16x4_t __s0_397 = __p0_397; \
+  int16x4_t __s2_397 = __p2_397; \
+  __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \
+  __ret_397; \
+})
+#else
+#define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
+  int16x4_t __ret_398; \
+  int16x4_t __s0_398 = __p0_398; \
+  int16x4_t __s2_398 = __p2_398; \
+  int16x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
+  int16x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
+  __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \
+  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
+  __ret_398; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
+  poly8x16_t __ret_399; \
+  poly8x16_t __s0_399 = __p0_399; \
+  poly8x16_t __s2_399 = __p2_399; \
+  __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \
+  __ret_399; \
+})
+#else
+#define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
+  poly8x16_t __ret_400; \
+  poly8x16_t __s0_400 = __p0_400; \
+  poly8x16_t __s2_400 = __p2_400; \
+  poly8x16_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_400; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  poly16x8_t __ret_401; \
+  poly16x8_t __s0_401 = __p0_401; \
+  poly16x8_t __s2_401 = __p2_401; \
+  __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \
+  __ret_401; \
+})
+#else
+#define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  poly16x8_t __ret_402; \
+  poly16x8_t __s0_402 = __p0_402; \
+  poly16x8_t __s2_402 = __p2_402; \
+  poly16x8_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \
+  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_402; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u8(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  uint8x16_t __ret_403; \
+  uint8x16_t __s0_403 = __p0_403; \
+  uint8x16_t __s2_403 = __p2_403; \
+  __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \
+  __ret_403; \
+})
+#else
+#define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  uint8x16_t __ret_404; \
+  uint8x16_t __s0_404 = __p0_404; \
+  uint8x16_t __s2_404 = __p2_404; \
+  uint8x16_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \
+  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_404; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  uint32x4_t __ret_405; \
+  uint32x4_t __s0_405 = __p0_405; \
+  uint32x4_t __s2_405 = __p2_405; \
+  __ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \
+  __ret_405; \
+})
+#else
+#define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  uint32x4_t __ret_406; \
+  uint32x4_t __s0_406 = __p0_406; \
+  uint32x4_t __s2_406 = __p2_406; \
+  uint32x4_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \
+  uint32x4_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \
+  __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \
+  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \
+  __ret_406; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  uint64x2_t __ret_407; \
+  uint64x2_t __s0_407 = __p0_407; \
+  uint64x2_t __s2_407 = __p2_407; \
+  __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \
+  __ret_407; \
+})
+#else
+#define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  uint64x2_t __ret_408; \
+  uint64x2_t __s0_408 = __p0_408; \
+  uint64x2_t __s2_408 = __p2_408; \
+  uint64x2_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \
+  uint64x2_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \
+  __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \
+  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \
+  __ret_408; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  uint16x8_t __ret_409; \
+  uint16x8_t __s0_409 = __p0_409; \
+  uint16x8_t __s2_409 = __p2_409; \
+  __ret_409 = vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \
+  __ret_409; \
+})
+#else
+#define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  uint16x8_t __ret_410; \
+  uint16x8_t __s0_410 = __p0_410; \
+  uint16x8_t __s2_410 = __p2_410; \
+  uint16x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \
+  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_410; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  int8x16_t __ret_411; \
+  int8x16_t __s0_411 = __p0_411; \
+  int8x16_t __s2_411 = __p2_411; \
+  __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \
+  __ret_411; \
+})
+#else
+#define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  int8x16_t __ret_412; \
+  int8x16_t __s0_412 = __p0_412; \
+  int8x16_t __s2_412 = __p2_412; \
+  int8x16_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \
+  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_412; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32x4_t __ret_413; \
+  float32x4_t __s0_413 = __p0_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \
+  __ret_413; \
+})
+#else
+#define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32x4_t __ret_414; \
+  float32x4_t __s0_414 = __p0_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev0_414;  __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \
+  __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \
+  __ret_414; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  int32x4_t __ret_415; \
+  int32x4_t __s0_415 = __p0_415; \
+  int32x4_t __s2_415 = __p2_415; \
+  __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \
+  __ret_415; \
+})
+#else
+#define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  int32x4_t __ret_416; \
+  int32x4_t __s0_416 = __p0_416; \
+  int32x4_t __s2_416 = __p2_416; \
+  int32x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
+  int32x4_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \
+  __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \
+  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
+  __ret_416; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  int64x2_t __ret_417; \
+  int64x2_t __s0_417 = __p0_417; \
+  int64x2_t __s2_417 = __p2_417; \
+  __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \
+  __ret_417; \
+})
+#else
+#define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  int64x2_t __ret_418; \
+  int64x2_t __s0_418 = __p0_418; \
+  int64x2_t __s2_418 = __p2_418; \
+  int64x2_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \
+  int64x2_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \
+  __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \
+  __ret_418; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  int16x8_t __ret_419; \
+  int16x8_t __s0_419 = __p0_419; \
+  int16x8_t __s2_419 = __p2_419; \
+  __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \
+  __ret_419; \
+})
+#else
+#define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  int16x8_t __ret_420; \
+  int16x8_t __s0_420 = __p0_420; \
+  int16x8_t __s2_420 = __p2_420; \
+  int16x8_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \
+  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_420; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  poly8x8_t __ret_421; \
+  poly8x8_t __s0_421 = __p0_421; \
+  poly8x16_t __s2_421 = __p2_421; \
+  __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \
+  __ret_421; \
+})
+#else
+#define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  poly8x8_t __ret_422; \
+  poly8x8_t __s0_422 = __p0_422; \
+  poly8x16_t __s2_422 = __p2_422; \
+  poly8x8_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_422; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  poly16x4_t __ret_423; \
+  poly16x4_t __s0_423 = __p0_423; \
+  poly16x8_t __s2_423 = __p2_423; \
+  __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \
+  __ret_423; \
+})
+#else
+#define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  poly16x4_t __ret_424; \
+  poly16x4_t __s0_424 = __p0_424; \
+  poly16x8_t __s2_424 = __p2_424; \
+  poly16x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  poly16x8_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
+  __ret_424; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint8x8_t __ret_425; \
+  uint8x8_t __s0_425 = __p0_425; \
+  uint8x16_t __s2_425 = __p2_425; \
+  __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \
+  __ret_425; \
+})
+#else
+#define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint8x8_t __ret_426; \
+  uint8x8_t __s0_426 = __p0_426; \
+  uint8x16_t __s2_426 = __p2_426; \
+  uint8x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  uint32x2_t __ret_427; \
+  uint32x2_t __s0_427 = __p0_427; \
+  uint32x4_t __s2_427 = __p2_427; \
+  __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \
+  __ret_427; \
+})
+#else
+#define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  uint32x2_t __ret_428; \
+  uint32x2_t __s0_428 = __p0_428; \
+  uint32x4_t __s2_428 = __p2_428; \
+  uint32x2_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \
+  uint32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \
+  __ret_428; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  uint64x1_t __ret_429; \
+  uint64x1_t __s0_429 = __p0_429; \
+  uint64x2_t __s2_429 = __p2_429; \
+  __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \
+  __ret_429; \
+})
+#else
+#define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  uint64x1_t __ret_430; \
+  uint64x1_t __s0_430 = __p0_430; \
+  uint64x2_t __s2_430 = __p2_430; \
+  uint64x2_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \
+  __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \
+  __ret_430; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  uint16x4_t __ret_431; \
+  uint16x4_t __s0_431 = __p0_431; \
+  uint16x8_t __s2_431 = __p2_431; \
+  __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \
+  __ret_431; \
+})
+#else
+#define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  uint16x4_t __ret_432; \
+  uint16x4_t __s0_432 = __p0_432; \
+  uint16x8_t __s2_432 = __p2_432; \
+  uint16x4_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \
+  uint16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \
+  __ret_432; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  int8x8_t __ret_433; \
+  int8x8_t __s0_433 = __p0_433; \
+  int8x16_t __s2_433 = __p2_433; \
+  __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \
+  __ret_433; \
+})
+#else
+#define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  int8x8_t __ret_434; \
+  int8x8_t __s0_434 = __p0_434; \
+  int8x16_t __s2_434 = __p2_434; \
+  int8x8_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  float32x2_t __ret_435; \
+  float32x2_t __s0_435 = __p0_435; \
+  float32x4_t __s2_435 = __p2_435; \
+  __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \
+  __ret_435; \
+})
+#else
+#define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  float32x2_t __ret_436; \
+  float32x2_t __s0_436 = __p0_436; \
+  float32x4_t __s2_436 = __p2_436; \
+  float32x2_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \
+  float32x4_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \
+  __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \
+  __ret_436; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  int32x2_t __ret_437; \
+  int32x2_t __s0_437 = __p0_437; \
+  int32x4_t __s2_437 = __p2_437; \
+  __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \
+  __ret_437; \
+})
+#else
+#define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  int32x2_t __ret_438; \
+  int32x2_t __s0_438 = __p0_438; \
+  int32x4_t __s2_438 = __p2_438; \
+  int32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  int32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int64x1_t __ret_439; \
+  int64x1_t __s0_439 = __p0_439; \
+  int64x2_t __s2_439 = __p2_439; \
+  __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \
+  __ret_439; \
+})
+#else
+#define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int64x1_t __ret_440; \
+  int64x1_t __s0_440 = __p0_440; \
+  int64x2_t __s2_440 = __p2_440; \
+  int64x2_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \
+  __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \
+  __ret_440; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __ret_441; \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \
+  __ret_441; \
+})
+#else
+#define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __ret_442; \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
+})
+#endif
+
+#define vcreate_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (poly64x1_t)(__promote); \
+  __ret; \
+})
+#define vcreate_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (float64x1_t)(__promote); \
+  __ret; \
+})
+__ai float32_t vcvts_f32_s32(int32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
+  return __ret;
+}
+__ai float32_t vcvts_f32_u32(uint32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vcvtd_f64_s64(int64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
+  return __ret;
+}
+__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  float64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  float64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x8_t __ret;
+  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
+  return __ret;
+}
+#else
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x8_t __ret;
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_f16(vget_high_f16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float32x4_t __ret;
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float64x2_t __ret;
+  __ret = vcvt_f64_f32(vget_high_f32(__p0));
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float64x2_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x2_t __ret; \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
+  uint64x1_t __s0 = __p0; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64x1_t __ret; \
+  int64x1_t __s0 = __p0; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
+  __ret; \
+})
+#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  float32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  float64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  float32_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  float64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
+  __ret; \
+})
+__ai int32_t vcvts_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
+  return __ret;
+}
+__ai int64_t vcvtd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+  int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai uint32_t vcvts_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
+  return __ret;
+}
+__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+__ai int32_t vcvtas_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36470,8 +48953,8 @@
 }
 #else
 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36483,6 +48966,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtad_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36491,8 +48984,8 @@
 }
 #else
 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36504,6 +48997,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtms_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36512,8 +49015,8 @@
 }
 #else
 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36525,6 +49028,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36533,8 +49046,8 @@
 }
 #else
 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36546,6 +49059,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtns_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36554,8 +49077,8 @@
 }
 #else
 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36567,6 +49090,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36575,8 +49108,8 @@
 }
 #else
 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36588,6 +49121,16 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
+  return __ret;
+}
+__ai int32_t vcvtps_s32_f32(float32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
   int64x2_t __ret;
@@ -36596,8 +49139,8 @@
 }
 #else
 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36609,6 +49152,16 @@
   __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
   return __ret;
 }
+__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
   uint64x2_t __ret;
@@ -36617,8 +49170,8 @@
 }
 #else
 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -36630,6 +49183,10654 @@
   __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
   return __ret;
 }
+__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
+  return __ret;
+}
+__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = __p0 / __p1;
+  return __ret;
+}
+#else
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 / __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
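+// The vdup*_lane* forms below extract a single lane into a scalar. They
+// are macros rather than functions because the lane index must be an
+// integer constant expression for the underlying builtin.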
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x1_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x2_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x2_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x1_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x4_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
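+// From here on, macro locals carry unique numeric suffixes (__s0_443 and
+// so on) so that names do not collide when one of these statement
+// expressions expands inside another. The vector vdup_lane*/vdup_laneq*
+// forms simply delegate to the corresponding splat helpers.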
+#define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \
+  poly64x1_t __ret_443; \
+  poly64x1_t __s0_443 = __p0_443; \
+  __ret_443 = splat_lane_p64(__s0_443, __p1_443); \
+  __ret_443; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \
+  poly64x2_t __ret_444; \
+  poly64x1_t __s0_444 = __p0_444; \
+  __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \
+  __ret_444; \
+})
+#else
+#define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \
+  poly64x2_t __ret_445; \
+  poly64x1_t __s0_445 = __p0_445; \
+  __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \
+  __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \
+  __ret_445; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \
+  float64x2_t __ret_446; \
+  float64x1_t __s0_446 = __p0_446; \
+  __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \
+  __ret_446; \
+})
+#else
+#define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \
+  float64x2_t __ret_447; \
+  float64x1_t __s0_447 = __p0_447; \
+  __ret_447 = __noswap_splatq_lane_f64(__s0_447, __p1_447); \
+  __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \
+  __ret_447; \
+})
+#endif
+
+#define vdup_lane_f64(__p0_448, __p1_448) __extension__ ({ \
+  float64x1_t __ret_448; \
+  float64x1_t __s0_448 = __p0_448; \
+  __ret_448 = splat_lane_f64(__s0_448, __p1_448); \
+  __ret_448; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8_t __ret; \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16_t __ret; \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x4_t __s0 = __p0; \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x4_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x2_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x8_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p8(__p0_449, __p1_449) __extension__ ({ \
+  poly8x8_t __ret_449; \
+  poly8x16_t __s0_449 = __p0_449; \
+  __ret_449 = splat_laneq_p8(__s0_449, __p1_449); \
+  __ret_449; \
+})
+#else
+#define vdup_laneq_p8(__p0_450, __p1_450) __extension__ ({ \
+  poly8x8_t __ret_450; \
+  poly8x16_t __s0_450 = __p0_450; \
+  poly8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_450 = __noswap_splat_laneq_p8(__rev0_450, __p1_450); \
+  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_450; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p64(__p0_451, __p1_451) __extension__ ({ \
+  poly64x1_t __ret_451; \
+  poly64x2_t __s0_451 = __p0_451; \
+  __ret_451 = splat_laneq_p64(__s0_451, __p1_451); \
+  __ret_451; \
+})
+#else
+#define vdup_laneq_p64(__p0_452, __p1_452) __extension__ ({ \
+  poly64x1_t __ret_452; \
+  poly64x2_t __s0_452 = __p0_452; \
+  poly64x2_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \
+  __ret_452 = __noswap_splat_laneq_p64(__rev0_452, __p1_452); \
+  __ret_452; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p16(__p0_453, __p1_453) __extension__ ({ \
+  poly16x4_t __ret_453; \
+  poly16x8_t __s0_453 = __p0_453; \
+  __ret_453 = splat_laneq_p16(__s0_453, __p1_453); \
+  __ret_453; \
+})
+#else
+#define vdup_laneq_p16(__p0_454, __p1_454) __extension__ ({ \
+  poly16x4_t __ret_454; \
+  poly16x8_t __s0_454 = __p0_454; \
+  poly16x8_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_454 = __noswap_splat_laneq_p16(__rev0_454, __p1_454); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
+  __ret_454; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p8(__p0_455, __p1_455) __extension__ ({ \
+  poly8x16_t __ret_455; \
+  poly8x16_t __s0_455 = __p0_455; \
+  __ret_455 = splatq_laneq_p8(__s0_455, __p1_455); \
+  __ret_455; \
+})
+#else
+#define vdupq_laneq_p8(__p0_456, __p1_456) __extension__ ({ \
+  poly8x16_t __ret_456; \
+  poly8x16_t __s0_456 = __p0_456; \
+  poly8x16_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_456 = __noswap_splatq_laneq_p8(__rev0_456, __p1_456); \
+  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_456; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p64(__p0_457, __p1_457) __extension__ ({ \
+  poly64x2_t __ret_457; \
+  poly64x2_t __s0_457 = __p0_457; \
+  __ret_457 = splatq_laneq_p64(__s0_457, __p1_457); \
+  __ret_457; \
+})
+#else
+#define vdupq_laneq_p64(__p0_458, __p1_458) __extension__ ({ \
+  poly64x2_t __ret_458; \
+  poly64x2_t __s0_458 = __p0_458; \
+  poly64x2_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 1, 0); \
+  __ret_458 = __noswap_splatq_laneq_p64(__rev0_458, __p1_458); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 1, 0); \
+  __ret_458; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p16(__p0_459, __p1_459) __extension__ ({ \
+  poly16x8_t __ret_459; \
+  poly16x8_t __s0_459 = __p0_459; \
+  __ret_459 = splatq_laneq_p16(__s0_459, __p1_459); \
+  __ret_459; \
+})
+#else
+#define vdupq_laneq_p16(__p0_460, __p1_460) __extension__ ({ \
+  poly16x8_t __ret_460; \
+  poly16x8_t __s0_460 = __p0_460; \
+  poly16x8_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_460 = __noswap_splatq_laneq_p16(__rev0_460, __p1_460); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_460; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u8(__p0_461, __p1_461) __extension__ ({ \
+  uint8x16_t __ret_461; \
+  uint8x16_t __s0_461 = __p0_461; \
+  __ret_461 = splatq_laneq_u8(__s0_461, __p1_461); \
+  __ret_461; \
+})
+#else
+#define vdupq_laneq_u8(__p0_462, __p1_462) __extension__ ({ \
+  uint8x16_t __ret_462; \
+  uint8x16_t __s0_462 = __p0_462; \
+  uint8x16_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_462 = __noswap_splatq_laneq_u8(__rev0_462, __p1_462); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_462; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u32(__p0_463, __p1_463) __extension__ ({ \
+  uint32x4_t __ret_463; \
+  uint32x4_t __s0_463 = __p0_463; \
+  __ret_463 = splatq_laneq_u32(__s0_463, __p1_463); \
+  __ret_463; \
+})
+#else
+#define vdupq_laneq_u32(__p0_464, __p1_464) __extension__ ({ \
+  uint32x4_t __ret_464; \
+  uint32x4_t __s0_464 = __p0_464; \
+  uint32x4_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 3, 2, 1, 0); \
+  __ret_464 = __noswap_splatq_laneq_u32(__rev0_464, __p1_464); \
+  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 3, 2, 1, 0); \
+  __ret_464; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u64(__p0_465, __p1_465) __extension__ ({ \
+  uint64x2_t __ret_465; \
+  uint64x2_t __s0_465 = __p0_465; \
+  __ret_465 = splatq_laneq_u64(__s0_465, __p1_465); \
+  __ret_465; \
+})
+#else
+#define vdupq_laneq_u64(__p0_466, __p1_466) __extension__ ({ \
+  uint64x2_t __ret_466; \
+  uint64x2_t __s0_466 = __p0_466; \
+  uint64x2_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 1, 0); \
+  __ret_466 = __noswap_splatq_laneq_u64(__rev0_466, __p1_466); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 1, 0); \
+  __ret_466; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u16(__p0_467, __p1_467) __extension__ ({ \
+  uint16x8_t __ret_467; \
+  uint16x8_t __s0_467 = __p0_467; \
+  __ret_467 = splatq_laneq_u16(__s0_467, __p1_467); \
+  __ret_467; \
+})
+#else
+#define vdupq_laneq_u16(__p0_468, __p1_468) __extension__ ({ \
+  uint16x8_t __ret_468; \
+  uint16x8_t __s0_468 = __p0_468; \
+  uint16x8_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_468 = __noswap_splatq_laneq_u16(__rev0_468, __p1_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_468; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s8(__p0_469, __p1_469) __extension__ ({ \
+  int8x16_t __ret_469; \
+  int8x16_t __s0_469 = __p0_469; \
+  __ret_469 = splatq_laneq_s8(__s0_469, __p1_469); \
+  __ret_469; \
+})
+#else
+#define vdupq_laneq_s8(__p0_470, __p1_470) __extension__ ({ \
+  int8x16_t __ret_470; \
+  int8x16_t __s0_470 = __p0_470; \
+  int8x16_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470 = __noswap_splatq_laneq_s8(__rev0_470, __p1_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f64(__p0_471, __p1_471) __extension__ ({ \
+  float64x2_t __ret_471; \
+  float64x2_t __s0_471 = __p0_471; \
+  __ret_471 = splatq_laneq_f64(__s0_471, __p1_471); \
+  __ret_471; \
+})
+#else
+#define vdupq_laneq_f64(__p0_472, __p1_472) __extension__ ({ \
+  float64x2_t __ret_472; \
+  float64x2_t __s0_472 = __p0_472; \
+  float64x2_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 1, 0); \
+  __ret_472 = __noswap_splatq_laneq_f64(__rev0_472, __p1_472); \
+  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 1, 0); \
+  __ret_472; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f32(__p0_473, __p1_473) __extension__ ({ \
+  float32x4_t __ret_473; \
+  float32x4_t __s0_473 = __p0_473; \
+  __ret_473 = splatq_laneq_f32(__s0_473, __p1_473); \
+  __ret_473; \
+})
+#else
+#define vdupq_laneq_f32(__p0_474, __p1_474) __extension__ ({ \
+  float32x4_t __ret_474; \
+  float32x4_t __s0_474 = __p0_474; \
+  float32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
+  __ret_474 = __noswap_splatq_laneq_f32(__rev0_474, __p1_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 3, 2, 1, 0); \
+  __ret_474; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f16(__p0_475, __p1_475) __extension__ ({ \
+  float16x8_t __ret_475; \
+  float16x8_t __s0_475 = __p0_475; \
+  __ret_475 = splatq_laneq_f16(__s0_475, __p1_475); \
+  __ret_475; \
+})
+#else
+#define vdupq_laneq_f16(__p0_476, __p1_476) __extension__ ({ \
+  float16x8_t __ret_476; \
+  float16x8_t __s0_476 = __p0_476; \
+  float16x8_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476 = __noswap_splatq_laneq_f16(__rev0_476, __p1_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s32(__p0_477, __p1_477) __extension__ ({ \
+  int32x4_t __ret_477; \
+  int32x4_t __s0_477 = __p0_477; \
+  __ret_477 = splatq_laneq_s32(__s0_477, __p1_477); \
+  __ret_477; \
+})
+#else
+#define vdupq_laneq_s32(__p0_478, __p1_478) __extension__ ({ \
+  int32x4_t __ret_478; \
+  int32x4_t __s0_478 = __p0_478; \
+  int32x4_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \
+  __ret_478 = __noswap_splatq_laneq_s32(__rev0_478, __p1_478); \
+  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
+  __ret_478; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s64(__p0_479, __p1_479) __extension__ ({ \
+  int64x2_t __ret_479; \
+  int64x2_t __s0_479 = __p0_479; \
+  __ret_479 = splatq_laneq_s64(__s0_479, __p1_479); \
+  __ret_479; \
+})
+#else
+#define vdupq_laneq_s64(__p0_480, __p1_480) __extension__ ({ \
+  int64x2_t __ret_480; \
+  int64x2_t __s0_480 = __p0_480; \
+  int64x2_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 1, 0); \
+  __ret_480 = __noswap_splatq_laneq_s64(__rev0_480, __p1_480); \
+  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 1, 0); \
+  __ret_480; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s16(__p0_481, __p1_481) __extension__ ({ \
+  int16x8_t __ret_481; \
+  int16x8_t __s0_481 = __p0_481; \
+  __ret_481 = splatq_laneq_s16(__s0_481, __p1_481); \
+  __ret_481; \
+})
+#else
+#define vdupq_laneq_s16(__p0_482, __p1_482) __extension__ ({ \
+  int16x8_t __ret_482; \
+  int16x8_t __s0_482 = __p0_482; \
+  int16x8_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_482 = __noswap_splatq_laneq_s16(__rev0_482, __p1_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_482; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u8(__p0_483, __p1_483) __extension__ ({ \
+  uint8x8_t __ret_483; \
+  uint8x16_t __s0_483 = __p0_483; \
+  __ret_483 = splat_laneq_u8(__s0_483, __p1_483); \
+  __ret_483; \
+})
+#else
+#define vdup_laneq_u8(__p0_484, __p1_484) __extension__ ({ \
+  uint8x8_t __ret_484; \
+  uint8x16_t __s0_484 = __p0_484; \
+  uint8x16_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_484 = __noswap_splat_laneq_u8(__rev0_484, __p1_484); \
+  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_484; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u32(__p0_485, __p1_485) __extension__ ({ \
+  uint32x2_t __ret_485; \
+  uint32x4_t __s0_485 = __p0_485; \
+  __ret_485 = splat_laneq_u32(__s0_485, __p1_485); \
+  __ret_485; \
+})
+#else
+#define vdup_laneq_u32(__p0_486, __p1_486) __extension__ ({ \
+  uint32x2_t __ret_486; \
+  uint32x4_t __s0_486 = __p0_486; \
+  uint32x4_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 3, 2, 1, 0); \
+  __ret_486 = __noswap_splat_laneq_u32(__rev0_486, __p1_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \
+  __ret_486; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u64(__p0_487, __p1_487) __extension__ ({ \
+  uint64x1_t __ret_487; \
+  uint64x2_t __s0_487 = __p0_487; \
+  __ret_487 = splat_laneq_u64(__s0_487, __p1_487); \
+  __ret_487; \
+})
+#else
+#define vdup_laneq_u64(__p0_488, __p1_488) __extension__ ({ \
+  uint64x1_t __ret_488; \
+  uint64x2_t __s0_488 = __p0_488; \
+  uint64x2_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
+  __ret_488 = __noswap_splat_laneq_u64(__rev0_488, __p1_488); \
+  __ret_488; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u16(__p0_489, __p1_489) __extension__ ({ \
+  uint16x4_t __ret_489; \
+  uint16x8_t __s0_489 = __p0_489; \
+  __ret_489 = splat_laneq_u16(__s0_489, __p1_489); \
+  __ret_489; \
+})
+#else
+#define vdup_laneq_u16(__p0_490, __p1_490) __extension__ ({ \
+  uint16x4_t __ret_490; \
+  uint16x8_t __s0_490 = __p0_490; \
+  uint16x8_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_490 = __noswap_splat_laneq_u16(__rev0_490, __p1_490); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
+  __ret_490; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s8(__p0_491, __p1_491) __extension__ ({ \
+  int8x8_t __ret_491; \
+  int8x16_t __s0_491 = __p0_491; \
+  __ret_491 = splat_laneq_s8(__s0_491, __p1_491); \
+  __ret_491; \
+})
+#else
+#define vdup_laneq_s8(__p0_492, __p1_492) __extension__ ({ \
+  int8x8_t __ret_492; \
+  int8x16_t __s0_492 = __p0_492; \
+  int8x16_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_492 = __noswap_splat_laneq_s8(__rev0_492, __p1_492); \
+  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_492; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f64(__p0_493, __p1_493) __extension__ ({ \
+  float64x1_t __ret_493; \
+  float64x2_t __s0_493 = __p0_493; \
+  __ret_493 = splat_laneq_f64(__s0_493, __p1_493); \
+  __ret_493; \
+})
+#else
+#define vdup_laneq_f64(__p0_494, __p1_494) __extension__ ({ \
+  float64x1_t __ret_494; \
+  float64x2_t __s0_494 = __p0_494; \
+  float64x2_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 1, 0); \
+  __ret_494 = __noswap_splat_laneq_f64(__rev0_494, __p1_494); \
+  __ret_494; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f32(__p0_495, __p1_495) __extension__ ({ \
+  float32x2_t __ret_495; \
+  float32x4_t __s0_495 = __p0_495; \
+  __ret_495 = splat_laneq_f32(__s0_495, __p1_495); \
+  __ret_495; \
+})
+#else
+#define vdup_laneq_f32(__p0_496, __p1_496) __extension__ ({ \
+  float32x2_t __ret_496; \
+  float32x4_t __s0_496 = __p0_496; \
+  float32x4_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 3, 2, 1, 0); \
+  __ret_496 = __noswap_splat_laneq_f32(__rev0_496, __p1_496); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
+  __ret_496; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f16(__p0_497, __p1_497) __extension__ ({ \
+  float16x4_t __ret_497; \
+  float16x8_t __s0_497 = __p0_497; \
+  __ret_497 = splat_laneq_f16(__s0_497, __p1_497); \
+  __ret_497; \
+})
+#else
+#define vdup_laneq_f16(__p0_498, __p1_498) __extension__ ({ \
+  float16x4_t __ret_498; \
+  float16x8_t __s0_498 = __p0_498; \
+  float16x8_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_498 = __noswap_splat_laneq_f16(__rev0_498, __p1_498); \
+  __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
+  __ret_498; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s32(__p0_499, __p1_499) __extension__ ({ \
+  int32x2_t __ret_499; \
+  int32x4_t __s0_499 = __p0_499; \
+  __ret_499 = splat_laneq_s32(__s0_499, __p1_499); \
+  __ret_499; \
+})
+#else
+#define vdup_laneq_s32(__p0_500, __p1_500) __extension__ ({ \
+  int32x2_t __ret_500; \
+  int32x4_t __s0_500 = __p0_500; \
+  int32x4_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \
+  __ret_500 = __noswap_splat_laneq_s32(__rev0_500, __p1_500); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s64(__p0_501, __p1_501) __extension__ ({ \
+  int64x1_t __ret_501; \
+  int64x2_t __s0_501 = __p0_501; \
+  __ret_501 = splat_laneq_s64(__s0_501, __p1_501); \
+  __ret_501; \
+})
+#else
+#define vdup_laneq_s64(__p0_502, __p1_502) __extension__ ({ \
+  int64x1_t __ret_502; \
+  int64x2_t __s0_502 = __p0_502; \
+  int64x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
+  __ret_502 = __noswap_splat_laneq_s64(__rev0_502, __p1_502); \
+  __ret_502; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s16(__p0_503, __p1_503) __extension__ ({ \
+  int16x4_t __ret_503; \
+  int16x8_t __s0_503 = __p0_503; \
+  __ret_503 = splat_laneq_s16(__s0_503, __p1_503); \
+  __ret_503; \
+})
+#else
+#define vdup_laneq_s16(__p0_504, __p1_504) __extension__ ({ \
+  int16x4_t __ret_504; \
+  int16x8_t __s0_504 = __p0_504; \
+  int16x8_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_504 = __noswap_splat_laneq_s16(__rev0_504, __p1_504); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 3, 2, 1, 0); \
+  __ret_504; \
+})
+#endif
+
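+// vdup_n broadcasts a scalar into every lane via a vector literal. The
+// big-endian variants still reverse the result; the shuffle is a no-op
+// for a uniform splat but keeps the code shape consistent with the rest
+// of the header.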
+__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) {__p0};
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vdup_n_f64(float64_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) {__p0};
+  return __ret;
+}
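+// vext concatenates two vectors and extracts a window starting at lane
+// __p2 (the EXT instruction). The final integer argument to the
+// __builtin_neon_* calls is an internal type code identifying the element
+// type, not a lane count.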
+#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
+  poly64x1_t __s0 = __p0; \
+  poly64x1_t __s1 = __p1; \
+  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __s1 = __p1; \
+  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
+  __ret; \
+})
+#else
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __s1 = __p1; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
+  __ret; \
+})
+#else
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
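+// The vfma_* intrinsics map to the fused multiply-add instructions
+// (FMLA): the product __p1 * __p2 is added to __p0 with a single
+// rounding. The _lane/_laneq variants below multiply by one broadcast
+// lane of the last operand, e.g. vfmaq_laneq_f32(acc, x, y, 3)
+// accumulates x * y[3] into acc.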
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+  return __ret;
+}
+#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+  __ret; \
+})
+#else
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+  __ret; \
+})
+#endif
+
+#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x1_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+  __ret; \
+})
+#else
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64_t __ret; \
+  float64_t __s0 = __p0; \
+  float64_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
+  __ret; \
+})
+#else
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
+  __ret; \
+})
+#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32_t __ret; \
+  float32_t __s0 = __p0; \
+  float32_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+  __ret; \
+})
+#else
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x4_t __ret; \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  float64x2_t __s2 = __p2; \
+  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+  __ret; \
+})
+#else
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  float32x2_t __ret; \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x4_t __s2 = __p2; \
+  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, -__p1, __p2);
+  return __ret;
+}
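+// As above, every vfms_* form is expressed as the matching vfma_* with a
+// negated multiplicand, which lowers to the fused multiply-subtract
+// (FMLS) instructions.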
+#define vfmsd_lane_f64(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
+  float64_t __ret_505; \
+  float64_t __s0_505 = __p0_505; \
+  float64_t __s1_505 = __p1_505; \
+  float64x1_t __s2_505 = __p2_505; \
+  __ret_505 = vfmad_lane_f64(__s0_505, -__s1_505, __s2_505, __p3_505); \
+  __ret_505; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_lane_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
+  float32_t __ret_506; \
+  float32_t __s0_506 = __p0_506; \
+  float32_t __s1_506 = __p1_506; \
+  float32x2_t __s2_506 = __p2_506; \
+  __ret_506 = vfmas_lane_f32(__s0_506, -__s1_506, __s2_506, __p3_506); \
+  __ret_506; \
+})
+#else
+#define vfmss_lane_f32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
+  float32_t __ret_507; \
+  float32_t __s0_507 = __p0_507; \
+  float32_t __s1_507 = __p1_507; \
+  float32x2_t __s2_507 = __p2_507; \
+  float32x2_t __rev2_507;  __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, 1, 0); \
+  __ret_507 = __noswap_vfmas_lane_f32(__s0_507, -__s1_507, __rev2_507, __p3_507); \
+  __ret_507; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
+  float64x2_t __ret_508; \
+  float64x2_t __s0_508 = __p0_508; \
+  float64x2_t __s1_508 = __p1_508; \
+  float64x1_t __s2_508 = __p2_508; \
+  __ret_508 = vfmaq_lane_f64(__s0_508, -__s1_508, __s2_508, __p3_508); \
+  __ret_508; \
+})
+#else
+#define vfmsq_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  float64x2_t __ret_509; \
+  float64x2_t __s0_509 = __p0_509; \
+  float64x2_t __s1_509 = __p1_509; \
+  float64x1_t __s2_509 = __p2_509; \
+  float64x2_t __rev0_509;  __rev0_509 = __builtin_shufflevector(__s0_509, __s0_509, 1, 0); \
+  float64x2_t __rev1_509;  __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, 1, 0); \
+  __ret_509 = __noswap_vfmaq_lane_f64(__rev0_509, -__rev1_509, __s2_509, __p3_509); \
+  __ret_509 = __builtin_shufflevector(__ret_509, __ret_509, 1, 0); \
+  __ret_509; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  float32x4_t __ret_510; \
+  float32x4_t __s0_510 = __p0_510; \
+  float32x4_t __s1_510 = __p1_510; \
+  float32x2_t __s2_510 = __p2_510; \
+  __ret_510 = vfmaq_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \
+  __ret_510; \
+})
+#else
+#define vfmsq_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
+  float32x4_t __ret_511; \
+  float32x4_t __s0_511 = __p0_511; \
+  float32x4_t __s1_511 = __p1_511; \
+  float32x2_t __s2_511 = __p2_511; \
+  float32x4_t __rev0_511;  __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, 3, 2, 1, 0); \
+  float32x4_t __rev1_511;  __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, 3, 2, 1, 0); \
+  float32x2_t __rev2_511;  __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \
+  __ret_511 = __noswap_vfmaq_lane_f32(__rev0_511, -__rev1_511, __rev2_511, __p3_511); \
+  __ret_511 = __builtin_shufflevector(__ret_511, __ret_511, 3, 2, 1, 0); \
+  __ret_511; \
+})
+#endif
+
+#define vfms_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
+  float64x1_t __ret_512; \
+  float64x1_t __s0_512 = __p0_512; \
+  float64x1_t __s1_512 = __p1_512; \
+  float64x1_t __s2_512 = __p2_512; \
+  __ret_512 = vfma_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \
+  __ret_512; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
+  float32x2_t __ret_513; \
+  float32x2_t __s0_513 = __p0_513; \
+  float32x2_t __s1_513 = __p1_513; \
+  float32x2_t __s2_513 = __p2_513; \
+  __ret_513 = vfma_lane_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
+  __ret_513; \
+})
+#else
+#define vfms_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
+  float32x2_t __ret_514; \
+  float32x2_t __s0_514 = __p0_514; \
+  float32x2_t __s1_514 = __p1_514; \
+  float32x2_t __s2_514 = __p2_514; \
+  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
+  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
+  float32x2_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 1, 0); \
+  __ret_514 = __noswap_vfma_lane_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
+  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
+  __ret_514; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_laneq_f64(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
+  float64_t __ret_515; \
+  float64_t __s0_515 = __p0_515; \
+  float64_t __s1_515 = __p1_515; \
+  float64x2_t __s2_515 = __p2_515; \
+  __ret_515 = vfmad_laneq_f64(__s0_515, -__s1_515, __s2_515, __p3_515); \
+  __ret_515; \
+})
+#else
+#define vfmsd_laneq_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
+  float64_t __ret_516; \
+  float64_t __s0_516 = __p0_516; \
+  float64_t __s1_516 = __p1_516; \
+  float64x2_t __s2_516 = __p2_516; \
+  float64x2_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 1, 0); \
+  __ret_516 = __noswap_vfmad_laneq_f64(__s0_516, -__s1_516, __rev2_516, __p3_516); \
+  __ret_516; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_laneq_f32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
+  float32_t __ret_517; \
+  float32_t __s0_517 = __p0_517; \
+  float32_t __s1_517 = __p1_517; \
+  float32x4_t __s2_517 = __p2_517; \
+  __ret_517 = vfmas_laneq_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \
+  __ret_517; \
+})
+#else
+#define vfmss_laneq_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
+  float32_t __ret_518; \
+  float32_t __s0_518 = __p0_518; \
+  float32_t __s1_518 = __p1_518; \
+  float32x4_t __s2_518 = __p2_518; \
+  float32x4_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 3, 2, 1, 0); \
+  __ret_518 = __noswap_vfmas_laneq_f32(__s0_518, -__s1_518, __rev2_518, __p3_518); \
+  __ret_518; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
+  float64x2_t __ret_519; \
+  float64x2_t __s0_519 = __p0_519; \
+  float64x2_t __s1_519 = __p1_519; \
+  float64x2_t __s2_519 = __p2_519; \
+  __ret_519 = vfmaq_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \
+  __ret_519; \
+})
+#else
+#define vfmsq_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
+  float64x2_t __ret_520; \
+  float64x2_t __s0_520 = __p0_520; \
+  float64x2_t __s1_520 = __p1_520; \
+  float64x2_t __s2_520 = __p2_520; \
+  float64x2_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 1, 0); \
+  float64x2_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 1, 0); \
+  float64x2_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \
+  __ret_520 = __noswap_vfmaq_laneq_f64(__rev0_520, -__rev1_520, __rev2_520, __p3_520); \
+  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 1, 0); \
+  __ret_520; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
+  float32x4_t __ret_521; \
+  float32x4_t __s0_521 = __p0_521; \
+  float32x4_t __s1_521 = __p1_521; \
+  float32x4_t __s2_521 = __p2_521; \
+  __ret_521 = vfmaq_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \
+  __ret_521; \
+})
+#else
+#define vfmsq_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
+  float32x4_t __ret_522; \
+  float32x4_t __s0_522 = __p0_522; \
+  float32x4_t __s1_522 = __p1_522; \
+  float32x4_t __s2_522 = __p2_522; \
+  float32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
+  float32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
+  float32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
+  __ret_522 = __noswap_vfmaq_laneq_f32(__rev0_522, -__rev1_522, __rev2_522, __p3_522); \
+  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
+  __ret_522; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
+  float64x1_t __ret_523; \
+  float64x1_t __s0_523 = __p0_523; \
+  float64x1_t __s1_523 = __p1_523; \
+  float64x2_t __s2_523 = __p2_523; \
+  __ret_523 = vfma_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \
+  __ret_523; \
+})
+#else
+#define vfms_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
+  float64x1_t __ret_524; \
+  float64x1_t __s0_524 = __p0_524; \
+  float64x1_t __s1_524 = __p1_524; \
+  float64x2_t __s2_524 = __p2_524; \
+  float64x2_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \
+  __ret_524 = __noswap_vfma_laneq_f64(__s0_524, -__s1_524, __rev2_524, __p3_524); \
+  __ret_524; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
+  float32x2_t __ret_525; \
+  float32x2_t __s0_525 = __p0_525; \
+  float32x2_t __s1_525 = __p1_525; \
+  float32x4_t __s2_525 = __p2_525; \
+  __ret_525 = vfma_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \
+  __ret_525; \
+})
+#else
+#define vfms_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
+  float32x2_t __ret_526; \
+  float32x2_t __s0_526 = __p0_526; \
+  float32x2_t __s1_526 = __p1_526; \
+  float32x4_t __s2_526 = __p2_526; \
+  float32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
+  float32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
+  float32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
+  __ret_526 = __noswap_vfma_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \
+  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
+  __ret_526; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
+  float64x1_t __ret;
+  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
+  return __ret;
+}
+#else
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
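+// vget_high_*/vget_low_* extract the upper or lower half of a 128-bit vector
+// as a 64-bit vector via __builtin_shufflevector; the big-endian variants
+// reverse the input lanes first so that index 1 (or 0) still names the
+// architecturally high (or low) element.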
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#else
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+  return __ret;
+}
+__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#else
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+  return __ret;
+}
+#endif
+
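+// vget_lane_*/vgetq_lane_* read one scalar element out of a vector through
+// the __builtin_neon_v(get|getq)_lane_* builtins; the __noswap_* spellings
+// are internal helpers that skip the big-endian lane fixup. Illustrative use
+// (hypothetical pointer 'p', not part of the generated header):
+//   float64x2_t v  = vld1q_f64(p);         // load two doubles
+//   float64_t   hi = vgetq_lane_f64(v, 1); // read the high lane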
+#define vget_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x1_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64_t __ret; \
+  poly64x2_t __s0 = __p0; \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x2_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vget_lane_f64(__p0, __p1) __extension__ ({ \
+  float64_t __ret; \
+  float64x1_t __s0 = __p0; \
+  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+  poly64x1_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+  float64x1_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
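+// vld1* loads one vector from memory via __builtin_neon_vld1(q)_v. The
+// trailing integer appears to be clang's internal type code for the builtin
+// (as used here: 6/38 for poly64x1/x2, 10/42 for float64x1/x2); big-endian
+// variants additionally reverse the loaded lanes.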
+#define vld1_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
+  __ret; \
+})
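+// vld1_dup_* loads a single element and replicates it across all lanes;
+// vld1_lane_* loads one element into lane __p2 of an existing vector, which
+// is passed to the builtin reinterpreted as an int8x8/int8x16 bit pattern.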
+#define vld1_dup_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_dup_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
+  __ret; \
+})
+#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
+  poly64x1_t __s1 = __p1; \
+  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s1 = __p1; \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
+  __ret; \
+})
+#else
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
+  poly64x2_t __s1 = __p1; \
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
+  __ret; \
+})
+#else
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
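+// The _x2/_x3/_x4 forms load 2-4 consecutive vectors from contiguous memory;
+// the multi-vector result is written through &__ret rather than returned by
+// value, and each __ret.val[i] gets its own lane reversal on big-endian.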
+#define vld1_p64_x2(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x2(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld1_p64_x3(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x3(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld1_p64_x4(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_f64_x4(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
+  __ret; \
+})
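+// vld2/vld3/vld4 perform de-interleaving structure loads: consecutive memory
+// elements are distributed round-robin across the N result vectors.
+// Illustrative use (hypothetical buffer 'buf', not part of the header):
+//   float64x2x2_t pair = vld2q_f64(buf);
+//   // pair.val[0] == {buf[0], buf[2]}, pair.val[1] == {buf[1], buf[3]}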
+#define vld2_p64(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld2q_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld2q_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld2q_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld2q_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_f64(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 10); \
+  __ret; \
+})
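+// As with vld1, the _dup variants below replicate one loaded structure across
+// all lanes, and the _lane variants load into a single lane of existing
+// vectors; big-endian definitions reverse both the inputs and the results.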
+#define vld2_dup_p64(__p0) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+  float64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_dup_f64(__p0) __extension__ ({ \
+  float64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x2_t __ret; \
+  poly64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  poly8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  poly8x16x2_t __s1 = __p1; \
+  poly8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  poly64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x2_t __ret; \
+  poly64x2x2_t __s1 = __p1; \
+  poly64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  uint8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  uint8x16x2_t __s1 = __p1; \
+  uint8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  uint64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  uint64x2x2_t __s1 = __p1; \
+  uint64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x2_t __ret; \
+  int8x16x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x2_t __ret; \
+  int8x16x2_t __s1 = __p1; \
+  int8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x2_t __ret; \
+  float64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x2_t __ret; \
+  float64x2x2_t __s1 = __p1; \
+  float64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x2_t __ret; \
+  int64x2x2_t __s1 = __p1; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x2_t __ret; \
+  int64x2x2_t __s1 = __p1; \
+  int64x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
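+// The 64-bit-element x1 variants below (u64/f64/s64) are defined once,
+// outside the endianness guards, presumably because a one-element vector has
+// no lane order to fix up.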
+#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x2_t __ret; \
+  uint64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+  __ret; \
+})
+#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x2_t __ret; \
+  float64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
+  __ret; \
+})
+#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x2_t __ret; \
+  int64x1x2_t __s1 = __p1; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
+  __ret; \
+})
+#define vld3_p64(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld3q_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u64(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld3q_u64(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld3q_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s64(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld3q_s64(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_f64(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld3_dup_p64(__p0) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+  float64x2x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_dup_f64(__p0) __extension__ ({ \
+  float64x1x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x3_t __ret; \
+  poly64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  poly8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  poly8x16x3_t __s1 = __p1; \
+  poly8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  poly64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x3_t __ret; \
+  poly64x2x3_t __s1 = __p1; \
+  poly64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  uint8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  uint8x16x3_t __s1 = __p1; \
+  uint8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  uint64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  uint64x2x3_t __s1 = __p1; \
+  uint64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x3_t __ret; \
+  int8x16x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x3_t __ret; \
+  int8x16x3_t __s1 = __p1; \
+  int8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x3_t __ret; \
+  float64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x3_t __ret; \
+  float64x2x3_t __s1 = __p1; \
+  float64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x3_t __ret; \
+  int64x2x3_t __s1 = __p1; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x3_t __ret; \
+  int64x2x3_t __s1 = __p1; \
+  int64x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x3_t __ret; \
+  uint64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+  __ret; \
+})
+#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x3_t __ret; \
+  float64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
+  __ret; \
+})
+#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x3_t __ret; \
+  int64x1x3_t __s1 = __p1; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
+  __ret; \
+})
+#define vld4_p64(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld4q_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u64(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld4q_u64(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld4q_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s64(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld4q_s64(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_f64(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld4_dup_p64(__p0) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+  __ret; \
+})
+#else
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+  __ret; \
+})
+#else
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+  float64x2x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_dup_f64(__p0) __extension__ ({ \
+  float64x1x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
+  __ret; \
+})
+#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1x4_t __ret; \
+  poly64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  poly8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
+  __ret; \
+})
+#else
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  poly8x16x4_t __s1 = __p1; \
+  poly8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  poly64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
+  __ret; \
+})
+#else
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2x4_t __ret; \
+  poly64x2x4_t __s1 = __p1; \
+  poly64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  uint8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
+  __ret; \
+})
+#else
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  uint8x16x4_t __s1 = __p1; \
+  uint8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  uint64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
+  __ret; \
+})
+#else
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  uint64x2x4_t __s1 = __p1; \
+  uint64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x4_t __ret; \
+  int8x16x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
+  __ret; \
+})
+#else
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16x4_t __ret; \
+  int8x16x4_t __s1 = __p1; \
+  int8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x4_t __ret; \
+  float64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
+  __ret; \
+})
+#else
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2x4_t __ret; \
+  float64x2x4_t __s1 = __p1; \
+  float64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x4_t __ret; \
+  int64x2x4_t __s1 = __p1; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
+  __ret; \
+})
+#else
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2x4_t __ret; \
+  int64x2x4_t __s1 = __p1; \
+  int64x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1x4_t __ret; \
+  uint64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+  __ret; \
+})
+#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1x4_t __ret; \
+  float64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
+  __ret; \
+})
+#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1x4_t __ret; \
+  int64x1x4_t __s1 = __p1; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
+  __ret; \
+})
+#define vldrq_p128(__p0) __extension__ ({ \
+  poly128_t __ret; \
+  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
+  __ret; \
+})
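+
+/*
+ * vmaxq and vminq compute lane-wise maxima and minima; the vmaxv and vminv
+ * forms below ("across vector") reduce one vector to its single largest or
+ * smallest element, and the vmaxnmv and vminnmv variants follow IEEE-754
+ * maxNum/minNum rules, preferring a number over a NaN. A minimal usage
+ * sketch with illustrative values:
+ *
+ *   float32x4_t v = {1.0f, 4.0f, 2.0f, 3.0f};
+ *   float32_t hi = vmaxvq_f32(v);   // hi == 4.0f
+ */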
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+  uint8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+  uint32_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+  uint16_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+  int8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+  float32_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+  int32_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+  int16_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
+  return __ret;
+}
+#else
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+  uint8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
+  return __ret;
+}
+#else
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+  uint32_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
+  return __ret;
+}
+#else
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+  uint16_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
+  return __ret;
+}
+#else
+__ai int8_t vminv_s8(int8x8_t __p0) {
+  int8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vminv_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
+  return __ret;
+}
+#else
+__ai int32_t vminv_s32(int32x2_t __p0) {
+  int32_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
+  return __ret;
+}
+#else
+__ai int16_t vminv_s16(int16x4_t __p0) {
+  int16_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
+  return __ret;
+}
+#endif
+
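+/*
+ * vmla* and vmls* are multiply-accumulate: vmlaq_f64(a, b, c) yields
+ * a + b * c lane-wise and vmlsq_f64(a, b, c) yields a - b * c. The bodies
+ * are plain C vector arithmetic rather than builtins, so the compiler may
+ * fuse or split the multiply-add as the target's contraction rules permit.
+ */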
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = __p0 + __p1 * __p2;
+  return __ret;
+}
+#else
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __rev0 + __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = __p0 + __p1 * __p2;
+  return __ret;
+}
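+
+/*
+ * The _laneq forms multiply by one broadcast lane of a 128-bit vector:
+ * vmlaq_laneq_u32(a, b, v, 3) is a + b * vdupq_laneq_u32(v, 3). The
+ * splatq_laneq_* and splat_laneq_* helpers are this header's internal
+ * broadcast primitives, and the __noswap_* spellings are the variants used
+ * inside big-endian bodies, where the operands have already been reversed.
+ */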
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u32(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
+  uint32x4_t __ret_527; \
+  uint32x4_t __s0_527 = __p0_527; \
+  uint32x4_t __s1_527 = __p1_527; \
+  uint32x4_t __s2_527 = __p2_527; \
+  __ret_527 = __s0_527 + __s1_527 * splatq_laneq_u32(__s2_527, __p3_527); \
+  __ret_527; \
+})
+#else
+#define vmlaq_laneq_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
+  uint32x4_t __ret_528; \
+  uint32x4_t __s0_528 = __p0_528; \
+  uint32x4_t __s1_528 = __p1_528; \
+  uint32x4_t __s2_528 = __p2_528; \
+  uint32x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
+  uint32x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
+  uint32x4_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 3, 2, 1, 0); \
+  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splatq_laneq_u32(__rev2_528, __p3_528); \
+  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
+  __ret_528; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u16(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
+  uint16x8_t __ret_529; \
+  uint16x8_t __s0_529 = __p0_529; \
+  uint16x8_t __s1_529 = __p1_529; \
+  uint16x8_t __s2_529 = __p2_529; \
+  __ret_529 = __s0_529 + __s1_529 * splatq_laneq_u16(__s2_529, __p3_529); \
+  __ret_529; \
+})
+#else
+#define vmlaq_laneq_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
+  uint16x8_t __ret_530; \
+  uint16x8_t __s0_530 = __p0_530; \
+  uint16x8_t __s1_530 = __p1_530; \
+  uint16x8_t __s2_530 = __p2_530; \
+  uint16x8_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splatq_laneq_u16(__rev2_530, __p3_530); \
+  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_530; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_f32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
+  float32x4_t __ret_531; \
+  float32x4_t __s0_531 = __p0_531; \
+  float32x4_t __s1_531 = __p1_531; \
+  float32x4_t __s2_531 = __p2_531; \
+  __ret_531 = __s0_531 + __s1_531 * splatq_laneq_f32(__s2_531, __p3_531); \
+  __ret_531; \
+})
+#else
+#define vmlaq_laneq_f32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
+  float32x4_t __ret_532; \
+  float32x4_t __s0_532 = __p0_532; \
+  float32x4_t __s1_532 = __p1_532; \
+  float32x4_t __s2_532 = __p2_532; \
+  float32x4_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \
+  float32x4_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \
+  float32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
+  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_f32(__rev2_532, __p3_532); \
+  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \
+  __ret_532; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
+  int32x4_t __ret_533; \
+  int32x4_t __s0_533 = __p0_533; \
+  int32x4_t __s1_533 = __p1_533; \
+  int32x4_t __s2_533 = __p2_533; \
+  __ret_533 = __s0_533 + __s1_533 * splatq_laneq_s32(__s2_533, __p3_533); \
+  __ret_533; \
+})
+#else
+#define vmlaq_laneq_s32(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
+  int32x4_t __ret_534; \
+  int32x4_t __s0_534 = __p0_534; \
+  int32x4_t __s1_534 = __p1_534; \
+  int32x4_t __s2_534 = __p2_534; \
+  int32x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
+  int32x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
+  int32x4_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 3, 2, 1, 0); \
+  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_s32(__rev2_534, __p3_534); \
+  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
+  __ret_534; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
+  int16x8_t __ret_535; \
+  int16x8_t __s0_535 = __p0_535; \
+  int16x8_t __s1_535 = __p1_535; \
+  int16x8_t __s2_535 = __p2_535; \
+  __ret_535 = __s0_535 + __s1_535 * splatq_laneq_s16(__s2_535, __p3_535); \
+  __ret_535; \
+})
+#else
+#define vmlaq_laneq_s16(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
+  int16x8_t __ret_536; \
+  int16x8_t __s0_536 = __p0_536; \
+  int16x8_t __s1_536 = __p1_536; \
+  int16x8_t __s2_536 = __p2_536; \
+  int16x8_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_s16(__rev2_536, __p3_536); \
+  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_536; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
+  uint32x2_t __ret_537; \
+  uint32x2_t __s0_537 = __p0_537; \
+  uint32x2_t __s1_537 = __p1_537; \
+  uint32x4_t __s2_537 = __p2_537; \
+  __ret_537 = __s0_537 + __s1_537 * splat_laneq_u32(__s2_537, __p3_537); \
+  __ret_537; \
+})
+#else
+#define vmla_laneq_u32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
+  uint32x2_t __ret_538; \
+  uint32x2_t __s0_538 = __p0_538; \
+  uint32x2_t __s1_538 = __p1_538; \
+  uint32x4_t __s2_538 = __p2_538; \
+  uint32x2_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 1, 0); \
+  uint32x2_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 1, 0); \
+  uint32x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
+  __ret_538 = __rev0_538 + __rev1_538 * __noswap_splat_laneq_u32(__rev2_538, __p3_538); \
+  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 1, 0); \
+  __ret_538; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
+  uint16x4_t __ret_539; \
+  uint16x4_t __s0_539 = __p0_539; \
+  uint16x4_t __s1_539 = __p1_539; \
+  uint16x8_t __s2_539 = __p2_539; \
+  __ret_539 = __s0_539 + __s1_539 * splat_laneq_u16(__s2_539, __p3_539); \
+  __ret_539; \
+})
+#else
+#define vmla_laneq_u16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
+  uint16x4_t __ret_540; \
+  uint16x4_t __s0_540 = __p0_540; \
+  uint16x4_t __s1_540 = __p1_540; \
+  uint16x8_t __s2_540 = __p2_540; \
+  uint16x4_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 3, 2, 1, 0); \
+  uint16x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
+  uint16x8_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_540 = __rev0_540 + __rev1_540 * __noswap_splat_laneq_u16(__rev2_540, __p3_540); \
+  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 3, 2, 1, 0); \
+  __ret_540; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_f32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
+  float32x2_t __ret_541; \
+  float32x2_t __s0_541 = __p0_541; \
+  float32x2_t __s1_541 = __p1_541; \
+  float32x4_t __s2_541 = __p2_541; \
+  __ret_541 = __s0_541 + __s1_541 * splat_laneq_f32(__s2_541, __p3_541); \
+  __ret_541; \
+})
+#else
+#define vmla_laneq_f32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
+  float32x2_t __ret_542; \
+  float32x2_t __s0_542 = __p0_542; \
+  float32x2_t __s1_542 = __p1_542; \
+  float32x4_t __s2_542 = __p2_542; \
+  float32x2_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \
+  float32x2_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \
+  float32x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
+  __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_f32(__rev2_542, __p3_542); \
+  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \
+  __ret_542; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
+  int32x2_t __ret_543; \
+  int32x2_t __s0_543 = __p0_543; \
+  int32x2_t __s1_543 = __p1_543; \
+  int32x4_t __s2_543 = __p2_543; \
+  __ret_543 = __s0_543 + __s1_543 * splat_laneq_s32(__s2_543, __p3_543); \
+  __ret_543; \
+})
+#else
+#define vmla_laneq_s32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
+  int32x2_t __ret_544; \
+  int32x2_t __s0_544 = __p0_544; \
+  int32x2_t __s1_544 = __p1_544; \
+  int32x4_t __s2_544 = __p2_544; \
+  int32x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
+  int32x2_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 1, 0); \
+  int32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
+  __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_s32(__rev2_544, __p3_544); \
+  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
+  __ret_544; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
+  int16x4_t __ret_545; \
+  int16x4_t __s0_545 = __p0_545; \
+  int16x4_t __s1_545 = __p1_545; \
+  int16x8_t __s2_545 = __p2_545; \
+  __ret_545 = __s0_545 + __s1_545 * splat_laneq_s16(__s2_545, __p3_545); \
+  __ret_545; \
+})
+#else
+#define vmla_laneq_s16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
+  int16x4_t __ret_546; \
+  int16x4_t __s0_546 = __p0_546; \
+  int16x4_t __s1_546 = __p1_546; \
+  int16x8_t __s2_546 = __p2_546; \
+  int16x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
+  int16x4_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 3, 2, 1, 0); \
+  int16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_s16(__rev2_546, __p3_546); \
+  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
+  __ret_546; \
+})
+#endif
+
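+/*
+ * vmlal_high_lane_* is a widening multiply-accumulate on the upper half of
+ * a 128-bit source: the high lanes of __p1 are widened, multiplied by a
+ * broadcast lane of __p2, and added to the wide accumulator __p0. A sketch
+ * with illustrative names:
+ *
+ *   uint64x2_t acc2 = vmlal_high_lane_u32(acc, src, scalars, 1);
+ */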
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
+  uint64x2_t __ret_547; \
+  uint64x2_t __s0_547 = __p0_547; \
+  uint32x4_t __s1_547 = __p1_547; \
+  uint32x2_t __s2_547 = __p2_547; \
+  __ret_547 = __s0_547 + vmull_u32(vget_high_u32(__s1_547), splat_lane_u32(__s2_547, __p3_547)); \
+  __ret_547; \
+})
+#else
+#define vmlal_high_lane_u32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
+  uint64x2_t __ret_548; \
+  uint64x2_t __s0_548 = __p0_548; \
+  uint32x4_t __s1_548 = __p1_548; \
+  uint32x2_t __s2_548 = __p2_548; \
+  uint64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
+  uint32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
+  uint32x2_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 1, 0); \
+  __ret_548 = __rev0_548 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_548), __noswap_splat_lane_u32(__rev2_548, __p3_548)); \
+  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
+  __ret_548; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
+  uint32x4_t __ret_549; \
+  uint32x4_t __s0_549 = __p0_549; \
+  uint16x8_t __s1_549 = __p1_549; \
+  uint16x4_t __s2_549 = __p2_549; \
+  __ret_549 = __s0_549 + vmull_u16(vget_high_u16(__s1_549), splat_lane_u16(__s2_549, __p3_549)); \
+  __ret_549; \
+})
+#else
+#define vmlal_high_lane_u16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
+  uint32x4_t __ret_550; \
+  uint32x4_t __s0_550 = __p0_550; \
+  uint16x8_t __s1_550 = __p1_550; \
+  uint16x4_t __s2_550 = __p2_550; \
+  uint32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
+  uint16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 3, 2, 1, 0); \
+  __ret_550 = __rev0_550 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_550), __noswap_splat_lane_u16(__rev2_550, __p3_550)); \
+  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
+  __ret_550; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
+  int64x2_t __ret_551; \
+  int64x2_t __s0_551 = __p0_551; \
+  int32x4_t __s1_551 = __p1_551; \
+  int32x2_t __s2_551 = __p2_551; \
+  __ret_551 = __s0_551 + vmull_s32(vget_high_s32(__s1_551), splat_lane_s32(__s2_551, __p3_551)); \
+  __ret_551; \
+})
+#else
+#define vmlal_high_lane_s32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
+  int64x2_t __ret_552; \
+  int64x2_t __s0_552 = __p0_552; \
+  int32x4_t __s1_552 = __p1_552; \
+  int32x2_t __s2_552 = __p2_552; \
+  int64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
+  int32x4_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \
+  int32x2_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \
+  __ret_552 = __rev0_552 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_552), __noswap_splat_lane_s32(__rev2_552, __p3_552)); \
+  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
+  __ret_552; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
+  int32x4_t __ret_553; \
+  int32x4_t __s0_553 = __p0_553; \
+  int16x8_t __s1_553 = __p1_553; \
+  int16x4_t __s2_553 = __p2_553; \
+  __ret_553 = __s0_553 + vmull_s16(vget_high_s16(__s1_553), splat_lane_s16(__s2_553, __p3_553)); \
+  __ret_553; \
+})
+#else
+#define vmlal_high_lane_s16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
+  int32x4_t __ret_554; \
+  int32x4_t __s0_554 = __p0_554; \
+  int16x8_t __s1_554 = __p1_554; \
+  int16x4_t __s2_554 = __p2_554; \
+  int32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
+  int16x8_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \
+  __ret_554 = __rev0_554 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_554), __noswap_splat_lane_s16(__rev2_554, __p3_554)); \
+  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
+  __ret_554; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
+  uint64x2_t __ret_555; \
+  uint64x2_t __s0_555 = __p0_555; \
+  uint32x4_t __s1_555 = __p1_555; \
+  uint32x4_t __s2_555 = __p2_555; \
+  __ret_555 = __s0_555 + vmull_u32(vget_high_u32(__s1_555), splat_laneq_u32(__s2_555, __p3_555)); \
+  __ret_555; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
+  uint64x2_t __ret_556; \
+  uint64x2_t __s0_556 = __p0_556; \
+  uint32x4_t __s1_556 = __p1_556; \
+  uint32x4_t __s2_556 = __p2_556; \
+  uint64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
+  uint32x4_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \
+  uint32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
+  __ret_556 = __rev0_556 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_556), __noswap_splat_laneq_u32(__rev2_556, __p3_556)); \
+  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
+  __ret_556; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
+  uint32x4_t __ret_557; \
+  uint32x4_t __s0_557 = __p0_557; \
+  uint16x8_t __s1_557 = __p1_557; \
+  uint16x8_t __s2_557 = __p2_557; \
+  __ret_557 = __s0_557 + vmull_u16(vget_high_u16(__s1_557), splat_laneq_u16(__s2_557, __p3_557)); \
+  __ret_557; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
+  uint32x4_t __ret_558; \
+  uint32x4_t __s0_558 = __p0_558; \
+  uint16x8_t __s1_558 = __p1_558; \
+  uint16x8_t __s2_558 = __p2_558; \
+  uint32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
+  uint16x8_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_558 = __rev0_558 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_558), __noswap_splat_laneq_u16(__rev2_558, __p3_558)); \
+  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
+  __ret_558; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
+  int64x2_t __ret_559; \
+  int64x2_t __s0_559 = __p0_559; \
+  int32x4_t __s1_559 = __p1_559; \
+  int32x4_t __s2_559 = __p2_559; \
+  __ret_559 = __s0_559 + vmull_s32(vget_high_s32(__s1_559), splat_laneq_s32(__s2_559, __p3_559)); \
+  __ret_559; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
+  int64x2_t __ret_560; \
+  int64x2_t __s0_560 = __p0_560; \
+  int32x4_t __s1_560 = __p1_560; \
+  int32x4_t __s2_560 = __p2_560; \
+  int64x2_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \
+  int32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
+  int32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
+  __ret_560 = __rev0_560 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_560), __noswap_splat_laneq_s32(__rev2_560, __p3_560)); \
+  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \
+  __ret_560; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
+  int32x4_t __ret_561; \
+  int32x4_t __s0_561 = __p0_561; \
+  int16x8_t __s1_561 = __p1_561; \
+  int16x8_t __s2_561 = __p2_561; \
+  __ret_561 = __s0_561 + vmull_s16(vget_high_s16(__s1_561), splat_laneq_s16(__s2_561, __p3_561)); \
+  __ret_561; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
+  int32x4_t __ret_562; \
+  int32x4_t __s0_562 = __p0_562; \
+  int16x8_t __s1_562 = __p1_562; \
+  int16x8_t __s2_562 = __p2_562; \
+  int32x4_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \
+  int16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_562 = __rev0_562 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_562), __noswap_splat_laneq_s16(__rev2_562, __p3_562)); \
+  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \
+  __ret_562; \
+})
+#endif
+
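+/*
+ * vmlal_laneq_* differs from the _high_ forms above in taking the whole
+ * 64-bit source vector (no vget_high step) while still selecting the
+ * multiplier lane from a 128-bit vector.
+ */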
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
+  uint64x2_t __ret_563; \
+  uint64x2_t __s0_563 = __p0_563; \
+  uint32x2_t __s1_563 = __p1_563; \
+  uint32x4_t __s2_563 = __p2_563; \
+  __ret_563 = __s0_563 + vmull_u32(__s1_563, splat_laneq_u32(__s2_563, __p3_563)); \
+  __ret_563; \
+})
+#else
+#define vmlal_laneq_u32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
+  uint64x2_t __ret_564; \
+  uint64x2_t __s0_564 = __p0_564; \
+  uint32x2_t __s1_564 = __p1_564; \
+  uint32x4_t __s2_564 = __p2_564; \
+  uint64x2_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \
+  uint32x2_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 1, 0); \
+  uint32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
+  __ret_564 = __rev0_564 + __noswap_vmull_u32(__rev1_564, __noswap_splat_laneq_u32(__rev2_564, __p3_564)); \
+  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \
+  __ret_564; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
+  uint32x4_t __ret_565; \
+  uint32x4_t __s0_565 = __p0_565; \
+  uint16x4_t __s1_565 = __p1_565; \
+  uint16x8_t __s2_565 = __p2_565; \
+  __ret_565 = __s0_565 + vmull_u16(__s1_565, splat_laneq_u16(__s2_565, __p3_565)); \
+  __ret_565; \
+})
+#else
+#define vmlal_laneq_u16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
+  uint32x4_t __ret_566; \
+  uint32x4_t __s0_566 = __p0_566; \
+  uint16x4_t __s1_566 = __p1_566; \
+  uint16x8_t __s2_566 = __p2_566; \
+  uint32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
+  uint16x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
+  uint16x8_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_566 = __rev0_566 + __noswap_vmull_u16(__rev1_566, __noswap_splat_laneq_u16(__rev2_566, __p3_566)); \
+  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
+  __ret_566; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
+  int64x2_t __ret_567; \
+  int64x2_t __s0_567 = __p0_567; \
+  int32x2_t __s1_567 = __p1_567; \
+  int32x4_t __s2_567 = __p2_567; \
+  __ret_567 = __s0_567 + vmull_s32(__s1_567, splat_laneq_s32(__s2_567, __p3_567)); \
+  __ret_567; \
+})
+#else
+#define vmlal_laneq_s32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
+  int64x2_t __ret_568; \
+  int64x2_t __s0_568 = __p0_568; \
+  int32x2_t __s1_568 = __p1_568; \
+  int32x4_t __s2_568 = __p2_568; \
+  int64x2_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \
+  int32x2_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \
+  int32x4_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \
+  __ret_568 = __rev0_568 + __noswap_vmull_s32(__rev1_568, __noswap_splat_laneq_s32(__rev2_568, __p3_568)); \
+  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \
+  __ret_568; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
+  int32x4_t __ret_569; \
+  int32x4_t __s0_569 = __p0_569; \
+  int16x4_t __s1_569 = __p1_569; \
+  int16x8_t __s2_569 = __p2_569; \
+  __ret_569 = __s0_569 + vmull_s16(__s1_569, splat_laneq_s16(__s2_569, __p3_569)); \
+  __ret_569; \
+})
+#else
+#define vmlal_laneq_s16(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
+  int32x4_t __ret_570; \
+  int32x4_t __s0_570 = __p0_570; \
+  int16x4_t __s1_570 = __p1_570; \
+  int16x8_t __s2_570 = __p2_570; \
+  int32x4_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \
+  int16x4_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \
+  int16x8_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_570 = __rev0_570 + __noswap_vmull_s16(__rev1_570, __noswap_splat_laneq_s16(__rev2_570, __p3_570)); \
+  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \
+  __ret_570; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+  float64x1_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
+  uint32x4_t __ret_571; \
+  uint32x4_t __s0_571 = __p0_571; \
+  uint32x4_t __s1_571 = __p1_571; \
+  uint32x4_t __s2_571 = __p2_571; \
+  __ret_571 = __s0_571 - __s1_571 * splatq_laneq_u32(__s2_571, __p3_571); \
+  __ret_571; \
+})
+#else
+#define vmlsq_laneq_u32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
+  uint32x4_t __ret_572; \
+  uint32x4_t __s0_572 = __p0_572; \
+  uint32x4_t __s1_572 = __p1_572; \
+  uint32x4_t __s2_572 = __p2_572; \
+  uint32x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
+  uint32x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
+  uint32x4_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \
+  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splatq_laneq_u32(__rev2_572, __p3_572); \
+  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
+  __ret_572; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
+  uint16x8_t __ret_573; \
+  uint16x8_t __s0_573 = __p0_573; \
+  uint16x8_t __s1_573 = __p1_573; \
+  uint16x8_t __s2_573 = __p2_573; \
+  __ret_573 = __s0_573 - __s1_573 * splatq_laneq_u16(__s2_573, __p3_573); \
+  __ret_573; \
+})
+#else
+#define vmlsq_laneq_u16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
+  uint16x8_t __ret_574; \
+  uint16x8_t __s0_574 = __p0_574; \
+  uint16x8_t __s1_574 = __p1_574; \
+  uint16x8_t __s2_574 = __p2_574; \
+  uint16x8_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splatq_laneq_u16(__rev2_574, __p3_574); \
+  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_574; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_f32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
+  float32x4_t __ret_575; \
+  float32x4_t __s0_575 = __p0_575; \
+  float32x4_t __s1_575 = __p1_575; \
+  float32x4_t __s2_575 = __p2_575; \
+  __ret_575 = __s0_575 - __s1_575 * splatq_laneq_f32(__s2_575, __p3_575); \
+  __ret_575; \
+})
+#else
+#define vmlsq_laneq_f32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
+  float32x4_t __ret_576; \
+  float32x4_t __s0_576 = __p0_576; \
+  float32x4_t __s1_576 = __p1_576; \
+  float32x4_t __s2_576 = __p2_576; \
+  float32x4_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \
+  float32x4_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \
+  float32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
+  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_f32(__rev2_576, __p3_576); \
+  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \
+  __ret_576; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s32(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
+  int32x4_t __ret_577; \
+  int32x4_t __s0_577 = __p0_577; \
+  int32x4_t __s1_577 = __p1_577; \
+  int32x4_t __s2_577 = __p2_577; \
+  __ret_577 = __s0_577 - __s1_577 * splatq_laneq_s32(__s2_577, __p3_577); \
+  __ret_577; \
+})
+#else
+#define vmlsq_laneq_s32(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
+  int32x4_t __ret_578; \
+  int32x4_t __s0_578 = __p0_578; \
+  int32x4_t __s1_578 = __p1_578; \
+  int32x4_t __s2_578 = __p2_578; \
+  int32x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
+  int32x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
+  int32x4_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 3, 2, 1, 0); \
+  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_s32(__rev2_578, __p3_578); \
+  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
+  __ret_578; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s16(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
+  int16x8_t __ret_579; \
+  int16x8_t __s0_579 = __p0_579; \
+  int16x8_t __s1_579 = __p1_579; \
+  int16x8_t __s2_579 = __p2_579; \
+  __ret_579 = __s0_579 - __s1_579 * splatq_laneq_s16(__s2_579, __p3_579); \
+  __ret_579; \
+})
+#else
+#define vmlsq_laneq_s16(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
+  int16x8_t __ret_580; \
+  int16x8_t __s0_580 = __p0_580; \
+  int16x8_t __s1_580 = __p1_580; \
+  int16x8_t __s2_580 = __p2_580; \
+  int16x8_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_s16(__rev2_580, __p3_580); \
+  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_580; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
+  uint32x2_t __ret_581; \
+  uint32x2_t __s0_581 = __p0_581; \
+  uint32x2_t __s1_581 = __p1_581; \
+  uint32x4_t __s2_581 = __p2_581; \
+  __ret_581 = __s0_581 - __s1_581 * splat_laneq_u32(__s2_581, __p3_581); \
+  __ret_581; \
+})
+#else
+#define vmls_laneq_u32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
+  uint32x2_t __ret_582; \
+  uint32x2_t __s0_582 = __p0_582; \
+  uint32x2_t __s1_582 = __p1_582; \
+  uint32x4_t __s2_582 = __p2_582; \
+  uint32x2_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 1, 0); \
+  uint32x2_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 1, 0); \
+  uint32x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
+  __ret_582 = __rev0_582 - __rev1_582 * __noswap_splat_laneq_u32(__rev2_582, __p3_582); \
+  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 1, 0); \
+  __ret_582; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
+  uint16x4_t __ret_583; \
+  uint16x4_t __s0_583 = __p0_583; \
+  uint16x4_t __s1_583 = __p1_583; \
+  uint16x8_t __s2_583 = __p2_583; \
+  __ret_583 = __s0_583 - __s1_583 * splat_laneq_u16(__s2_583, __p3_583); \
+  __ret_583; \
+})
+#else
+#define vmls_laneq_u16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
+  uint16x4_t __ret_584; \
+  uint16x4_t __s0_584 = __p0_584; \
+  uint16x4_t __s1_584 = __p1_584; \
+  uint16x8_t __s2_584 = __p2_584; \
+  uint16x4_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 3, 2, 1, 0); \
+  uint16x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
+  uint16x8_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_584 = __rev0_584 - __rev1_584 * __noswap_splat_laneq_u16(__rev2_584, __p3_584); \
+  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 3, 2, 1, 0); \
+  __ret_584; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_f32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
+  float32x2_t __ret_585; \
+  float32x2_t __s0_585 = __p0_585; \
+  float32x2_t __s1_585 = __p1_585; \
+  float32x4_t __s2_585 = __p2_585; \
+  __ret_585 = __s0_585 - __s1_585 * splat_laneq_f32(__s2_585, __p3_585); \
+  __ret_585; \
+})
+#else
+#define vmls_laneq_f32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
+  float32x2_t __ret_586; \
+  float32x2_t __s0_586 = __p0_586; \
+  float32x2_t __s1_586 = __p1_586; \
+  float32x4_t __s2_586 = __p2_586; \
+  float32x2_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \
+  float32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  float32x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
+  __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_f32(__rev2_586, __p3_586); \
+  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \
+  __ret_586; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
+  int32x2_t __ret_587; \
+  int32x2_t __s0_587 = __p0_587; \
+  int32x2_t __s1_587 = __p1_587; \
+  int32x4_t __s2_587 = __p2_587; \
+  __ret_587 = __s0_587 - __s1_587 * splat_laneq_s32(__s2_587, __p3_587); \
+  __ret_587; \
+})
+#else
+#define vmls_laneq_s32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
+  int32x2_t __ret_588; \
+  int32x2_t __s0_588 = __p0_588; \
+  int32x2_t __s1_588 = __p1_588; \
+  int32x4_t __s2_588 = __p2_588; \
+  int32x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
+  int32x2_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 1, 0); \
+  int32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
+  __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_s32(__rev2_588, __p3_588); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+  __ret_588; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
+  int16x4_t __ret_589; \
+  int16x4_t __s0_589 = __p0_589; \
+  int16x4_t __s1_589 = __p1_589; \
+  int16x8_t __s2_589 = __p2_589; \
+  __ret_589 = __s0_589 - __s1_589 * splat_laneq_s16(__s2_589, __p3_589); \
+  __ret_589; \
+})
+#else
+#define vmls_laneq_s16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
+  int16x4_t __ret_590; \
+  int16x4_t __s0_590 = __p0_590; \
+  int16x4_t __s1_590 = __p1_590; \
+  int16x8_t __s2_590 = __p2_590; \
+  int16x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
+  int16x4_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 3, 2, 1, 0); \
+  int16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_590 = __rev0_590 - __rev1_590 * __noswap_splat_laneq_s16(__rev2_590, __p3_590); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+  __ret_590; \
+})
+#endif
+
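+/*
+ * vmlsl_high_lane_* mirrors vmlal_high_lane_* above with a subtraction: the
+ * widened lane product is subtracted from the wide accumulator.
+ */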
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
+  uint64x2_t __ret_591; \
+  uint64x2_t __s0_591 = __p0_591; \
+  uint32x4_t __s1_591 = __p1_591; \
+  uint32x2_t __s2_591 = __p2_591; \
+  __ret_591 = __s0_591 - vmull_u32(vget_high_u32(__s1_591), splat_lane_u32(__s2_591, __p3_591)); \
+  __ret_591; \
+})
+#else
+#define vmlsl_high_lane_u32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
+  uint64x2_t __ret_592; \
+  uint64x2_t __s0_592 = __p0_592; \
+  uint32x4_t __s1_592 = __p1_592; \
+  uint32x2_t __s2_592 = __p2_592; \
+  uint64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  uint32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
+  uint32x2_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 1, 0); \
+  __ret_592 = __rev0_592 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_592), __noswap_splat_lane_u32(__rev2_592, __p3_592)); \
+  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
+  __ret_592; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
+  uint32x4_t __ret_593; \
+  uint32x4_t __s0_593 = __p0_593; \
+  uint16x8_t __s1_593 = __p1_593; \
+  uint16x4_t __s2_593 = __p2_593; \
+  __ret_593 = __s0_593 - vmull_u16(vget_high_u16(__s1_593), splat_lane_u16(__s2_593, __p3_593)); \
+  __ret_593; \
+})
+#else
+#define vmlsl_high_lane_u16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
+  uint32x4_t __ret_594; \
+  uint32x4_t __s0_594 = __p0_594; \
+  uint16x8_t __s1_594 = __p1_594; \
+  uint16x4_t __s2_594 = __p2_594; \
+  uint32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
+  uint16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 3, 2, 1, 0); \
+  __ret_594 = __rev0_594 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_594), __noswap_splat_lane_u16(__rev2_594, __p3_594)); \
+  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
+  __ret_594; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
+  int64x2_t __ret_595; \
+  int64x2_t __s0_595 = __p0_595; \
+  int32x4_t __s1_595 = __p1_595; \
+  int32x2_t __s2_595 = __p2_595; \
+  __ret_595 = __s0_595 - vmull_s32(vget_high_s32(__s1_595), splat_lane_s32(__s2_595, __p3_595)); \
+  __ret_595; \
+})
+#else
+#define vmlsl_high_lane_s32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
+  int64x2_t __ret_596; \
+  int64x2_t __s0_596 = __p0_596; \
+  int32x4_t __s1_596 = __p1_596; \
+  int32x2_t __s2_596 = __p2_596; \
+  int64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
+  int32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  int32x2_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \
+  __ret_596 = __rev0_596 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_596), __noswap_splat_lane_s32(__rev2_596, __p3_596)); \
+  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
+  __ret_596; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
+  int32x4_t __ret_597; \
+  int32x4_t __s0_597 = __p0_597; \
+  int16x8_t __s1_597 = __p1_597; \
+  int16x4_t __s2_597 = __p2_597; \
+  __ret_597 = __s0_597 - vmull_s16(vget_high_s16(__s1_597), splat_lane_s16(__s2_597, __p3_597)); \
+  __ret_597; \
+})
+#else
+#define vmlsl_high_lane_s16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
+  int32x4_t __ret_598; \
+  int32x4_t __s0_598 = __p0_598; \
+  int16x8_t __s1_598 = __p1_598; \
+  int16x4_t __s2_598 = __p2_598; \
+  int32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
+  int16x8_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \
+  __ret_598 = __rev0_598 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_598), __noswap_splat_lane_s16(__rev2_598, __p3_598)); \
+  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
+  __ret_598; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
+  uint64x2_t __ret_599; \
+  uint64x2_t __s0_599 = __p0_599; \
+  uint32x4_t __s1_599 = __p1_599; \
+  uint32x4_t __s2_599 = __p2_599; \
+  __ret_599 = __s0_599 - vmull_u32(vget_high_u32(__s1_599), splat_laneq_u32(__s2_599, __p3_599)); \
+  __ret_599; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
+  uint64x2_t __ret_600; \
+  uint64x2_t __s0_600 = __p0_600; \
+  uint32x4_t __s1_600 = __p1_600; \
+  uint32x4_t __s2_600 = __p2_600; \
+  uint64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
+  uint32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  uint32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
+  __ret_600 = __rev0_600 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_600), __noswap_splat_laneq_u32(__rev2_600, __p3_600)); \
+  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
+  __ret_600; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
+  uint32x4_t __ret_601; \
+  uint32x4_t __s0_601 = __p0_601; \
+  uint16x8_t __s1_601 = __p1_601; \
+  uint16x8_t __s2_601 = __p2_601; \
+  __ret_601 = __s0_601 - vmull_u16(vget_high_u16(__s1_601), splat_laneq_u16(__s2_601, __p3_601)); \
+  __ret_601; \
+})
+#else
+#define vmlsl_high_laneq_u16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
+  uint32x4_t __ret_602; \
+  uint32x4_t __s0_602 = __p0_602; \
+  uint16x8_t __s1_602 = __p1_602; \
+  uint16x8_t __s2_602 = __p2_602; \
+  uint32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
+  uint16x8_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_602 = __rev0_602 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_602), __noswap_splat_laneq_u16(__rev2_602, __p3_602)); \
+  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
+  __ret_602; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  int64x2_t __ret_603; \
+  int64x2_t __s0_603 = __p0_603; \
+  int32x4_t __s1_603 = __p1_603; \
+  int32x4_t __s2_603 = __p2_603; \
+  __ret_603 = __s0_603 - vmull_s32(vget_high_s32(__s1_603), splat_laneq_s32(__s2_603, __p3_603)); \
+  __ret_603; \
+})
+#else
+#define vmlsl_high_laneq_s32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  int64x2_t __ret_604; \
+  int64x2_t __s0_604 = __p0_604; \
+  int32x4_t __s1_604 = __p1_604; \
+  int32x4_t __s2_604 = __p2_604; \
+  int64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  int32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  int32x4_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \
+  __ret_604 = __rev0_604 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_604), __noswap_splat_laneq_s32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  int32x4_t __ret_605; \
+  int32x4_t __s0_605 = __p0_605; \
+  int16x8_t __s1_605 = __p1_605; \
+  int16x8_t __s2_605 = __p2_605; \
+  __ret_605 = __s0_605 - vmull_s16(vget_high_s16(__s1_605), splat_laneq_s16(__s2_605, __p3_605)); \
+  __ret_605; \
+})
+#else
+#define vmlsl_high_laneq_s16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  int32x4_t __ret_606; \
+  int32x4_t __s0_606 = __p0_606; \
+  int16x8_t __s1_606 = __p1_606; \
+  int16x8_t __s2_606 = __p2_606; \
+  int32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  int16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_606 = __rev0_606 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_606), __noswap_splat_laneq_s16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  uint64x2_t __ret_607; \
+  uint64x2_t __s0_607 = __p0_607; \
+  uint32x2_t __s1_607 = __p1_607; \
+  uint32x4_t __s2_607 = __p2_607; \
+  __ret_607 = __s0_607 - vmull_u32(__s1_607, splat_laneq_u32(__s2_607, __p3_607)); \
+  __ret_607; \
+})
+#else
+#define vmlsl_laneq_u32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  uint64x2_t __ret_608; \
+  uint64x2_t __s0_608 = __p0_608; \
+  uint32x2_t __s1_608 = __p1_608; \
+  uint32x4_t __s2_608 = __p2_608; \
+  uint64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  uint32x2_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 1, 0); \
+  uint32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  __ret_608 = __rev0_608 - __noswap_vmull_u32(__rev1_608, __noswap_splat_laneq_u32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  uint32x4_t __ret_609; \
+  uint32x4_t __s0_609 = __p0_609; \
+  uint16x4_t __s1_609 = __p1_609; \
+  uint16x8_t __s2_609 = __p2_609; \
+  __ret_609 = __s0_609 - vmull_u16(__s1_609, splat_laneq_u16(__s2_609, __p3_609)); \
+  __ret_609; \
+})
+#else
+#define vmlsl_laneq_u16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  uint32x4_t __ret_610; \
+  uint32x4_t __s0_610 = __p0_610; \
+  uint16x4_t __s1_610 = __p1_610; \
+  uint16x8_t __s2_610 = __p2_610; \
+  uint32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  uint16x4_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 3, 2, 1, 0); \
+  uint16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_610 = __rev0_610 - __noswap_vmull_u16(__rev1_610, __noswap_splat_laneq_u16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  int64x2_t __ret_611; \
+  int64x2_t __s0_611 = __p0_611; \
+  int32x2_t __s1_611 = __p1_611; \
+  int32x4_t __s2_611 = __p2_611; \
+  __ret_611 = __s0_611 - vmull_s32(__s1_611, splat_laneq_s32(__s2_611, __p3_611)); \
+  __ret_611; \
+})
+#else
+#define vmlsl_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  int64x2_t __ret_612; \
+  int64x2_t __s0_612 = __p0_612; \
+  int32x2_t __s1_612 = __p1_612; \
+  int32x4_t __s2_612 = __p2_612; \
+  int64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  int32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  int32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  __ret_612 = __rev0_612 - __noswap_vmull_s32(__rev1_612, __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  int32x4_t __ret_613; \
+  int32x4_t __s0_613 = __p0_613; \
+  int16x4_t __s1_613 = __p1_613; \
+  int16x8_t __s2_613 = __p2_613; \
+  __ret_613 = __s0_613 - vmull_s16(__s1_613, splat_laneq_s16(__s2_613, __p3_613)); \
+  __ret_613; \
+})
+#else
+#define vmlsl_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  int32x4_t __ret_614; \
+  int32x4_t __s0_614 = __p0_614; \
+  int16x4_t __s1_614 = __p1_614; \
+  int16x8_t __s2_614 = __p2_614; \
+  int32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  int16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  int16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_614 = __rev0_614 - __noswap_vmull_s16(__rev1_614, __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
+})
+#endif
+
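+// vmov_n_*: broadcast a single scalar into every lane of the result vector.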
+__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) {__p0};
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmov_n_f64(float64_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) {__p0};
+  return __ret;
+}
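+// vmovl_high_*: widen the high half of a 128-bit vector to the next wider
+// element type; implemented here as a shift-left-long (vshll_n) by zero bits.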
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_615) {
+  uint16x8_t __ret_615;
+  uint8x8_t __a1_615 = vget_high_u8(__p0_615);
+  __ret_615 = (uint16x8_t)(vshll_n_u8(__a1_615, 0));
+  return __ret_615;
+}
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_616) {
+  uint16x8_t __ret_616;
+  uint8x16_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __a1_616 = __noswap_vget_high_u8(__rev0_616);
+  __ret_616 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_616, 0));
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_616;
+}
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_617) {
+  uint16x8_t __ret_617;
+  uint8x8_t __a1_617 = __noswap_vget_high_u8(__p0_617);
+  __ret_617 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_617, 0));
+  return __ret_617;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_618) {
+  uint64x2_t __ret_618;
+  uint32x2_t __a1_618 = vget_high_u32(__p0_618);
+  __ret_618 = (uint64x2_t)(vshll_n_u32(__a1_618, 0));
+  return __ret_618;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_619) {
+  uint64x2_t __ret_619;
+  uint32x4_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 3, 2, 1, 0);
+  uint32x2_t __a1_619 = __noswap_vget_high_u32(__rev0_619);
+  __ret_619 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_619, 0));
+  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 1, 0);
+  return __ret_619;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_620) {
+  uint64x2_t __ret_620;
+  uint32x2_t __a1_620 = __noswap_vget_high_u32(__p0_620);
+  __ret_620 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_620, 0));
+  return __ret_620;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_621) {
+  uint32x4_t __ret_621;
+  uint16x4_t __a1_621 = vget_high_u16(__p0_621);
+  __ret_621 = (uint32x4_t)(vshll_n_u16(__a1_621, 0));
+  return __ret_621;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_622) {
+  uint32x4_t __ret_622;
+  uint16x8_t __rev0_622;  __rev0_622 = __builtin_shufflevector(__p0_622, __p0_622, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x4_t __a1_622 = __noswap_vget_high_u16(__rev0_622);
+  __ret_622 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_622, 0));
+  __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0);
+  return __ret_622;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_623) {
+  uint32x4_t __ret_623;
+  uint16x4_t __a1_623 = __noswap_vget_high_u16(__p0_623);
+  __ret_623 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_623, 0));
+  return __ret_623;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_624) {
+  int16x8_t __ret_624;
+  int8x8_t __a1_624 = vget_high_s8(__p0_624);
+  __ret_624 = (int16x8_t)(vshll_n_s8(__a1_624, 0));
+  return __ret_624;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_625) {
+  int16x8_t __ret_625;
+  int8x16_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__p0_625, __p0_625, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __a1_625 = __noswap_vget_high_s8(__rev0_625);
+  __ret_625 = (int16x8_t)(__noswap_vshll_n_s8(__a1_625, 0));
+  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_625;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_626) {
+  int16x8_t __ret_626;
+  int8x8_t __a1_626 = __noswap_vget_high_s8(__p0_626);
+  __ret_626 = (int16x8_t)(__noswap_vshll_n_s8(__a1_626, 0));
+  return __ret_626;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_627) {
+  int64x2_t __ret_627;
+  int32x2_t __a1_627 = vget_high_s32(__p0_627);
+  __ret_627 = (int64x2_t)(vshll_n_s32(__a1_627, 0));
+  return __ret_627;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_628) {
+  int64x2_t __ret_628;
+  int32x4_t __rev0_628;  __rev0_628 = __builtin_shufflevector(__p0_628, __p0_628, 3, 2, 1, 0);
+  int32x2_t __a1_628 = __noswap_vget_high_s32(__rev0_628);
+  __ret_628 = (int64x2_t)(__noswap_vshll_n_s32(__a1_628, 0));
+  __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0);
+  return __ret_628;
+}
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_629) {
+  int64x2_t __ret_629;
+  int32x2_t __a1_629 = __noswap_vget_high_s32(__p0_629);
+  __ret_629 = (int64x2_t)(__noswap_vshll_n_s32(__a1_629, 0));
+  return __ret_629;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_630) {
+  int32x4_t __ret_630;
+  int16x4_t __a1_630 = vget_high_s16(__p0_630);
+  __ret_630 = (int32x4_t)(vshll_n_s16(__a1_630, 0));
+  return __ret_630;
+}
+#else
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_631) {
+  int32x4_t __ret_631;
+  int16x8_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__p0_631, __p0_631, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x4_t __a1_631 = __noswap_vget_high_s16(__rev0_631);
+  __ret_631 = (int32x4_t)(__noswap_vshll_n_s16(__a1_631, 0));
+  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0);
+  return __ret_631;
+}
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_632) {
+  int32x4_t __ret_632;
+  int16x4_t __a1_632 = __noswap_vget_high_s16(__p0_632);
+  __ret_632 = (int32x4_t)(__noswap_vshll_n_s16(__a1_632, 0));
+  return __ret_632;
+}
+#endif
+
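+// vmovn_high_*: narrow __p1 to half-width elements and combine it, as the
+// high half, with the existing narrow vector __p0.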
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
+  return __ret;
+}
+#else
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
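+// vmulq_f64/vmul_f64: element-wise double-precision multiply.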
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
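+// vmuld_lane_f64/vmuls_lane_f32: multiply a scalar by one extracted vector lane.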
+#define vmuld_lane_f64(__p0_633, __p1_633, __p2_633) __extension__ ({ \
+  float64_t __ret_633; \
+  float64_t __s0_633 = __p0_633; \
+  float64x1_t __s1_633 = __p1_633; \
+  __ret_633 = __s0_633 * vget_lane_f64(__s1_633, __p2_633); \
+  __ret_633; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_lane_f32(__p0_634, __p1_634, __p2_634) __extension__ ({ \
+  float32_t __ret_634; \
+  float32_t __s0_634 = __p0_634; \
+  float32x2_t __s1_634 = __p1_634; \
+  __ret_634 = __s0_634 * vget_lane_f32(__s1_634, __p2_634); \
+  __ret_634; \
+})
+#else
+#define vmuls_lane_f32(__p0_635, __p1_635, __p2_635) __extension__ ({ \
+  float32_t __ret_635; \
+  float32_t __s0_635 = __p0_635; \
+  float32x2_t __s1_635 = __p1_635; \
+  float32x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
+  __ret_635 = __s0_635 * __noswap_vget_lane_f32(__rev1_635, __p2_635); \
+  __ret_635; \
+})
+#endif
+
+#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f64(__p0_636, __p1_636, __p2_636) __extension__ ({ \
+  float64x2_t __ret_636; \
+  float64x2_t __s0_636 = __p0_636; \
+  float64x1_t __s1_636 = __p1_636; \
+  __ret_636 = __s0_636 * splatq_lane_f64(__s1_636, __p2_636); \
+  __ret_636; \
+})
+#else
+#define vmulq_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  float64x2_t __ret_637; \
+  float64x2_t __s0_637 = __p0_637; \
+  float64x1_t __s1_637 = __p1_637; \
+  float64x2_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 1, 0); \
+  __ret_637 = __rev0_637 * __noswap_splatq_lane_f64(__s1_637, __p2_637); \
+  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 1, 0); \
+  __ret_637; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuld_laneq_f64(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  float64_t __ret_638; \
+  float64_t __s0_638 = __p0_638; \
+  float64x2_t __s1_638 = __p1_638; \
+  __ret_638 = __s0_638 * vgetq_lane_f64(__s1_638, __p2_638); \
+  __ret_638; \
+})
+#else
+#define vmuld_laneq_f64(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  float64_t __ret_639; \
+  float64_t __s0_639 = __p0_639; \
+  float64x2_t __s1_639 = __p1_639; \
+  float64x2_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \
+  __ret_639 = __s0_639 * __noswap_vgetq_lane_f64(__rev1_639, __p2_639); \
+  __ret_639; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_laneq_f32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  float32_t __ret_640; \
+  float32_t __s0_640 = __p0_640; \
+  float32x4_t __s1_640 = __p1_640; \
+  __ret_640 = __s0_640 * vgetq_lane_f32(__s1_640, __p2_640); \
+  __ret_640; \
+})
+#else
+#define vmuls_laneq_f32(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  float32_t __ret_641; \
+  float32_t __s0_641 = __p0_641; \
+  float32x4_t __s1_641 = __p1_641; \
+  float32x4_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 3, 2, 1, 0); \
+  __ret_641 = __s0_641 * __noswap_vgetq_lane_f32(__rev1_641, __p2_641); \
+  __ret_641; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
+  __ret; \
+})
+#else
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __s1 = __p1; \
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
+  __ret; \
+})
+#endif
+
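+// vmulq_laneq_*/vmul_laneq_*: multiply each element by one lane of a 128-bit
+// vector, broadcast with splatq_laneq/splat_laneq.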
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  uint32x4_t __ret_642; \
+  uint32x4_t __s0_642 = __p0_642; \
+  uint32x4_t __s1_642 = __p1_642; \
+  __ret_642 = __s0_642 * splatq_laneq_u32(__s1_642, __p2_642); \
+  __ret_642; \
+})
+#else
+#define vmulq_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  uint32x4_t __ret_643; \
+  uint32x4_t __s0_643 = __p0_643; \
+  uint32x4_t __s1_643 = __p1_643; \
+  uint32x4_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 3, 2, 1, 0); \
+  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
+  __ret_643 = __rev0_643 * __noswap_splatq_laneq_u32(__rev1_643, __p2_643); \
+  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 3, 2, 1, 0); \
+  __ret_643; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  uint16x8_t __ret_644; \
+  uint16x8_t __s0_644 = __p0_644; \
+  uint16x8_t __s1_644 = __p1_644; \
+  __ret_644 = __s0_644 * splatq_laneq_u16(__s1_644, __p2_644); \
+  __ret_644; \
+})
+#else
+#define vmulq_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  uint16x8_t __ret_645; \
+  uint16x8_t __s0_645 = __p0_645; \
+  uint16x8_t __s1_645 = __p1_645; \
+  uint16x8_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_645 = __rev0_645 * __noswap_splatq_laneq_u16(__rev1_645, __p2_645); \
+  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_645; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f64(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  float64x2_t __ret_646; \
+  float64x2_t __s0_646 = __p0_646; \
+  float64x2_t __s1_646 = __p1_646; \
+  __ret_646 = __s0_646 * splatq_laneq_f64(__s1_646, __p2_646); \
+  __ret_646; \
+})
+#else
+#define vmulq_laneq_f64(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  float64x2_t __ret_647; \
+  float64x2_t __s0_647 = __p0_647; \
+  float64x2_t __s1_647 = __p1_647; \
+  float64x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
+  float64x2_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 1, 0); \
+  __ret_647 = __rev0_647 * __noswap_splatq_laneq_f64(__rev1_647, __p2_647); \
+  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
+  __ret_647; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  float32x4_t __ret_648; \
+  float32x4_t __s0_648 = __p0_648; \
+  float32x4_t __s1_648 = __p1_648; \
+  __ret_648 = __s0_648 * splatq_laneq_f32(__s1_648, __p2_648); \
+  __ret_648; \
+})
+#else
+#define vmulq_laneq_f32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  float32x4_t __ret_649; \
+  float32x4_t __s0_649 = __p0_649; \
+  float32x4_t __s1_649 = __p1_649; \
+  float32x4_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 3, 2, 1, 0); \
+  float32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
+  __ret_649 = __rev0_649 * __noswap_splatq_laneq_f32(__rev1_649, __p2_649); \
+  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 3, 2, 1, 0); \
+  __ret_649; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s32(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  int32x4_t __ret_650; \
+  int32x4_t __s0_650 = __p0_650; \
+  int32x4_t __s1_650 = __p1_650; \
+  __ret_650 = __s0_650 * splatq_laneq_s32(__s1_650, __p2_650); \
+  __ret_650; \
+})
+#else
+#define vmulq_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  int32x4_t __ret_651; \
+  int32x4_t __s0_651 = __p0_651; \
+  int32x4_t __s1_651 = __p1_651; \
+  int32x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
+  int32x4_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 3, 2, 1, 0); \
+  __ret_651 = __rev0_651 * __noswap_splatq_laneq_s32(__rev1_651, __p2_651); \
+  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
+  __ret_651; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s16(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  int16x8_t __ret_652; \
+  int16x8_t __s0_652 = __p0_652; \
+  int16x8_t __s1_652 = __p1_652; \
+  __ret_652 = __s0_652 * splatq_laneq_s16(__s1_652, __p2_652); \
+  __ret_652; \
+})
+#else
+#define vmulq_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  int16x8_t __ret_653; \
+  int16x8_t __s0_653 = __p0_653; \
+  int16x8_t __s1_653 = __p1_653; \
+  int16x8_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_653 = __rev0_653 * __noswap_splatq_laneq_s16(__rev1_653, __p2_653); \
+  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_653; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u32(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  uint32x2_t __ret_654; \
+  uint32x2_t __s0_654 = __p0_654; \
+  uint32x4_t __s1_654 = __p1_654; \
+  __ret_654 = __s0_654 * splat_laneq_u32(__s1_654, __p2_654); \
+  __ret_654; \
+})
+#else
+#define vmul_laneq_u32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  uint32x2_t __ret_655; \
+  uint32x2_t __s0_655 = __p0_655; \
+  uint32x4_t __s1_655 = __p1_655; \
+  uint32x2_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 1, 0); \
+  uint32x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
+  __ret_655 = __rev0_655 * __noswap_splat_laneq_u32(__rev1_655, __p2_655); \
+  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 1, 0); \
+  __ret_655; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u16(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  uint16x4_t __ret_656; \
+  uint16x4_t __s0_656 = __p0_656; \
+  uint16x8_t __s1_656 = __p1_656; \
+  __ret_656 = __s0_656 * splat_laneq_u16(__s1_656, __p2_656); \
+  __ret_656; \
+})
+#else
+#define vmul_laneq_u16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  uint16x4_t __ret_657; \
+  uint16x4_t __s0_657 = __p0_657; \
+  uint16x8_t __s1_657 = __p1_657; \
+  uint16x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
+  uint16x8_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_657 = __rev0_657 * __noswap_splat_laneq_u16(__rev1_657, __p2_657); \
+  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 3, 2, 1, 0); \
+  __ret_657; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f32(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  float32x2_t __ret_658; \
+  float32x2_t __s0_658 = __p0_658; \
+  float32x4_t __s1_658 = __p1_658; \
+  __ret_658 = __s0_658 * splat_laneq_f32(__s1_658, __p2_658); \
+  __ret_658; \
+})
+#else
+#define vmul_laneq_f32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  float32x2_t __ret_659; \
+  float32x2_t __s0_659 = __p0_659; \
+  float32x4_t __s1_659 = __p1_659; \
+  float32x2_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \
+  float32x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
+  __ret_659 = __rev0_659 * __noswap_splat_laneq_f32(__rev1_659, __p2_659); \
+  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \
+  __ret_659; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  int32x2_t __ret_660; \
+  int32x2_t __s0_660 = __p0_660; \
+  int32x4_t __s1_660 = __p1_660; \
+  __ret_660 = __s0_660 * splat_laneq_s32(__s1_660, __p2_660); \
+  __ret_660; \
+})
+#else
+#define vmul_laneq_s32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  int32x2_t __ret_661; \
+  int32x2_t __s0_661 = __p0_661; \
+  int32x4_t __s1_661 = __p1_661; \
+  int32x2_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 1, 0); \
+  int32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
+  __ret_661 = __rev0_661 * __noswap_splat_laneq_s32(__rev1_661, __p2_661); \
+  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
+  __ret_661; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  int16x4_t __ret_662; \
+  int16x4_t __s0_662 = __p0_662; \
+  int16x8_t __s1_662 = __p1_662; \
+  __ret_662 = __s0_662 * splat_laneq_s16(__s1_662, __p2_662); \
+  __ret_662; \
+})
+#else
+#define vmul_laneq_s16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  int16x4_t __ret_663; \
+  int16x4_t __s0_663 = __p0_663; \
+  int16x8_t __s1_663 = __p1_663; \
+  int16x4_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 3, 2, 1, 0); \
+  int16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_663 = __rev0_663 * __noswap_splat_laneq_s16(__rev1_663, __p2_663); \
+  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
+  __ret_663; \
+})
+#endif
+
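+// vmul_n_*: multiply every element of the vector by the same scalar.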
+__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+  float64x2_t __ret;
+  __ret = __p0 * (float64x2_t) {__p1, __p1};
+  return __ret;
+}
+#else
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = __rev0 * (float64x2_t) {__p1, __p1};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
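+// vmull_p64: 64x64 -> 128-bit carry-less (polynomial) multiply, the PMULL
+// instruction from the AArch64 crypto extensions.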
+__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
+  poly128_t __ret;
+  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
+  return __ret;
+}
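+// vmull_high_*: widening multiply of the high halves of two 128-bit vectors.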
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly16x8_t __ret;
+  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
+  return __ret;
+}
+#else
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly16x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
+  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
+  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
+  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
+  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+  return __ret;
+}
+#else
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly128_t __ret;
+  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
+  return __ret;
+}
+#else
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly128_t __ret;
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
+  return __ret;
+}
+#endif
+
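+// vmull_high_lane_*/vmull_high_laneq_*: widening multiply of the high half of
+// the first operand by one broadcast lane of the second.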
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  uint64x2_t __ret_664; \
+  uint32x4_t __s0_664 = __p0_664; \
+  uint32x2_t __s1_664 = __p1_664; \
+  __ret_664 = vmull_u32(vget_high_u32(__s0_664), splat_lane_u32(__s1_664, __p2_664)); \
+  __ret_664; \
+})
+#else
+#define vmull_high_lane_u32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  uint64x2_t __ret_665; \
+  uint32x4_t __s0_665 = __p0_665; \
+  uint32x2_t __s1_665 = __p1_665; \
+  uint32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
+  uint32x2_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 1, 0); \
+  __ret_665 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_665), __noswap_splat_lane_u32(__rev1_665, __p2_665)); \
+  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
+  __ret_665; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  uint32x4_t __ret_666; \
+  uint16x8_t __s0_666 = __p0_666; \
+  uint16x4_t __s1_666 = __p1_666; \
+  __ret_666 = vmull_u16(vget_high_u16(__s0_666), splat_lane_u16(__s1_666, __p2_666)); \
+  __ret_666; \
+})
+#else
+#define vmull_high_lane_u16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  uint32x4_t __ret_667; \
+  uint16x8_t __s0_667 = __p0_667; \
+  uint16x4_t __s1_667 = __p1_667; \
+  uint16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 3, 2, 1, 0); \
+  __ret_667 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_667), __noswap_splat_lane_u16(__rev1_667, __p2_667)); \
+  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
+  __ret_667; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  int64x2_t __ret_668; \
+  int32x4_t __s0_668 = __p0_668; \
+  int32x2_t __s1_668 = __p1_668; \
+  __ret_668 = vmull_s32(vget_high_s32(__s0_668), splat_lane_s32(__s1_668, __p2_668)); \
+  __ret_668; \
+})
+#else
+#define vmull_high_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  int64x2_t __ret_669; \
+  int32x4_t __s0_669 = __p0_669; \
+  int32x2_t __s1_669 = __p1_669; \
+  int32x4_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \
+  int32x2_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \
+  __ret_669 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_669), __noswap_splat_lane_s32(__rev1_669, __p2_669)); \
+  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
+  __ret_669; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  int32x4_t __ret_670; \
+  int16x8_t __s0_670 = __p0_670; \
+  int16x4_t __s1_670 = __p1_670; \
+  __ret_670 = vmull_s16(vget_high_s16(__s0_670), splat_lane_s16(__s1_670, __p2_670)); \
+  __ret_670; \
+})
+#else
+#define vmull_high_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  int32x4_t __ret_671; \
+  int16x8_t __s0_671 = __p0_671; \
+  int16x4_t __s1_671 = __p1_671; \
+  int16x8_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \
+  __ret_671 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_671), __noswap_splat_lane_s16(__rev1_671, __p2_671)); \
+  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
+  __ret_671; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  uint64x2_t __ret_672; \
+  uint32x4_t __s0_672 = __p0_672; \
+  uint32x4_t __s1_672 = __p1_672; \
+  __ret_672 = vmull_u32(vget_high_u32(__s0_672), splat_laneq_u32(__s1_672, __p2_672)); \
+  __ret_672; \
+})
+#else
+#define vmull_high_laneq_u32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  uint64x2_t __ret_673; \
+  uint32x4_t __s0_673 = __p0_673; \
+  uint32x4_t __s1_673 = __p1_673; \
+  uint32x4_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \
+  uint32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
+  __ret_673 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_673), __noswap_splat_laneq_u32(__rev1_673, __p2_673)); \
+  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
+  __ret_673; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  uint32x4_t __ret_674; \
+  uint16x8_t __s0_674 = __p0_674; \
+  uint16x8_t __s1_674 = __p1_674; \
+  __ret_674 = vmull_u16(vget_high_u16(__s0_674), splat_laneq_u16(__s1_674, __p2_674)); \
+  __ret_674; \
+})
+#else
+#define vmull_high_laneq_u16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  uint32x4_t __ret_675; \
+  uint16x8_t __s0_675 = __p0_675; \
+  uint16x8_t __s1_675 = __p1_675; \
+  uint16x8_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_675 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_675), __noswap_splat_laneq_u16(__rev1_675, __p2_675)); \
+  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
+  __ret_675; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  int64x2_t __ret_676; \
+  int32x4_t __s0_676 = __p0_676; \
+  int32x4_t __s1_676 = __p1_676; \
+  __ret_676 = vmull_s32(vget_high_s32(__s0_676), splat_laneq_s32(__s1_676, __p2_676)); \
+  __ret_676; \
+})
+#else
+#define vmull_high_laneq_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  int64x2_t __ret_677; \
+  int32x4_t __s0_677 = __p0_677; \
+  int32x4_t __s1_677 = __p1_677; \
+  int32x4_t __rev0_677;  __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \
+  int32x4_t __rev1_677;  __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \
+  __ret_677 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_677), __noswap_splat_laneq_s32(__rev1_677, __p2_677)); \
+  __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \
+  __ret_677; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  int32x4_t __ret_678; \
+  int16x8_t __s0_678 = __p0_678; \
+  int16x8_t __s1_678 = __p1_678; \
+  __ret_678 = vmull_s16(vget_high_s16(__s0_678), splat_laneq_s16(__s1_678, __p2_678)); \
+  __ret_678; \
+})
+#else
+#define vmull_high_laneq_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  int32x4_t __ret_679; \
+  int16x8_t __s0_679 = __p0_679; \
+  int16x8_t __s1_679 = __p1_679; \
+  int16x8_t __rev0_679;  __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_679;  __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_679 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_679), __noswap_splat_laneq_s16(__rev1_679, __p2_679)); \
+  __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \
+  __ret_679; \
+})
+#endif
+
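+// vmull_high_n_*: widening multiply of the high half by a broadcast scalar.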
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+  uint64x2_t __ret;
+  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+  uint64x2_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+  uint32x4_t __ret;
+  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+  uint32x4_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
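+// vmull_laneq_*: widening multiply of a 64-bit vector by one lane of a
+// 128-bit vector.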
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u32(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  uint64x2_t __ret_680; \
+  uint32x2_t __s0_680 = __p0_680; \
+  uint32x4_t __s1_680 = __p1_680; \
+  __ret_680 = vmull_u32(__s0_680, splat_laneq_u32(__s1_680, __p2_680)); \
+  __ret_680; \
+})
+#else
+#define vmull_laneq_u32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  uint64x2_t __ret_681; \
+  uint32x2_t __s0_681 = __p0_681; \
+  uint32x4_t __s1_681 = __p1_681; \
+  uint32x2_t __rev0_681;  __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 1, 0); \
+  uint32x4_t __rev1_681;  __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \
+  __ret_681 = __noswap_vmull_u32(__rev0_681, __noswap_splat_laneq_u32(__rev1_681, __p2_681)); \
+  __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \
+  __ret_681; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u16(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  uint32x4_t __ret_682; \
+  uint16x4_t __s0_682 = __p0_682; \
+  uint16x8_t __s1_682 = __p1_682; \
+  __ret_682 = vmull_u16(__s0_682, splat_laneq_u16(__s1_682, __p2_682)); \
+  __ret_682; \
+})
+#else
+#define vmull_laneq_u16(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  uint32x4_t __ret_683; \
+  uint16x4_t __s0_683 = __p0_683; \
+  uint16x8_t __s1_683 = __p1_683; \
+  uint16x4_t __rev0_683;  __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 3, 2, 1, 0); \
+  uint16x8_t __rev1_683;  __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_683 = __noswap_vmull_u16(__rev0_683, __noswap_splat_laneq_u16(__rev1_683, __p2_683)); \
+  __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \
+  __ret_683; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  int64x2_t __ret_684; \
+  int32x2_t __s0_684 = __p0_684; \
+  int32x4_t __s1_684 = __p1_684; \
+  __ret_684 = vmull_s32(__s0_684, splat_laneq_s32(__s1_684, __p2_684)); \
+  __ret_684; \
+})
+#else
+#define vmull_laneq_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  int64x2_t __ret_685; \
+  int32x2_t __s0_685 = __p0_685; \
+  int32x4_t __s1_685 = __p1_685; \
+  int32x2_t __rev0_685;  __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \
+  int32x4_t __rev1_685;  __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \
+  __ret_685 = __noswap_vmull_s32(__rev0_685, __noswap_splat_laneq_s32(__rev1_685, __p2_685)); \
+  __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \
+  __ret_685; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  int32x4_t __ret_686; \
+  int16x4_t __s0_686 = __p0_686; \
+  int16x8_t __s1_686 = __p1_686; \
+  __ret_686 = vmull_s16(__s0_686, splat_laneq_s16(__s1_686, __p2_686)); \
+  __ret_686; \
+})
+#else
+#define vmull_laneq_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  int32x4_t __ret_687; \
+  int16x4_t __s0_687 = __p0_687; \
+  int16x8_t __s1_687 = __p1_687; \
+  int16x4_t __rev0_687;  __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \
+  int16x8_t __rev1_687;  __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_687 = __noswap_vmull_s16(__rev0_687, __noswap_splat_laneq_s16(__rev1_687, __p2_687)); \
+  __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \
+  __ret_687; \
+})
+#endif
+
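+// vmulx*: floating-point multiply-extended (FMULX): like an ordinary multiply
+// except that 0 * infinity yields +/-2.0 instead of NaN.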
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+  return __ret;
+}
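+// vmulxd_lane_f64/vmulxs_lane_f32: scalar multiply-extended by one vector lane.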
+#define vmulxd_lane_f64(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  float64_t __ret_688; \
+  float64_t __s0_688 = __p0_688; \
+  float64x1_t __s1_688 = __p1_688; \
+  __ret_688 = vmulxd_f64(__s0_688, vget_lane_f64(__s1_688, __p2_688)); \
+  __ret_688; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_lane_f32(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  float32_t __ret_689; \
+  float32_t __s0_689 = __p0_689; \
+  float32x2_t __s1_689 = __p1_689; \
+  __ret_689 = vmulxs_f32(__s0_689, vget_lane_f32(__s1_689, __p2_689)); \
+  __ret_689; \
+})
+#else
+#define vmulxs_lane_f32(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  float32_t __ret_690; \
+  float32_t __s0_690 = __p0_690; \
+  float32x2_t __s1_690 = __p1_690; \
+  float32x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
+  __ret_690 = vmulxs_f32(__s0_690, __noswap_vget_lane_f32(__rev1_690, __p2_690)); \
+  __ret_690; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  float64x2_t __ret_691; \
+  float64x2_t __s0_691 = __p0_691; \
+  float64x1_t __s1_691 = __p1_691; \
+  __ret_691 = vmulxq_f64(__s0_691, splatq_lane_f64(__s1_691, __p2_691)); \
+  __ret_691; \
+})
+#else
+#define vmulxq_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  float64x2_t __ret_692; \
+  float64x2_t __s0_692 = __p0_692; \
+  float64x1_t __s1_692 = __p1_692; \
+  float64x2_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 1, 0); \
+  __ret_692 = __noswap_vmulxq_f64(__rev0_692, __noswap_splatq_lane_f64(__s1_692, __p2_692)); \
+  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 1, 0); \
+  __ret_692; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  float32x4_t __ret_693; \
+  float32x4_t __s0_693 = __p0_693; \
+  float32x2_t __s1_693 = __p1_693; \
+  __ret_693 = vmulxq_f32(__s0_693, splatq_lane_f32(__s1_693, __p2_693)); \
+  __ret_693; \
+})
+#else
+#define vmulxq_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  float32x4_t __ret_694; \
+  float32x4_t __s0_694 = __p0_694; \
+  float32x2_t __s1_694 = __p1_694; \
+  float32x4_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 3, 2, 1, 0); \
+  float32x2_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \
+  __ret_694 = __noswap_vmulxq_f32(__rev0_694, __noswap_splatq_lane_f32(__rev1_694, __p2_694)); \
+  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \
+  __ret_694; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f32(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  float32x2_t __ret_695; \
+  float32x2_t __s0_695 = __p0_695; \
+  float32x2_t __s1_695 = __p1_695; \
+  __ret_695 = vmulx_f32(__s0_695, splat_lane_f32(__s1_695, __p2_695)); \
+  __ret_695; \
+})
+#else
+#define vmulx_lane_f32(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  float32x2_t __ret_696; \
+  float32x2_t __s0_696 = __p0_696; \
+  float32x2_t __s1_696 = __p1_696; \
+  float32x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  float32x2_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 1, 0); \
+  __ret_696 = __noswap_vmulx_f32(__rev0_696, __noswap_splat_lane_f32(__rev1_696, __p2_696)); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
+  __ret_696; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_laneq_f64(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  float64_t __ret_697; \
+  float64_t __s0_697 = __p0_697; \
+  float64x2_t __s1_697 = __p1_697; \
+  __ret_697 = vmulxd_f64(__s0_697, vgetq_lane_f64(__s1_697, __p2_697)); \
+  __ret_697; \
+})
+#else
+#define vmulxd_laneq_f64(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  float64_t __ret_698; \
+  float64_t __s0_698 = __p0_698; \
+  float64x2_t __s1_698 = __p1_698; \
+  float64x2_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \
+  __ret_698 = vmulxd_f64(__s0_698, __noswap_vgetq_lane_f64(__rev1_698, __p2_698)); \
+  __ret_698; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_laneq_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  float32_t __ret_699; \
+  float32_t __s0_699 = __p0_699; \
+  float32x4_t __s1_699 = __p1_699; \
+  __ret_699 = vmulxs_f32(__s0_699, vgetq_lane_f32(__s1_699, __p2_699)); \
+  __ret_699; \
+})
+#else
+#define vmulxs_laneq_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  float32_t __ret_700; \
+  float32_t __s0_700 = __p0_700; \
+  float32x4_t __s1_700 = __p1_700; \
+  float32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
+  __ret_700 = vmulxs_f32(__s0_700, __noswap_vgetq_lane_f32(__rev1_700, __p2_700)); \
+  __ret_700; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  float64x2_t __ret_701; \
+  float64x2_t __s0_701 = __p0_701; \
+  float64x2_t __s1_701 = __p1_701; \
+  __ret_701 = vmulxq_f64(__s0_701, splatq_laneq_f64(__s1_701, __p2_701)); \
+  __ret_701; \
+})
+#else
+#define vmulxq_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  float64x2_t __ret_702; \
+  float64x2_t __s0_702 = __p0_702; \
+  float64x2_t __s1_702 = __p1_702; \
+  float64x2_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 1, 0); \
+  float64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  __ret_702 = __noswap_vmulxq_f64(__rev0_702, __noswap_splatq_laneq_f64(__rev1_702, __p2_702)); \
+  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 1, 0); \
+  __ret_702; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  float32x4_t __ret_703; \
+  float32x4_t __s0_703 = __p0_703; \
+  float32x4_t __s1_703 = __p1_703; \
+  __ret_703 = vmulxq_f32(__s0_703, splatq_laneq_f32(__s1_703, __p2_703)); \
+  __ret_703; \
+})
+#else
+#define vmulxq_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  float32x4_t __ret_704; \
+  float32x4_t __s0_704 = __p0_704; \
+  float32x4_t __s1_704 = __p1_704; \
+  float32x4_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \
+  float32x4_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \
+  __ret_704 = __noswap_vmulxq_f32(__rev0_704, __noswap_splatq_laneq_f32(__rev1_704, __p2_704)); \
+  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 3, 2, 1, 0); \
+  __ret_704; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  float32x2_t __ret_705; \
+  float32x2_t __s0_705 = __p0_705; \
+  float32x4_t __s1_705 = __p1_705; \
+  __ret_705 = vmulx_f32(__s0_705, splat_laneq_f32(__s1_705, __p2_705)); \
+  __ret_705; \
+})
+#else
+#define vmulx_laneq_f32(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  float32x2_t __ret_706; \
+  float32x2_t __s0_706 = __p0_706; \
+  float32x4_t __s1_706 = __p1_706; \
+  float32x2_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \
+  float32x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
+  __ret_706 = __noswap_vmulx_f32(__rev0_706, __noswap_splat_laneq_f32(__rev1_706, __p2_706)); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \
+  __ret_706; \
+})
+#endif
+
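+// vneg is expressed as plain C negation on the vector type.  The big-endian
+// wrapper still reverses lanes for uniformity with the rest of the file,
+// though for a purely lanewise operation the double swap is semantically a
+// no-op (editorial observation).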
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vneg_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+__ai int64x1_t vneg_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+__ai int64_t vnegd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
+  return __ret;
+}
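+// Pairwise add: vpaddq_* concatenates the pairwise sums of its two operands,
+// so the low half of the result comes from __p0 and the high half from __p1.
+// The trailing integer literal encodes the element type for the polymorphic
+// __builtin_neon_* call.  Illustrative use (not part of the header):
+//   uint8x16_t s = vpaddq_u8(vdupq_n_u8(1), vdupq_n_u8(2));
+//   // lanes 0..7 are 1+1 = 2, lanes 8..15 are 2+2 = 4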
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
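+// vpaddd_*/vpadds_* collapse a two-lane vector to a scalar by summing its
+// lanes; the big-endian variants reverse the input first even though the
+// addition is commutative.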
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
+  return __ret;
+}
+#else
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+  uint64_t __ret;
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
+  return __ret;
+}
+#else
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+  int64_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
+  return __ret;
+}
+#endif
+
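+// Pairwise maximum follows the same concatenated-pairs layout as vpaddq.
+// The *nm variants further down map to FMAXNMP, whose IEEE 754-2008 maxNum
+// semantics prefer a number over a quiet NaN (editorial note).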
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
+  return __ret;
+}
+#endif
+
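+// The vpmin/vpminnm group below mirrors vpmax/vpmaxnm operation for
+// operation, with FMINNMP supplying the corresponding minNum behaviour.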
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
+  return __ret;
+}
+#else
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+  float64_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
+  return __ret;
+}
+#else
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+  float32_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
+  return __ret;
+}
+#endif
+
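+// Saturating arithmetic: vqabs clamps the most negative value to the type's
+// maximum instead of wrapping, and the scalar vqabs*/vqadd* helpers below
+// expose the saturating instructions on single elements (the b/h/s/d
+// suffixes denote 8-, 16-, 32- and 64-bit operands).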
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vqabs_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int8_t vqabsb_s8(int8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
+  return __ret;
+}
+__ai int32_t vqabss_s32(int32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
+  return __ret;
+}
+__ai int64_t vqabsd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
+  return __ret;
+}
+__ai int16_t vqabsh_s16(int16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
+  return __ret;
+}
+__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
+  return __ret;
+}
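+// vqdmlal computes __p0 + saturate(2 * __p1 * __p2) with widening; the
+// _high variants consume the upper halves of 128-bit sources via
+// vget_high_*, so both halves of a register can be accumulated without a
+// separate extract.  Illustrative use (identifiers hypothetical):
+//   int64x2_t acc2 = vqdmlal_high_s32(acc, a, b);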
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
+  int64x2_t __ret_707; \
+  int64x2_t __s0_707 = __p0_707; \
+  int32x4_t __s1_707 = __p1_707; \
+  int32x2_t __s2_707 = __p2_707; \
+  __ret_707 = vqdmlal_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
+  __ret_707; \
+})
+#else
+#define vqdmlal_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
+  int64x2_t __ret_708; \
+  int64x2_t __s0_708 = __p0_708; \
+  int32x4_t __s1_708 = __p1_708; \
+  int32x2_t __s2_708 = __p2_708; \
+  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
+  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
+  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
+  __ret_708 = __noswap_vqdmlal_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
+  __ret_708; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
+  int32x4_t __ret_709; \
+  int32x4_t __s0_709 = __p0_709; \
+  int16x8_t __s1_709 = __p1_709; \
+  int16x4_t __s2_709 = __p2_709; \
+  __ret_709 = vqdmlal_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
+  __ret_709; \
+})
+#else
+#define vqdmlal_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
+  int32x4_t __ret_710; \
+  int32x4_t __s0_710 = __p0_710; \
+  int16x8_t __s1_710 = __p1_710; \
+  int16x4_t __s2_710 = __p2_710; \
+  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
+  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
+  __ret_710 = __noswap_vqdmlal_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
+  __ret_710; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
+  int64x2_t __ret_711; \
+  int64x2_t __s0_711 = __p0_711; \
+  int32x4_t __s1_711 = __p1_711; \
+  int32x4_t __s2_711 = __p2_711; \
+  __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
+  __ret_711; \
+})
+#else
+#define vqdmlal_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
+  int64x2_t __ret_712; \
+  int64x2_t __s0_712 = __p0_712; \
+  int32x4_t __s1_712 = __p1_712; \
+  int32x4_t __s2_712 = __p2_712; \
+  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
+  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
+  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
+  __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
+  __ret_712; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
+  int32x4_t __ret_713; \
+  int32x4_t __s0_713 = __p0_713; \
+  int16x8_t __s1_713 = __p1_713; \
+  int16x8_t __s2_713 = __p2_713; \
+  __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
+  __ret_713; \
+})
+#else
+#define vqdmlal_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
+  int32x4_t __ret_714; \
+  int32x4_t __s0_714 = __p0_714; \
+  int16x8_t __s1_714 = __p1_714; \
+  int16x8_t __s2_714 = __p2_714; \
+  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
+  __ret_714; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
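+// The scalar-by-lane forms (vqdmlals_lane_s32 and friends) call a dedicated
+// builtin directly; only the vector operand needs the big-endian reversal,
+// since the accumulator and multiplicand are plain scalars.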
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
+  int64x2_t __ret_715; \
+  int64x2_t __s0_715 = __p0_715; \
+  int32x2_t __s1_715 = __p1_715; \
+  int32x4_t __s2_715 = __p2_715; \
+  __ret_715 = vqdmlal_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
+  __ret_715; \
+})
+#else
+#define vqdmlal_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
+  int64x2_t __ret_716; \
+  int64x2_t __s0_716 = __p0_716; \
+  int32x2_t __s1_716 = __p1_716; \
+  int32x4_t __s2_716 = __p2_716; \
+  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
+  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
+  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
+  __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
+  __ret_716; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
+  int32x4_t __ret_717; \
+  int32x4_t __s0_717 = __p0_717; \
+  int16x4_t __s1_717 = __p1_717; \
+  int16x8_t __s2_717 = __p2_717; \
+  __ret_717 = vqdmlal_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
+  __ret_717; \
+})
+#else
+#define vqdmlal_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
+  int32x4_t __ret_718; \
+  int32x4_t __s0_718 = __p0_718; \
+  int16x4_t __s1_718 = __p1_718; \
+  int16x8_t __s2_718 = __p2_718; \
+  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
+  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
+  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
+  __ret_718; \
+})
+#endif
+
+__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
+  return __ret;
+}
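+// vqdmlsl is the subtracting counterpart of vqdmlal: __p0 - saturate(2 *
+// __p1 * __p2), repeated below in the same _high/_lane/_laneq shapes.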
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s32(__p0_719, __p1_719, __p2_719, __p3_719) __extension__ ({ \
+  int64x2_t __ret_719; \
+  int64x2_t __s0_719 = __p0_719; \
+  int32x4_t __s1_719 = __p1_719; \
+  int32x2_t __s2_719 = __p2_719; \
+  __ret_719 = vqdmlsl_s32(__s0_719, vget_high_s32(__s1_719), splat_lane_s32(__s2_719, __p3_719)); \
+  __ret_719; \
+})
+#else
+#define vqdmlsl_high_lane_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \
+  int64x2_t __ret_720; \
+  int64x2_t __s0_720 = __p0_720; \
+  int32x4_t __s1_720 = __p1_720; \
+  int32x2_t __s2_720 = __p2_720; \
+  int64x2_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \
+  int32x4_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 3, 2, 1, 0); \
+  int32x2_t __rev2_720;  __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 1, 0); \
+  __ret_720 = __noswap_vqdmlsl_s32(__rev0_720, __noswap_vget_high_s32(__rev1_720), __noswap_splat_lane_s32(__rev2_720, __p3_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \
+  int32x4_t __ret_721; \
+  int32x4_t __s0_721 = __p0_721; \
+  int16x8_t __s1_721 = __p1_721; \
+  int16x4_t __s2_721 = __p2_721; \
+  __ret_721 = vqdmlsl_s16(__s0_721, vget_high_s16(__s1_721), splat_lane_s16(__s2_721, __p3_721)); \
+  __ret_721; \
+})
+#else
+#define vqdmlsl_high_lane_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \
+  int32x4_t __ret_722; \
+  int32x4_t __s0_722 = __p0_722; \
+  int16x8_t __s1_722 = __p1_722; \
+  int16x4_t __s2_722 = __p2_722; \
+  int32x4_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \
+  int16x8_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_722;  __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 3, 2, 1, 0); \
+  __ret_722 = __noswap_vqdmlsl_s16(__rev0_722, __noswap_vget_high_s16(__rev1_722), __noswap_splat_lane_s16(__rev2_722, __p3_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \
+  int64x2_t __ret_723; \
+  int64x2_t __s0_723 = __p0_723; \
+  int32x4_t __s1_723 = __p1_723; \
+  int32x4_t __s2_723 = __p2_723; \
+  __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_laneq_s32(__s2_723, __p3_723)); \
+  __ret_723; \
+})
+#else
+#define vqdmlsl_high_laneq_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \
+  int64x2_t __ret_724; \
+  int64x2_t __s0_724 = __p0_724; \
+  int32x4_t __s1_724 = __p1_724; \
+  int32x4_t __s2_724 = __p2_724; \
+  int64x2_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \
+  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  int32x4_t __rev2_724;  __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 3, 2, 1, 0); \
+  __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_laneq_s32(__rev2_724, __p3_724)); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \
+  __ret_724; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \
+  int32x4_t __ret_725; \
+  int32x4_t __s0_725 = __p0_725; \
+  int16x8_t __s1_725 = __p1_725; \
+  int16x8_t __s2_725 = __p2_725; \
+  __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_laneq_s16(__s2_725, __p3_725)); \
+  __ret_725; \
+})
+#else
+#define vqdmlsl_high_laneq_s16(__p0_726, __p1_726, __p2_726, __p3_726) __extension__ ({ \
+  int32x4_t __ret_726; \
+  int32x4_t __s0_726 = __p0_726; \
+  int16x8_t __s1_726 = __p1_726; \
+  int16x8_t __s2_726 = __p2_726; \
+  int32x4_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \
+  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_726;  __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_laneq_s16(__rev2_726, __p3_726)); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x2_t __s2 = __p2; \
+  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x4_t __s2 = __p2; \
+  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  int32_t __s1 = __p1; \
+  int32x4_t __s2 = __p2; \
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
+  __ret; \
+})
+#else
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  int16_t __s1 = __p1; \
+  int16x8_t __s2 = __p2; \
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \
+  int64x2_t __ret_727; \
+  int64x2_t __s0_727 = __p0_727; \
+  int32x2_t __s1_727 = __p1_727; \
+  int32x4_t __s2_727 = __p2_727; \
+  __ret_727 = vqdmlsl_s32(__s0_727, __s1_727, splat_laneq_s32(__s2_727, __p3_727)); \
+  __ret_727; \
+})
+#else
+#define vqdmlsl_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \
+  int64x2_t __ret_728; \
+  int64x2_t __s0_728 = __p0_728; \
+  int32x2_t __s1_728 = __p1_728; \
+  int32x4_t __s2_728 = __p2_728; \
+  int64x2_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \
+  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
+  int32x4_t __rev2_728;  __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \
+  __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __rev1_728, __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
+  __ret_728; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \
+  int32x4_t __ret_729; \
+  int32x4_t __s0_729 = __p0_729; \
+  int16x4_t __s1_729 = __p1_729; \
+  int16x8_t __s2_729 = __p2_729; \
+  __ret_729 = vqdmlsl_s16(__s0_729, __s1_729, splat_laneq_s16(__s2_729, __p3_729)); \
+  __ret_729; \
+})
+#else
+#define vqdmlsl_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \
+  int32x4_t __ret_730; \
+  int32x4_t __s0_730 = __p0_730; \
+  int16x4_t __s1_730 = __p1_730; \
+  int16x8_t __s2_730 = __p2_730; \
+  int32x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
+  int16x8_t __rev2_730;  __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __rev1_730, __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
+  __ret_730; \
+})
+#endif
+
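+// vqdmulh*: saturating doubling multiply returning the high half, i.e.
+// sat(2*a*b) >> elem_bits. E.g. vqdmulhh_s16(0x4000, 0x4000) == 0x2000,
+// while vqdmulhh_s16(-32768, -32768) saturates to 0x7fff.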
+__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_lane_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
+  int32_t __ret_731; \
+  int32_t __s0_731 = __p0_731; \
+  int32x2_t __s1_731 = __p1_731; \
+  __ret_731 = vqdmulhs_s32(__s0_731, vget_lane_s32(__s1_731, __p2_731)); \
+  __ret_731; \
+})
+#else
+#define vqdmulhs_lane_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
+  int32_t __ret_732; \
+  int32_t __s0_732 = __p0_732; \
+  int32x2_t __s1_732 = __p1_732; \
+  int32x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  __ret_732 = vqdmulhs_s32(__s0_732, __noswap_vget_lane_s32(__rev1_732, __p2_732)); \
+  __ret_732; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_lane_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
+  int16_t __ret_733; \
+  int16_t __s0_733 = __p0_733; \
+  int16x4_t __s1_733 = __p1_733; \
+  __ret_733 = vqdmulhh_s16(__s0_733, vget_lane_s16(__s1_733, __p2_733)); \
+  __ret_733; \
+})
+#else
+#define vqdmulhh_lane_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
+  int16_t __ret_734; \
+  int16_t __s0_734 = __p0_734; \
+  int16x4_t __s1_734 = __p1_734; \
+  int16x4_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \
+  __ret_734 = vqdmulhh_s16(__s0_734, __noswap_vget_lane_s16(__rev1_734, __p2_734)); \
+  __ret_734; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_laneq_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
+  int32_t __ret_735; \
+  int32_t __s0_735 = __p0_735; \
+  int32x4_t __s1_735 = __p1_735; \
+  __ret_735 = vqdmulhs_s32(__s0_735, vgetq_lane_s32(__s1_735, __p2_735)); \
+  __ret_735; \
+})
+#else
+#define vqdmulhs_laneq_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
+  int32_t __ret_736; \
+  int32_t __s0_736 = __p0_736; \
+  int32x4_t __s1_736 = __p1_736; \
+  int32x4_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 3, 2, 1, 0); \
+  __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vgetq_lane_s32(__rev1_736, __p2_736)); \
+  __ret_736; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_laneq_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
+  int16_t __ret_737; \
+  int16_t __s0_737 = __p0_737; \
+  int16x8_t __s1_737 = __p1_737; \
+  __ret_737 = vqdmulhh_s16(__s0_737, vgetq_lane_s16(__s1_737, __p2_737)); \
+  __ret_737; \
+})
+#else
+#define vqdmulhh_laneq_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
+  int16_t __ret_738; \
+  int16_t __s0_738 = __p0_738; \
+  int16x8_t __s1_738 = __p1_738; \
+  int16x8_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vgetq_lane_s16(__rev1_738, __p2_738)); \
+  __ret_738; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
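+// vqdmull*: signed saturating doubling multiply long. The product is kept
+// at twice the element width, so e.g. vqdmulls_s32 saturates only for
+// INT32_MIN * INT32_MIN, where 2*a*b would be 2^63.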
+__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
+  int64x2_t __ret_739; \
+  int32x4_t __s0_739 = __p0_739; \
+  int32x2_t __s1_739 = __p1_739; \
+  __ret_739 = vqdmull_s32(vget_high_s32(__s0_739), splat_lane_s32(__s1_739, __p2_739)); \
+  __ret_739; \
+})
+#else
+#define vqdmull_high_lane_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
+  int64x2_t __ret_740; \
+  int32x4_t __s0_740 = __p0_740; \
+  int32x2_t __s1_740 = __p1_740; \
+  int32x4_t __rev0_740;  __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 3, 2, 1, 0); \
+  int32x2_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 1, 0); \
+  __ret_740 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_740), __noswap_splat_lane_s32(__rev1_740, __p2_740)); \
+  __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 1, 0); \
+  __ret_740; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
+  int32x4_t __ret_741; \
+  int16x8_t __s0_741 = __p0_741; \
+  int16x4_t __s1_741 = __p1_741; \
+  __ret_741 = vqdmull_s16(vget_high_s16(__s0_741), splat_lane_s16(__s1_741, __p2_741)); \
+  __ret_741; \
+})
+#else
+#define vqdmull_high_lane_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
+  int32x4_t __ret_742; \
+  int16x8_t __s0_742 = __p0_742; \
+  int16x4_t __s1_742 = __p1_742; \
+  int16x8_t __rev0_742;  __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 3, 2, 1, 0); \
+  __ret_742 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_742), __noswap_splat_lane_s16(__rev1_742, __p2_742)); \
+  __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \
+  __ret_742; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
+  int64x2_t __ret_743; \
+  int32x4_t __s0_743 = __p0_743; \
+  int32x4_t __s1_743 = __p1_743; \
+  __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_laneq_s32(__s1_743, __p2_743)); \
+  __ret_743; \
+})
+#else
+#define vqdmull_high_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
+  int64x2_t __ret_744; \
+  int32x4_t __s0_744 = __p0_744; \
+  int32x4_t __s1_744 = __p1_744; \
+  int32x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
+  __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
+  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
+  __ret_744; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
+  int32x4_t __ret_745; \
+  int16x8_t __s0_745 = __p0_745; \
+  int16x8_t __s1_745 = __p1_745; \
+  __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_laneq_s16(__s1_745, __p2_745)); \
+  __ret_745; \
+})
+#else
+#define vqdmull_high_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
+  int32x4_t __ret_746; \
+  int16x8_t __s0_746 = __p0_746; \
+  int16x8_t __s1_746 = __p1_746; \
+  int16x8_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
+  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
+  __ret_746; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+  int64x2_t __ret;
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+  int32x4_t __ret;
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
+  int64_t __ret_747; \
+  int32_t __s0_747 = __p0_747; \
+  int32x2_t __s1_747 = __p1_747; \
+  __ret_747 = vqdmulls_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
+  __ret_747; \
+})
+#else
+#define vqdmulls_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
+  int64_t __ret_748; \
+  int32_t __s0_748 = __p0_748; \
+  int32x2_t __s1_748 = __p1_748; \
+  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
+  __ret_748 = vqdmulls_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
+  __ret_748; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
+  int32_t __ret_749; \
+  int16_t __s0_749 = __p0_749; \
+  int16x4_t __s1_749 = __p1_749; \
+  __ret_749 = vqdmullh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
+  __ret_749; \
+})
+#else
+#define vqdmullh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
+  int32_t __ret_750; \
+  int16_t __s0_750 = __p0_750; \
+  int16x4_t __s1_750 = __p1_750; \
+  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
+  __ret_750 = vqdmullh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
+  __ret_750; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
+  int64_t __ret_751; \
+  int32_t __s0_751 = __p0_751; \
+  int32x4_t __s1_751 = __p1_751; \
+  __ret_751 = vqdmulls_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
+  __ret_751; \
+})
+#else
+#define vqdmulls_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
+  int64_t __ret_752; \
+  int32_t __s0_752 = __p0_752; \
+  int32x4_t __s1_752 = __p1_752; \
+  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
+  __ret_752 = vqdmulls_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
+  __ret_752; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
+  int32_t __ret_753; \
+  int16_t __s0_753 = __p0_753; \
+  int16x8_t __s1_753 = __p1_753; \
+  __ret_753 = vqdmullh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
+  __ret_753; \
+})
+#else
+#define vqdmullh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
+  int32_t __ret_754; \
+  int16_t __s0_754 = __p0_754; \
+  int16x8_t __s1_754 = __p1_754; \
+  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_754 = vqdmullh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
+  __ret_754; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
+  int64x2_t __ret_755; \
+  int32x2_t __s0_755 = __p0_755; \
+  int32x4_t __s1_755 = __p1_755; \
+  __ret_755 = vqdmull_s32(__s0_755, splat_laneq_s32(__s1_755, __p2_755)); \
+  __ret_755; \
+})
+#else
+#define vqdmull_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
+  int64x2_t __ret_756; \
+  int32x2_t __s0_756 = __p0_756; \
+  int32x4_t __s1_756 = __p1_756; \
+  int32x2_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 1, 0); \
+  int32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  __ret_756 = __noswap_vqdmull_s32(__rev0_756, __noswap_splat_laneq_s32(__rev1_756, __p2_756)); \
+  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 1, 0); \
+  __ret_756; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \
+  int32x4_t __ret_757; \
+  int16x4_t __s0_757 = __p0_757; \
+  int16x8_t __s1_757 = __p1_757; \
+  __ret_757 = vqdmull_s16(__s0_757, splat_laneq_s16(__s1_757, __p2_757)); \
+  __ret_757; \
+})
+#else
+#define vqdmull_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \
+  int32x4_t __ret_758; \
+  int16x4_t __s0_758 = __p0_758; \
+  int16x8_t __s1_758 = __p1_758; \
+  int16x4_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 3, 2, 1, 0); \
+  int16x8_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_758 = __noswap_vqdmull_s16(__rev0_758, __noswap_splat_laneq_s16(__rev1_758, __p2_758)); \
+  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
+  __ret_758; \
+})
+#endif
+
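+// Scalar saturating narrows: the result is clamped to the range of the
+// narrower type, e.g. vqmovns_s32(100000) == 32767 and
+// vqmovnd_u64(~0ULL) == 0xffffffff.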
+__ai int16_t vqmovns_s32(int32_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
+  return __ret;
+}
+__ai int32_t vqmovnd_s64(int64_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
+  return __ret;
+}
+__ai int8_t vqmovnh_s16(int16_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
+  return __ret;
+}
+__ai uint16_t vqmovns_u32(uint32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
+  return __ret;
+}
+__ai uint32_t vqmovnd_u64(uint64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
+  return __ret;
+}
+__ai uint8_t vqmovnh_u16(uint16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
+  return __ret;
+}
+#else
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
+  return __ret;
+}
+#else
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
+  return __ret;
+}
+#else
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
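+// vqmovun*: narrow a signed source to the unsigned range with saturation;
+// negative inputs clamp to 0, e.g. vqmovuns_s32(-1) == 0 and
+// vqmovuns_s32(100000) == 0xffff.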
+__ai uint16_t vqmovuns_s32(int32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
+  return __ret;
+}
+__ai uint32_t vqmovund_s64(int64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
+  return __ret;
+}
+__ai uint8_t vqmovunh_s16(int16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
+  return __ret;
+}
+#else
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
+  return __ret;
+}
+#else
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
+  return __ret;
+}
+#else
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
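+// vqneg*: saturating negation; the one input that saturates is the most
+// negative value, e.g. vqnegd_s64(INT64_MIN) == INT64_MAX.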
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+  int64x2_t __ret;
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vqneg_s64(int64x1_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+__ai int8_t vqnegb_s8(int8_t __p0) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
+  return __ret;
+}
+__ai int32_t vqnegs_s32(int32_t __p0) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
+  return __ret;
+}
+__ai int64_t vqnegd_s64(int64_t __p0) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
+  return __ret;
+}
+__ai int16_t vqnegh_s16(int16_t __p0) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
+  return __ret;
+}
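+// vqrdmulh*: the rounding variant of vqdmulh, adding 1 << (elem_bits - 1)
+// to the doubling product before taking the high half:
+// sat(2*a*b + 0x8000) >> 16 for the s16 forms.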
+__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_lane_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \
+  int32_t __ret_759; \
+  int32_t __s0_759 = __p0_759; \
+  int32x2_t __s1_759 = __p1_759; \
+  __ret_759 = vqrdmulhs_s32(__s0_759, vget_lane_s32(__s1_759, __p2_759)); \
+  __ret_759; \
+})
+#else
+#define vqrdmulhs_lane_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \
+  int32_t __ret_760; \
+  int32_t __s0_760 = __p0_760; \
+  int32x2_t __s1_760 = __p1_760; \
+  int32x2_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 1, 0); \
+  __ret_760 = vqrdmulhs_s32(__s0_760, __noswap_vget_lane_s32(__rev1_760, __p2_760)); \
+  __ret_760; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_lane_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \
+  int16_t __ret_761; \
+  int16_t __s0_761 = __p0_761; \
+  int16x4_t __s1_761 = __p1_761; \
+  __ret_761 = vqrdmulhh_s16(__s0_761, vget_lane_s16(__s1_761, __p2_761)); \
+  __ret_761; \
+})
+#else
+#define vqrdmulhh_lane_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \
+  int16_t __ret_762; \
+  int16_t __s0_762 = __p0_762; \
+  int16x4_t __s1_762 = __p1_762; \
+  int16x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
+  __ret_762 = vqrdmulhh_s16(__s0_762, __noswap_vget_lane_s16(__rev1_762, __p2_762)); \
+  __ret_762; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_laneq_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \
+  int32_t __ret_763; \
+  int32_t __s0_763 = __p0_763; \
+  int32x4_t __s1_763 = __p1_763; \
+  __ret_763 = vqrdmulhs_s32(__s0_763, vgetq_lane_s32(__s1_763, __p2_763)); \
+  __ret_763; \
+})
+#else
+#define vqrdmulhs_laneq_s32(__p0_764, __p1_764, __p2_764) __extension__ ({ \
+  int32_t __ret_764; \
+  int32_t __s0_764 = __p0_764; \
+  int32x4_t __s1_764 = __p1_764; \
+  int32x4_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 3, 2, 1, 0); \
+  __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vgetq_lane_s32(__rev1_764, __p2_764)); \
+  __ret_764; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_laneq_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  int16_t __ret_765; \
+  int16_t __s0_765 = __p0_765; \
+  int16x8_t __s1_765 = __p1_765; \
+  __ret_765 = vqrdmulhh_s16(__s0_765, vgetq_lane_s16(__s1_765, __p2_765)); \
+  __ret_765; \
+})
+#else
+#define vqrdmulhh_laneq_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  int16_t __ret_766; \
+  int16_t __s0_766 = __p0_766; \
+  int16x8_t __s1_766 = __p1_766; \
+  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vgetq_lane_s16(__rev1_766, __p2_766)); \
+  __ret_766; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __ret; \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __ret; \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __ret; \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __ret; \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
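+// vqrshl*: saturating rounding shift left by a signed, possibly runtime
+// count; a negative count shifts right with rounding, e.g.
+// vqrshls_s32(7, -1) == 4 (7/2 rounded to nearest).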
+__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
+  return __ret;
+}
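+// vqrshrn_high_n_*: narrow the 128-bit source with a rounding saturating
+// right shift by an immediate and pack the result into the high half of
+// the output, keeping the first operand as the low half (via vcombine).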
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  uint16x8_t __ret_767; \
+  uint16x4_t __s0_767 = __p0_767; \
+  uint32x4_t __s1_767 = __p1_767; \
+  __ret_767 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_767), (uint16x4_t)(vqrshrn_n_u32(__s1_767, __p2_767)))); \
+  __ret_767; \
+})
+#else
+#define vqrshrn_high_n_u32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  uint16x8_t __ret_768; \
+  uint16x4_t __s0_768 = __p0_768; \
+  uint32x4_t __s1_768 = __p1_768; \
+  uint16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
+  uint32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
+  __ret_768 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_768), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_768, __p2_768)))); \
+  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_768; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
+  uint32x4_t __ret_769; \
+  uint32x2_t __s0_769 = __p0_769; \
+  uint64x2_t __s1_769 = __p1_769; \
+  __ret_769 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_769), (uint32x2_t)(vqrshrn_n_u64(__s1_769, __p2_769)))); \
+  __ret_769; \
+})
+#else
+#define vqrshrn_high_n_u64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
+  uint32x4_t __ret_770; \
+  uint32x2_t __s0_770 = __p0_770; \
+  uint64x2_t __s1_770 = __p1_770; \
+  uint32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
+  uint64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
+  __ret_770 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_770), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_770, __p2_770)))); \
+  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
+  __ret_770; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
+  uint8x16_t __ret_771; \
+  uint8x8_t __s0_771 = __p0_771; \
+  uint16x8_t __s1_771 = __p1_771; \
+  __ret_771 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_771), (uint8x8_t)(vqrshrn_n_u16(__s1_771, __p2_771)))); \
+  __ret_771; \
+})
+#else
+#define vqrshrn_high_n_u16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
+  uint8x16_t __ret_772; \
+  uint8x8_t __s0_772 = __p0_772; \
+  uint16x8_t __s1_772 = __p1_772; \
+  uint8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_772), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_772, __p2_772)))); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
+  int16x8_t __ret_773; \
+  int16x4_t __s0_773 = __p0_773; \
+  int32x4_t __s1_773 = __p1_773; \
+  __ret_773 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_773), (int16x4_t)(vqrshrn_n_s32(__s1_773, __p2_773)))); \
+  __ret_773; \
+})
+#else
+#define vqrshrn_high_n_s32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
+  int16x8_t __ret_774; \
+  int16x4_t __s0_774 = __p0_774; \
+  int32x4_t __s1_774 = __p1_774; \
+  int16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
+  int32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
+  __ret_774 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_774), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_774, __p2_774)))); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_774; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
+  int32x4_t __ret_775; \
+  int32x2_t __s0_775 = __p0_775; \
+  int64x2_t __s1_775 = __p1_775; \
+  __ret_775 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_775), (int32x2_t)(vqrshrn_n_s64(__s1_775, __p2_775)))); \
+  __ret_775; \
+})
+#else
+#define vqrshrn_high_n_s64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
+  int32x4_t __ret_776; \
+  int32x2_t __s0_776 = __p0_776; \
+  int64x2_t __s1_776 = __p1_776; \
+  int32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
+  int64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
+  __ret_776 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_776), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_776, __p2_776)))); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
+  __ret_776; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
+  int8x16_t __ret_777; \
+  int8x8_t __s0_777 = __p0_777; \
+  int16x8_t __s1_777 = __p1_777; \
+  __ret_777 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_777), (int8x8_t)(vqrshrn_n_s16(__s1_777, __p2_777)))); \
+  __ret_777; \
+})
+#else
+#define vqrshrn_high_n_s16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
+  int8x16_t __ret_778; \
+  int8x8_t __s0_778 = __p0_778; \
+  int16x8_t __s1_778 = __p1_778; \
+  int8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_778), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_778, __p2_778)))); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778; \
+})
+#endif
+
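+// Scalar forms of the rounding saturating narrowing shifts; __p1 must be a
+// compile-time-constant shift amount, as with all _n_ intrinsics.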
+#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
+  int16x8_t __ret_779; \
+  int16x4_t __s0_779 = __p0_779; \
+  int32x4_t __s1_779 = __p1_779; \
+  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqrshrun_n_s32(__s1_779, __p2_779)))); \
+  __ret_779; \
+})
+#else
+#define vqrshrun_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
+  int16x8_t __ret_780; \
+  int16x4_t __s0_780 = __p0_780; \
+  int32x4_t __s1_780 = __p1_780; \
+  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
+  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
+  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_780, __p2_780)))); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_780; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
+  int32x4_t __ret_781; \
+  int32x2_t __s0_781 = __p0_781; \
+  int64x2_t __s1_781 = __p1_781; \
+  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqrshrun_n_s64(__s1_781, __p2_781)))); \
+  __ret_781; \
+})
+#else
+#define vqrshrun_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
+  int32x4_t __ret_782; \
+  int32x2_t __s0_782 = __p0_782; \
+  int64x2_t __s1_782 = __p1_782; \
+  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
+  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
+  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_782, __p2_782)))); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
+  __ret_782; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
+  int8x16_t __ret_783; \
+  int8x8_t __s0_783 = __p0_783; \
+  int16x8_t __s1_783 = __p1_783; \
+  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqrshrun_n_s16(__s1_783, __p2_783)))); \
+  __ret_783; \
+})
+#else
+#define vqrshrun_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
+  int8x16_t __ret_784; \
+  int8x8_t __s0_784 = __p0_784; \
+  int16x8_t __s1_784 = __p1_784; \
+  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_784, __p2_784)))); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784; \
+})
+#endif
+
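+// vqrshrun*: rounding right shift that narrows a signed source to the
+// unsigned range; negative results clamp to 0, e.g.
+// vqrshruns_n_s32(-100, 2) == 0.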
+#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
+  __ret; \
+})
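+// vqshl* (register form): saturating shift left by a signed count held in
+// a register; unlike vqrshl there is no rounding on negative (right)
+// shifts, so vqshls_s32(7, -1) == 3.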
+__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
+  return __ret;
+}
+#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint8_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
+  __ret; \
+})
+#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
+  __ret; \
+})
+#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
+  __ret; \
+})
+#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int8_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
+  __ret; \
+})
+#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
+  __ret; \
+})
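+// Note: the _n variants take the shift count as a compile-time immediate,
+// and the vqshlu[bhsd]_n forms implement SQSHLU: a signed input shifted left
+// and saturated to the *unsigned* range of the element (the wrappers keep a
+// signed return type, so the unsigned value is returned as its bit pattern).
+// Illustrative sketch:
+//
+//   int8_t p = vqshlub_n_s8(100, 1); // 200 fits in u8; returned as (int8_t)200
+//   int8_t q = vqshlub_n_s8(-5, 1);  // negative input saturates to 0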
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
+  uint16x8_t __ret_785; \
+  uint16x4_t __s0_785 = __p0_785; \
+  uint32x4_t __s1_785 = __p1_785; \
+  __ret_785 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_785), (uint16x4_t)(vqshrn_n_u32(__s1_785, __p2_785)))); \
+  __ret_785; \
+})
+#else
+#define vqshrn_high_n_u32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
+  uint16x8_t __ret_786; \
+  uint16x4_t __s0_786 = __p0_786; \
+  uint32x4_t __s1_786 = __p1_786; \
+  uint16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
+  uint32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
+  __ret_786 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_786), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_786, __p2_786)))); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_786; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
+  uint32x4_t __ret_787; \
+  uint32x2_t __s0_787 = __p0_787; \
+  uint64x2_t __s1_787 = __p1_787; \
+  __ret_787 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_787), (uint32x2_t)(vqshrn_n_u64(__s1_787, __p2_787)))); \
+  __ret_787; \
+})
+#else
+#define vqshrn_high_n_u64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
+  uint32x4_t __ret_788; \
+  uint32x2_t __s0_788 = __p0_788; \
+  uint64x2_t __s1_788 = __p1_788; \
+  uint32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
+  uint64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
+  __ret_788 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_788), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_788, __p2_788)))); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
+  __ret_788; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
+  uint8x16_t __ret_789; \
+  uint8x8_t __s0_789 = __p0_789; \
+  uint16x8_t __s1_789 = __p1_789; \
+  __ret_789 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_789), (uint8x8_t)(vqshrn_n_u16(__s1_789, __p2_789)))); \
+  __ret_789; \
+})
+#else
+#define vqshrn_high_n_u16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
+  uint8x16_t __ret_790; \
+  uint8x8_t __s0_790 = __p0_790; \
+  uint16x8_t __s1_790 = __p1_790; \
+  uint8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_790), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_790, __p2_790)))); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
+  int16x8_t __ret_791; \
+  int16x4_t __s0_791 = __p0_791; \
+  int32x4_t __s1_791 = __p1_791; \
+  __ret_791 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_791), (int16x4_t)(vqshrn_n_s32(__s1_791, __p2_791)))); \
+  __ret_791; \
+})
+#else
+#define vqshrn_high_n_s32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
+  int16x8_t __ret_792; \
+  int16x4_t __s0_792 = __p0_792; \
+  int32x4_t __s1_792 = __p1_792; \
+  int16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
+  int32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
+  __ret_792 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_792), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_792, __p2_792)))); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_792; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
+  int32x4_t __ret_793; \
+  int32x2_t __s0_793 = __p0_793; \
+  int64x2_t __s1_793 = __p1_793; \
+  __ret_793 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_793), (int32x2_t)(vqshrn_n_s64(__s1_793, __p2_793)))); \
+  __ret_793; \
+})
+#else
+#define vqshrn_high_n_s64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
+  int32x4_t __ret_794; \
+  int32x2_t __s0_794 = __p0_794; \
+  int64x2_t __s1_794 = __p1_794; \
+  int32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
+  int64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
+  __ret_794 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_794), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_794, __p2_794)))); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
+  __ret_794; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
+  int8x16_t __ret_795; \
+  int8x8_t __s0_795 = __p0_795; \
+  int16x8_t __s1_795 = __p1_795; \
+  __ret_795 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_795), (int8x8_t)(vqshrn_n_s16(__s1_795, __p2_795)))); \
+  __ret_795; \
+})
+#else
+#define vqshrn_high_n_s16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
+  int8x16_t __ret_796; \
+  int8x8_t __s0_796 = __p0_796; \
+  int16x8_t __s1_796 = __p1_796; \
+  int8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_796), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_796, __p2_796)))); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796; \
+})
+#endif
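+
+// Note: the vqshrn_high_n_* macros implement the AArch64 "...2" narrowing
+// pattern (UQSHRN2/SQSHRN2): __p1 is shifted right by the immediate,
+// saturated, and narrowed to half width, and the result is packed into the
+// high half of a 128-bit vector whose low half is __p0. A sketch, where
+// acc0/acc1 are placeholder uint32x4_t accumulators:
+//
+//   uint16x4_t lo  = vqshrn_n_u32(acc0, 8);          // low four lanes
+//   uint16x8_t all = vqshrn_high_n_u32(lo, acc1, 8); // high four lanes appended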
+
+#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
+  uint16_t __ret; \
+  uint32_t __s0 = __p0; \
+  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
+  uint32_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
+  uint8_t __ret; \
+  uint16_t __s0 = __p0; \
+  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
+  __ret; \
+})
+#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
+  int16x8_t __ret_797; \
+  int16x4_t __s0_797 = __p0_797; \
+  int32x4_t __s1_797 = __p1_797; \
+  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vqshrun_n_s32(__s1_797, __p2_797)))); \
+  __ret_797; \
+})
+#else
+#define vqshrun_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
+  int16x8_t __ret_798; \
+  int16x4_t __s0_798 = __p0_798; \
+  int32x4_t __s1_798 = __p1_798; \
+  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
+  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
+  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_798, __p2_798)))); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_798; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
+  int32x4_t __ret_799; \
+  int32x2_t __s0_799 = __p0_799; \
+  int64x2_t __s1_799 = __p1_799; \
+  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vqshrun_n_s64(__s1_799, __p2_799)))); \
+  __ret_799; \
+})
+#else
+#define vqshrun_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
+  int32x4_t __ret_800; \
+  int32x2_t __s0_800 = __p0_800; \
+  int64x2_t __s1_800 = __p1_800; \
+  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
+  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
+  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_800, __p2_800)))); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
+  __ret_800; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
+  int8x16_t __ret_801; \
+  int8x8_t __s0_801 = __p0_801; \
+  int16x8_t __s1_801 = __p1_801; \
+  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vqshrun_n_s16(__s1_801, __p2_801)))); \
+  __ret_801; \
+})
+#else
+#define vqshrun_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
+  int8x16_t __ret_802; \
+  int8x8_t __s0_802 = __p0_802; \
+  int16x8_t __s1_802 = __p1_802; \
+  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_802, __p2_802)))); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802; \
+})
+#endif
+
+#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
+  int16_t __ret; \
+  int32_t __s0 = __p0; \
+  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
+  __ret; \
+})
+#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
+  int32_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
+  __ret; \
+})
+#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
+  int8_t __ret; \
+  int16_t __s0 = __p0; \
+  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
+  __ret; \
+})
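+// Note: the scalar vqshrn*_n / vqshrun*_n macros above are the non-rounding
+// counterparts of the vqrshr* family: one wide element is narrowed by an
+// immediate right shift with saturation, and the "un" forms take a signed
+// input saturated to the unsigned destination range. Illustrative sketch:
+//
+//   uint16_t a = vqshrns_n_u32(0x12345u, 4); // 0x1234
+//   int16_t  b = vqshruns_n_s32(-100, 2);    // negative saturates to 0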
+__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
+  return __ret;
+}
+__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
+  int8_t __ret;
+  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
+  return __ret;
+}
+__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
+  return __ret;
+}
+__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
+  int64_t __ret;
+  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
+  return __ret;
+}
+__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
+  return __ret;
+}
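+// Note: vqsub[bhsd]_{u,s}* are single-element saturating subtracts
+// (UQSUB/SQSUB): the difference is clamped to the element's range instead of
+// wrapping. Illustrative sketch:
+//
+//   uint8_t u = vqsubb_u8(10, 20);    // would wrap around; saturates to 0
+//   int8_t  s = vqsubb_s8(-100, 100); // -200 saturates to -128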
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
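+
+// Note: vqtbl1* perform a byte table lookup (TBL): each output byte is
+// table[index] when the index byte addresses the 16-byte table, and 0 when
+// it is out of range. A minimal sketch that reverses the bytes of a vector
+// (`data` is a placeholder uint8x16_t input):
+//
+//   static const uint8_t rev[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+//   uint8x16_t reversed = vqtbl1q_u8(data, vld1q_u8(rev));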
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  poly8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+  poly8x16_t __ret;
+  poly8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  uint8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
+  int8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  uint8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
+  int8x16x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
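+
+// Note: vqtbl2/3/4* generalize the lookup to a 32-, 48- or 64-byte table
+// passed as a uint8x16x2/3/4_t (or the poly/int equivalents); index bytes
+// beyond the table size still yield 0. Sketch with placeholder halves and
+// indices:
+//
+//   uint8x16x2_t tbl = { { part0, part1 } }; // 32-byte table
+//   uint8x16_t   out = vqtbl2q_u8(tbl, idx); // idx bytes in [0,31] select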
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
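+
+// Note: vqtbx1* are the TBX ("extension") variant of the lookup:
+// out-of-range index bytes leave the corresponding byte of the first operand
+// unchanged instead of writing 0, so lookups can be layered over an existing
+// result. Sketch with placeholder operands:
+//
+//   uint8x16_t out = vqtbx1q_u8(fallback, table, idx); // keeps fallback bytes
+//                                                      // wherever idx >= 16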
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
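+/*
+ * The vqtbx[1-4]{q} intrinsics above lower to the AArch64 TBX extended
+ * table-lookup instruction: each lane of the index vector selects a byte
+ * from the concatenated table registers, and any out-of-range index leaves
+ * the corresponding lane of the first operand unchanged (unlike vqtbl,
+ * which zeroes it). A minimal usage sketch, with remap() as a hypothetical
+ * caller-side helper:
+ *
+ *   uint8x16_t remap(uint8x16_t fallback, uint8x16x4_t table, uint8x16_t idx) {
+ *       // Lanes where idx >= 64 keep the value from `fallback`.
+ *       return vqtbx4q_u8(fallback, table, idx);
+ *   }
+ */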
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
+  return __ret;
+}
+#else
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
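+/*
+ * The vraddhn_high_* intrinsics above are narrowing combiners: vraddhn
+ * performs a rounding add of __p1 and __p2 and keeps the upper half of each
+ * widened lane, and the narrowed result is packed into the high half of a
+ * full vector whose low half is __p0 (as the little-endian bodies show via
+ * vcombine). A minimal sketch, with acc_hi() as a hypothetical helper:
+ *
+ *   uint16x8_t acc_hi(uint16x4_t lo, uint32x4_t a, uint32x4_t b) {
+ *       // Equivalent to vcombine_u16(lo, vraddhn_u32(a, b)).
+ *       return vraddhn_high_u32(lo, a, b);
+ *   }
+ */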
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+  poly8x8_t __ret;
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+  poly8x16_t __ret;
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+  uint8x16_t __ret;
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+  int8x16_t __ret;
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+  uint8x8_t __ret;
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+  int8x8_t __ret;
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
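+/*
+ * vrbit/vrbitq reverse the bit order within each byte lane (the vector RBIT
+ * form); the byte order itself is untouched. A minimal sketch of the
+ * per-lane effect, with mirror8() as a hypothetical scalar reference:
+ *
+ *   static inline uint8_t mirror8(uint8_t b) {
+ *       b = (uint8_t)((b >> 4) | (b << 4));                    // swap nibbles
+ *       b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));  // swap bit pairs
+ *       return (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1)); // swap bits
+ *   }
+ *   // For each lane i: vrbit_u8(v)[i] == mirror8(v[i])
+ */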
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+__ai float64_t vrecped_f64(float64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
+  return __ret;
+}
+__ai float32_t vrecpes_f32(float32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
+  return __ret;
+}
+__ai float64_t vrecpxd_f64(float64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
+  return __ret;
+}
+__ai float32_t vrecpxs_f32(float32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
+  return __ret;
+}
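+/*
+ * vrecpe* return a low-precision reciprocal estimate (FRECPE) and vrecps*
+ * the matching Newton-Raphson step (FRECPS, which computes 2 - a*b), so
+ * each extra step roughly doubles the number of accurate bits. A minimal
+ * refinement sketch, with recip_f64x2() as a hypothetical helper:
+ *
+ *   float64x2_t recip_f64x2(float64x2_t d) {
+ *       float64x2_t x = vrecpeq_f64(d);           // initial estimate
+ *       x = vmulq_f64(x, vrecpsq_f64(d, x));      // refinement step 1
+ *       x = vmulq_f64(x, vrecpsq_f64(d, x));      // refinement step 2
+ *       return x;
+ *   }
+ */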
 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
   poly8x8_t __ret;
   __ret = (poly8x8_t)(__p0);
@@ -38590,23450 +61791,6 @@
   __ret = (int16x4_t)(__p0);
   return __ret;
 }
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
-  float32x4_t __s0_142 = __p0_142; \
-  bfloat16x8_t __s1_142 = __p1_142; \
-  bfloat16x4_t __s2_142 = __p2_142; \
-  float32x4_t __ret_142; \
-bfloat16x4_t __reint_142 = __s2_142; \
-float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
-  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
-  __ret_142; \
-})
-#else
-#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
-  float32x4_t __s0_143 = __p0_143; \
-  bfloat16x8_t __s1_143 = __p1_143; \
-  bfloat16x4_t __s2_143 = __p2_143; \
-  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
-  float32x4_t __ret_143; \
-bfloat16x4_t __reint_143 = __rev2_143; \
-float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
-  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
-  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
-  __ret_143; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
-  float32x2_t __s0_144 = __p0_144; \
-  bfloat16x4_t __s1_144 = __p1_144; \
-  bfloat16x4_t __s2_144 = __p2_144; \
-  float32x2_t __ret_144; \
-bfloat16x4_t __reint_144 = __s2_144; \
-float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
-  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
-  __ret_144; \
-})
-#else
-#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  float32x2_t __s0_145 = __p0_145; \
-  bfloat16x4_t __s1_145 = __p1_145; \
-  bfloat16x4_t __s2_145 = __p2_145; \
-  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
-  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
-  float32x2_t __ret_145; \
-bfloat16x4_t __reint_145 = __rev2_145; \
-float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
-  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
-  __ret_145; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  float32x4_t __s0_146 = __p0_146; \
-  bfloat16x8_t __s1_146 = __p1_146; \
-  bfloat16x8_t __s2_146 = __p2_146; \
-  float32x4_t __ret_146; \
-bfloat16x8_t __reint_146 = __s2_146; \
-float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
-  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
-  __ret_146; \
-})
-#else
-#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  float32x4_t __s0_147 = __p0_147; \
-  bfloat16x8_t __s1_147 = __p1_147; \
-  bfloat16x8_t __s2_147 = __p2_147; \
-  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_147; \
-bfloat16x8_t __reint_147 = __rev2_147; \
-float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
-  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
-  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  float32x2_t __s0_148 = __p0_148; \
-  bfloat16x4_t __s1_148 = __p1_148; \
-  bfloat16x8_t __s2_148 = __p2_148; \
-  float32x2_t __ret_148; \
-bfloat16x8_t __reint_148 = __s2_148; \
-float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
-  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
-  __ret_148; \
-})
-#else
-#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  float32x2_t __s0_149 = __p0_149; \
-  bfloat16x4_t __s1_149 = __p1_149; \
-  bfloat16x8_t __s2_149 = __p2_149; \
-  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
-  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_149; \
-bfloat16x8_t __reint_149 = __rev2_149; \
-float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
-  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
-  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
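`vbfmlalbq_f32` and `vbfmlaltq_f32` below widen the bottom (even-indexed) and top (odd-indexed) bfloat16 lanes respectively, multiply, and accumulate into four float32 lanes. A scalar sketch with illustrative names:

#include <stdint.h>
#include <string.h>

static float bf16_to_f32(uint16_t h) {  /* as in the sketch above */
  uint32_t bits = (uint32_t)h << 16;
  float f;
  memcpy(&f, &bits, sizeof f);
  return f;
}

/* BFMLALB model: even-indexed bf16 lanes ("bottom"). */
static void bfmlalb_ref(float acc[4], const uint16_t b[8], const uint16_t c[8]) {
  for (int i = 0; i < 4; ++i)
    acc[i] += bf16_to_f32(b[2 * i]) * bf16_to_f32(c[2 * i]);
}

/* BFMLALT model: odd-indexed bf16 lanes ("top"). */
static void bfmlalt_ref(float acc[4], const uint16_t b[8], const uint16_t c[8]) {
  for (int i = 0; i < 4; ++i)
    acc[i] += bf16_to_f32(b[2 * i + 1]) * bf16_to_f32(c[2 * i + 1]);
}
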
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (bfloat16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
-  float32x4_t __ret_150;
-  bfloat16x4_t __reint_150 = __p0_150;
-  int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
-  __ret_150 = *(float32x4_t *) &__reint1_150;
-  return __ret_150;
-}
-#else
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
-  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
-  float32x4_t __ret_151;
-  bfloat16x4_t __reint_151 = __rev0_151;
-  int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
-  __ret_151 = *(float32x4_t *) &__reint1_151;
-  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
-  return __ret_151;
-}
-__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
-  float32x4_t __ret_152;
-  bfloat16x4_t __reint_152 = __p0_152;
-  int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
-  __ret_152 = *(float32x4_t *) &__reint1_152;
-  return __ret_152;
-}
-#endif
-
-__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
-  float32_t __ret;
-  bfloat16_t __reint = __p0;
-  int32_t __reint1 = *(int32_t *) &__reint << 16;
-  __ret = *(float32_t *) &__reint1;
-  return __ret;
-}
-__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
-  bfloat16_t __ret;
-  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
-  return __ret;
-}
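`vcvtah_f32_bf16` above makes the widening rule explicit: shifting the 16 bfloat16 bits into the high half of a 32-bit word reproduces the float32 value exactly. Narrowing is inexact, which is why `vcvth_bf16_f32` defers to a builtin; a common software model rounds to nearest-even (a sketch with illustrative names; hardware NaN details may differ):

#include <stdint.h>
#include <string.h>

/* Exact widening: bf16 -> f32 by placing the bits in the high half. */
static float bf16_widen(uint16_t h) {
  uint32_t bits = (uint32_t)h << 16;
  float f;
  memcpy(&f, &bits, sizeof f);
  return f;
}

/* Inexact narrowing: f32 -> bf16, round to nearest, ties to even. */
static uint16_t bf16_narrow_rne(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof bits);
  if ((bits & 0x7F800000u) == 0x7F800000u)  /* Inf or NaN: keep NaNs quiet */
    return (uint16_t)((bits >> 16) | ((bits & 0x007FFFFFu) ? 0x0040u : 0u));
  bits += 0x7FFFu + ((bits >> 16) & 1u);    /* carry implements RNE */
  return (uint16_t)(bits >> 16);
}
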
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
-  bfloat16x4_t __s0_153 = __p0_153; \
-  bfloat16x8_t __ret_153; \
-  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
-  __ret_153; \
-})
-#else
-#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
-  bfloat16x4_t __s0_154 = __p0_154; \
-  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_154; \
-  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
-  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_154; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
-  bfloat16x4_t __s0_155 = __p0_155; \
-  bfloat16x4_t __ret_155; \
-  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
-  __ret_155; \
-})
-#else
-#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
-  bfloat16x4_t __s0_156 = __p0_156; \
-  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_156; \
-  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
-  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
-  __ret_156; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
-  bfloat16x8_t __s0_157 = __p0_157; \
-  bfloat16x8_t __ret_157; \
-  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
-  __ret_157; \
-})
-#else
-#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
-  bfloat16x8_t __s0_158 = __p0_158; \
-  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_158; \
-  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
-  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_158; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
-  bfloat16x8_t __s0_159 = __p0_159; \
-  bfloat16x4_t __ret_159; \
-  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
-  __ret_159; \
-})
-#else
-#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
-  bfloat16x8_t __s0_160 = __p0_160; \
-  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_160; \
-  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
-  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
-  __ret_160; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-  __ret; \
-})
-#else
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-  __ret; \
-})
-#else
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
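The `vld2*`/`vld3*`/`vld4*` families de-interleave structure-of-N data as they load; `vld2_bf16` above, for example, splits alternating elements into `val[0]` and `val[1]`. A usage sketch (assumes an AArch64 toolchain with bfloat16 support enabled, e.g. `-march=armv8.6-a+bf16`):

#include <arm_neon.h>

/* De-interleave {even, odd, even, odd, ...} pairs from memory. */
void split_pairs_bf16(const bfloat16_t *src,
                      bfloat16x4_t *even, bfloat16x4_t *odd) {
  bfloat16x4x2_t v = vld2_bf16(src);  /* one structure load, 8 elements */
  *even = v.val[0];                   /* elements 0, 2, 4, 6 */
  *odd  = v.val[1];                   /* elements 1, 3, 5, 7 */
}
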
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
-
-#endif
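The `vst*` stores above mirror the loads: on big-endian targets each source vector is lane-reversed before the little-endian builtin writes it out, so the memory layout is identical on both endiannesses. A usage sketch for the interleaving store, under the same toolchain assumptions as the load sketch earlier:

#include <arm_neon.h>

/* Re-interleave two vectors into {even, odd, ...} order in memory. */
void merge_pairs_bf16(bfloat16_t *dst, bfloat16x4_t even, bfloat16x4_t odd) {
  bfloat16x4x2_t v = { { even, odd } };
  vst2_bf16(dst, v);  /* writes even[0], odd[0], even[1], odd[1], ... */
}
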
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __a32_vcvt_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
-  bfloat16x8_t __s0_161 = __p0_161; \
-  bfloat16x4_t __s2_161 = __p2_161; \
-  bfloat16x8_t __ret_161; \
-  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
-  __ret_161; \
-})
-#else
-#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
-  bfloat16x8_t __s0_162 = __p0_162; \
-  bfloat16x4_t __s2_162 = __p2_162; \
-  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_162; \
-  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
-  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_162; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
-  bfloat16x4_t __s0_163 = __p0_163; \
-  bfloat16x4_t __s2_163 = __p2_163; \
-  bfloat16x4_t __ret_163; \
-  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
-  __ret_163; \
-})
-#else
-#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
-  bfloat16x4_t __s0_164 = __p0_164; \
-  bfloat16x4_t __s2_164 = __p2_164; \
-  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_164; \
-  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
-  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
-  __ret_164; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
-  bfloat16x8_t __s0_165 = __p0_165; \
-  bfloat16x8_t __s2_165 = __p2_165; \
-  bfloat16x8_t __ret_165; \
-  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
-  __ret_165; \
-})
-#else
-#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
-  bfloat16x8_t __s0_166 = __p0_166; \
-  bfloat16x8_t __s2_166 = __p2_166; \
-  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_166; \
-  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
-  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_166; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
-  bfloat16x4_t __s0_167 = __p0_167; \
-  bfloat16x8_t __s2_167 = __p2_167; \
-  bfloat16x4_t __ret_167; \
-  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
-  __ret_167; \
-})
-#else
-#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
-  bfloat16x4_t __s0_168 = __p0_168; \
-  bfloat16x8_t __s2_168 = __p2_168; \
-  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_168; \
-  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
-  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
-  __ret_168; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = __a64_vcvtq_low_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
-  float32x2_t __s0_169 = __p0_169; \
-  float32x2_t __s1_169 = __p1_169; \
-  float32x2_t __s2_169 = __p2_169; \
-  float32x2_t __ret_169; \
-float32x2_t __reint_169 = __s2_169; \
-uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
-  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
-  __ret_169; \
-})
-#else
-#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
-  float32x2_t __s0_170 = __p0_170; \
-  float32x2_t __s1_170 = __p1_170; \
-  float32x2_t __s2_170 = __p2_170; \
-  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
-  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
-  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
-  float32x2_t __ret_170; \
-float32x2_t __reint_170 = __rev2_170; \
-uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
-  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
-  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
-  __ret_170; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
-  float32x4_t __s0_171 = __p0_171; \
-  float32x4_t __s1_171 = __p1_171; \
-  float32x2_t __s2_171 = __p2_171; \
-  float32x4_t __ret_171; \
-float32x2_t __reint_171 = __s2_171; \
-uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
-  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
-  __ret_171; \
-})
-#else
-#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
-  float32x4_t __s0_172 = __p0_172; \
-  float32x4_t __s1_172 = __p1_172; \
-  float32x2_t __s2_172 = __p2_172; \
-  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
-  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
-  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
-  float32x4_t __ret_172; \
-float32x2_t __reint_172 = __rev2_172; \
-uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
-  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
-  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
-  __ret_172; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
-  float32x2_t __s0_173 = __p0_173; \
-  float32x2_t __s1_173 = __p1_173; \
-  float32x4_t __s2_173 = __p2_173; \
-  float32x2_t __ret_173; \
-float32x4_t __reint_173 = __s2_173; \
-uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
-  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
-  __ret_173; \
-})
-#else
-#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
-  float32x2_t __s0_174 = __p0_174; \
-  float32x2_t __s1_174 = __p1_174; \
-  float32x4_t __s2_174 = __p2_174; \
-  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
-  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
-  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
-  float32x2_t __ret_174; \
-float32x4_t __reint_174 = __rev2_174; \
-uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
-  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
-  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
-  __ret_174; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
-  float32x4_t __s0_175 = __p0_175; \
-  float32x4_t __s1_175 = __p1_175; \
-  float32x4_t __s2_175 = __p2_175; \
-  float32x4_t __ret_175; \
-float32x4_t __reint_175 = __s2_175; \
-uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
-  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
-  __ret_175; \
-})
-#else
-#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
-  float32x4_t __s0_176 = __p0_176; \
-  float32x4_t __s1_176 = __p1_176; \
-  float32x4_t __s2_176 = __p2_176; \
-  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
-  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
-  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
-  float32x4_t __ret_176; \
-float32x4_t __reint_176 = __rev2_176; \
-uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
-  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
-  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
-  __ret_176; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
-  float32x2_t __s0_177 = __p0_177; \
-  float32x2_t __s1_177 = __p1_177; \
-  float32x2_t __s2_177 = __p2_177; \
-  float32x2_t __ret_177; \
-float32x2_t __reint_177 = __s2_177; \
-uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
-  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
-  __ret_177; \
-})
-#else
-#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
-  float32x2_t __s0_178 = __p0_178; \
-  float32x2_t __s1_178 = __p1_178; \
-  float32x2_t __s2_178 = __p2_178; \
-  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
-  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
-  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
-  float32x2_t __ret_178; \
-float32x2_t __reint_178 = __rev2_178; \
-uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
-  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
-  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
-  __ret_178; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
-  float32x4_t __s0_179 = __p0_179; \
-  float32x4_t __s1_179 = __p1_179; \
-  float32x2_t __s2_179 = __p2_179; \
-  float32x4_t __ret_179; \
-float32x2_t __reint_179 = __s2_179; \
-uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
-  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
-  __ret_179; \
-})
-#else
-#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
-  float32x4_t __s0_180 = __p0_180; \
-  float32x4_t __s1_180 = __p1_180; \
-  float32x2_t __s2_180 = __p2_180; \
-  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
-  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
-  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
-  float32x4_t __ret_180; \
-float32x2_t __reint_180 = __rev2_180; \
-uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
-  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
-  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
-  __ret_180; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
-  float32x2_t __s0_181 = __p0_181; \
-  float32x2_t __s1_181 = __p1_181; \
-  float32x4_t __s2_181 = __p2_181; \
-  float32x2_t __ret_181; \
-float32x4_t __reint_181 = __s2_181; \
-uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
-  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
-  __ret_181; \
-})
-#else
-#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
-  float32x2_t __s0_182 = __p0_182; \
-  float32x2_t __s1_182 = __p1_182; \
-  float32x4_t __s2_182 = __p2_182; \
-  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
-  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
-  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
-  float32x2_t __ret_182; \
-float32x4_t __reint_182 = __rev2_182; \
-uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
-  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
-  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
-  __ret_182; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
-  float32x4_t __s0_183 = __p0_183; \
-  float32x4_t __s1_183 = __p1_183; \
-  float32x4_t __s2_183 = __p2_183; \
-  float32x4_t __ret_183; \
-float32x4_t __reint_183 = __s2_183; \
-uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
-  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
-  __ret_183; \
-})
-#else
-#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
-  float32x4_t __s0_184 = __p0_184; \
-  float32x4_t __s1_184 = __p1_184; \
-  float32x4_t __s2_184 = __p2_184; \
-  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
-  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
-  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
-  float32x4_t __ret_184; \
-float32x4_t __reint_184 = __rev2_184; \
-uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
-  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
-  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
-  __ret_184; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
-  float32x2_t __s0_185 = __p0_185; \
-  float32x2_t __s1_185 = __p1_185; \
-  float32x2_t __s2_185 = __p2_185; \
-  float32x2_t __ret_185; \
-float32x2_t __reint_185 = __s2_185; \
-uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
-  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
-  __ret_185; \
-})
-#else
-#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
-  float32x2_t __s0_186 = __p0_186; \
-  float32x2_t __s1_186 = __p1_186; \
-  float32x2_t __s2_186 = __p2_186; \
-  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
-  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
-  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
-  float32x2_t __ret_186; \
-float32x2_t __reint_186 = __rev2_186; \
-uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
-  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
-  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
-  __ret_186; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
-  float32x4_t __s0_187 = __p0_187; \
-  float32x4_t __s1_187 = __p1_187; \
-  float32x2_t __s2_187 = __p2_187; \
-  float32x4_t __ret_187; \
-float32x2_t __reint_187 = __s2_187; \
-uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
-  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
-  __ret_187; \
-})
-#else
-#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
-  float32x4_t __s0_188 = __p0_188; \
-  float32x4_t __s1_188 = __p1_188; \
-  float32x2_t __s2_188 = __p2_188; \
-  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
-  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
-  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
-  float32x4_t __ret_188; \
-float32x2_t __reint_188 = __rev2_188; \
-uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
-  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
-  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
-  __ret_188; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
-  float32x2_t __s0_189 = __p0_189; \
-  float32x2_t __s1_189 = __p1_189; \
-  float32x4_t __s2_189 = __p2_189; \
-  float32x2_t __ret_189; \
-float32x4_t __reint_189 = __s2_189; \
-uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
-  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
-  __ret_189; \
-})
-#else
-#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
-  float32x2_t __s0_190 = __p0_190; \
-  float32x2_t __s1_190 = __p1_190; \
-  float32x4_t __s2_190 = __p2_190; \
-  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
-  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
-  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
-  float32x2_t __ret_190; \
-float32x4_t __reint_190 = __rev2_190; \
-uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
-  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
-  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
-  __ret_190; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
-  float32x4_t __s0_191 = __p0_191; \
-  float32x4_t __s1_191 = __p1_191; \
-  float32x4_t __s2_191 = __p2_191; \
-  float32x4_t __ret_191; \
-float32x4_t __reint_191 = __s2_191; \
-uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
-  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
-  __ret_191; \
-})
-#else
-#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
-  float32x4_t __s0_192 = __p0_192; \
-  float32x4_t __s1_192 = __p1_192; \
-  float32x4_t __s2_192 = __p2_192; \
-  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
-  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
-  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
-  float32x4_t __ret_192; \
-float32x4_t __reint_192 = __rev2_192; \
-uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
-  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
-  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
-  __ret_192; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
-  float32x2_t __s0_193 = __p0_193; \
-  float32x2_t __s1_193 = __p1_193; \
-  float32x2_t __s2_193 = __p2_193; \
-  float32x2_t __ret_193; \
-float32x2_t __reint_193 = __s2_193; \
-uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
-  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
-  __ret_193; \
-})
-#else
-#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
-  float32x2_t __s0_194 = __p0_194; \
-  float32x2_t __s1_194 = __p1_194; \
-  float32x2_t __s2_194 = __p2_194; \
-  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
-  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
-  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
-  float32x2_t __ret_194; \
-float32x2_t __reint_194 = __rev2_194; \
-uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
-  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
-  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
-  __ret_194; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
-  float32x4_t __s0_195 = __p0_195; \
-  float32x4_t __s1_195 = __p1_195; \
-  float32x2_t __s2_195 = __p2_195; \
-  float32x4_t __ret_195; \
-float32x2_t __reint_195 = __s2_195; \
-uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
-  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
-  __ret_195; \
-})
-#else
-#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
-  float32x4_t __s0_196 = __p0_196; \
-  float32x4_t __s1_196 = __p1_196; \
-  float32x2_t __s2_196 = __p2_196; \
-  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
-  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
-  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
-  float32x4_t __ret_196; \
-float32x2_t __reint_196 = __rev2_196; \
-uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
-  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
-  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
-  __ret_196; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
-  float32x2_t __s0_197 = __p0_197; \
-  float32x2_t __s1_197 = __p1_197; \
-  float32x4_t __s2_197 = __p2_197; \
-  float32x2_t __ret_197; \
-float32x4_t __reint_197 = __s2_197; \
-uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
-  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
-  __ret_197; \
-})
-#else
-#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
-  float32x2_t __s0_198 = __p0_198; \
-  float32x2_t __s1_198 = __p1_198; \
-  float32x4_t __s2_198 = __p2_198; \
-  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
-  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
-  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
-  float32x2_t __ret_198; \
-float32x4_t __reint_198 = __rev2_198; \
-uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
-  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
-  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
-  __ret_198; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
-  float32x4_t __s0_199 = __p0_199; \
-  float32x4_t __s1_199 = __p1_199; \
-  float32x4_t __s2_199 = __p2_199; \
-  float32x4_t __ret_199; \
-float32x4_t __reint_199 = __s2_199; \
-uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
-  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
-  __ret_199; \
-})
-#else
-#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
-  float32x4_t __s0_200 = __p0_200; \
-  float32x4_t __s1_200 = __p1_200; \
-  float32x4_t __s2_200 = __p2_200; \
-  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
-  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
-  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
-  float32x4_t __ret_200; \
-float32x4_t __reint_200 = __rev2_200; \
-uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
-  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
-  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
-  __ret_200; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
-  float16x4_t __s0_201 = __p0_201; \
-  float16x4_t __s1_201 = __p1_201; \
-  float16x4_t __s2_201 = __p2_201; \
-  float16x4_t __ret_201; \
-float16x4_t __reint_201 = __s2_201; \
-uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
-  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
-  __ret_201; \
-})
-#else
-#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
-  float16x4_t __s0_202 = __p0_202; \
-  float16x4_t __s1_202 = __p1_202; \
-  float16x4_t __s2_202 = __p2_202; \
-  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
-  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
-  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
-  float16x4_t __ret_202; \
-float16x4_t __reint_202 = __rev2_202; \
-uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
-  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
-  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
-  __ret_202; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
-  float16x8_t __s0_203 = __p0_203; \
-  float16x8_t __s1_203 = __p1_203; \
-  float16x4_t __s2_203 = __p2_203; \
-  float16x8_t __ret_203; \
-float16x4_t __reint_203 = __s2_203; \
-uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
-  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
-  __ret_203; \
-})
-#else
-#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
-  float16x8_t __s0_204 = __p0_204; \
-  float16x8_t __s1_204 = __p1_204; \
-  float16x4_t __s2_204 = __p2_204; \
-  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
-  float16x8_t __ret_204; \
-float16x4_t __reint_204 = __rev2_204; \
-uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
-  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
-  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_204; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
-  float16x4_t __s0_205 = __p0_205; \
-  float16x4_t __s1_205 = __p1_205; \
-  float16x8_t __s2_205 = __p2_205; \
-  float16x4_t __ret_205; \
-float16x8_t __reint_205 = __s2_205; \
-uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
-  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
-  __ret_205; \
-})
-#else
-#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
-  float16x4_t __s0_206 = __p0_206; \
-  float16x4_t __s1_206 = __p1_206; \
-  float16x8_t __s2_206 = __p2_206; \
-  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
-  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
-  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_206; \
-float16x8_t __reint_206 = __rev2_206; \
-uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
-  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
-  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
-  __ret_206; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
-  float16x8_t __s0_207 = __p0_207; \
-  float16x8_t __s1_207 = __p1_207; \
-  float16x8_t __s2_207 = __p2_207; \
-  float16x8_t __ret_207; \
-float16x8_t __reint_207 = __s2_207; \
-uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
-  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
-  __ret_207; \
-})
-#else
-#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
-  float16x8_t __s0_208 = __p0_208; \
-  float16x8_t __s1_208 = __p1_208; \
-  float16x8_t __s2_208 = __p2_208; \
-  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_208; \
-float16x8_t __reint_208 = __rev2_208; \
-uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
-  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
-  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_208; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
-  float16x4_t __s0_209 = __p0_209; \
-  float16x4_t __s1_209 = __p1_209; \
-  float16x4_t __s2_209 = __p2_209; \
-  float16x4_t __ret_209; \
-float16x4_t __reint_209 = __s2_209; \
-uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
-  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
-  __ret_209; \
-})
-#else
-#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
-  float16x4_t __s0_210 = __p0_210; \
-  float16x4_t __s1_210 = __p1_210; \
-  float16x4_t __s2_210 = __p2_210; \
-  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
-  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
-  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
-  float16x4_t __ret_210; \
-float16x4_t __reint_210 = __rev2_210; \
-uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
-  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
-  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
-  __ret_210; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
-  float16x8_t __s0_211 = __p0_211; \
-  float16x8_t __s1_211 = __p1_211; \
-  float16x4_t __s2_211 = __p2_211; \
-  float16x8_t __ret_211; \
-float16x4_t __reint_211 = __s2_211; \
-uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
-  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
-  __ret_211; \
-})
-#else
-#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
-  float16x8_t __s0_212 = __p0_212; \
-  float16x8_t __s1_212 = __p1_212; \
-  float16x4_t __s2_212 = __p2_212; \
-  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
-  float16x8_t __ret_212; \
-float16x4_t __reint_212 = __rev2_212; \
-uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
-  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
-  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_212; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
-  float16x4_t __s0_213 = __p0_213; \
-  float16x4_t __s1_213 = __p1_213; \
-  float16x8_t __s2_213 = __p2_213; \
-  float16x4_t __ret_213; \
-float16x8_t __reint_213 = __s2_213; \
-uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
-  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
-  __ret_213; \
-})
-#else
-#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
-  float16x4_t __s0_214 = __p0_214; \
-  float16x4_t __s1_214 = __p1_214; \
-  float16x8_t __s2_214 = __p2_214; \
-  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
-  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
-  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_214; \
-float16x8_t __reint_214 = __rev2_214; \
-uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
-  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
-  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
-  __ret_214; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
-  float16x8_t __s0_215 = __p0_215; \
-  float16x8_t __s1_215 = __p1_215; \
-  float16x8_t __s2_215 = __p2_215; \
-  float16x8_t __ret_215; \
-float16x8_t __reint_215 = __s2_215; \
-uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
-  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
-  __ret_215; \
-})
-#else
-#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
-  float16x8_t __s0_216 = __p0_216; \
-  float16x8_t __s1_216 = __p1_216; \
-  float16x8_t __s2_216 = __p2_216; \
-  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_216; \
-float16x8_t __reint_216 = __rev2_216; \
-uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
-  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
-  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_216; \
-})
-#endif
-
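/*
 * Pattern note: every intrinsic in this hunk is defined twice. The
 * __LITTLE_ENDIAN__ branch forwards its arguments straight to the Clang
 * builtin; the big-endian branch first reverses the lanes of each operand
 * with __builtin_shufflevector, calls the builtin, then reverses the result
 * back. Conceptually, for an 8-lane vector:
 *
 *   rev8(x) = __builtin_shufflevector(x, x, 7, 6, 5, 4, 3, 2, 1, 0)
 *   result  = rev8(builtin(rev8(a), rev8(b), rev8(c)))
 *
 * The __noswap_* helpers are the same builtins without the rev steps; other
 * wrappers call them when their operands are already in reversed order, so
 * lanes are never swapped twice.
 */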
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
-  float16x4_t __s0_217 = __p0_217; \
-  float16x4_t __s1_217 = __p1_217; \
-  float16x4_t __s2_217 = __p2_217; \
-  float16x4_t __ret_217; \
-float16x4_t __reint_217 = __s2_217; \
-uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
-  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
-  __ret_217; \
-})
-#else
-#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
-  float16x4_t __s0_218 = __p0_218; \
-  float16x4_t __s1_218 = __p1_218; \
-  float16x4_t __s2_218 = __p2_218; \
-  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
-  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
-  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
-  float16x4_t __ret_218; \
-float16x4_t __reint_218 = __rev2_218; \
-uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
-  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
-  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
-  __ret_218; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
-  float16x8_t __s0_219 = __p0_219; \
-  float16x8_t __s1_219 = __p1_219; \
-  float16x4_t __s2_219 = __p2_219; \
-  float16x8_t __ret_219; \
-float16x4_t __reint_219 = __s2_219; \
-uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
-  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
-  __ret_219; \
-})
-#else
-#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
-  float16x8_t __s0_220 = __p0_220; \
-  float16x8_t __s1_220 = __p1_220; \
-  float16x4_t __s2_220 = __p2_220; \
-  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
-  float16x8_t __ret_220; \
-float16x4_t __reint_220 = __rev2_220; \
-uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
-  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
-  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_220; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
-  float16x4_t __s0_221 = __p0_221; \
-  float16x4_t __s1_221 = __p1_221; \
-  float16x8_t __s2_221 = __p2_221; \
-  float16x4_t __ret_221; \
-float16x8_t __reint_221 = __s2_221; \
-uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
-  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
-  __ret_221; \
-})
-#else
-#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
-  float16x4_t __s0_222 = __p0_222; \
-  float16x4_t __s1_222 = __p1_222; \
-  float16x8_t __s2_222 = __p2_222; \
-  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
-  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
-  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_222; \
-float16x8_t __reint_222 = __rev2_222; \
-uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
-  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
-  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
-  __ret_222; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
-  float16x8_t __s0_223 = __p0_223; \
-  float16x8_t __s1_223 = __p1_223; \
-  float16x8_t __s2_223 = __p2_223; \
-  float16x8_t __ret_223; \
-float16x8_t __reint_223 = __s2_223; \
-uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
-  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
-  __ret_223; \
-})
-#else
-#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
-  float16x8_t __s0_224 = __p0_224; \
-  float16x8_t __s1_224 = __p1_224; \
-  float16x8_t __s2_224 = __p2_224; \
-  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_224; \
-float16x8_t __reint_224 = __rev2_224; \
-uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
-  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
-  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_224; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
-  float16x4_t __s0_225 = __p0_225; \
-  float16x4_t __s1_225 = __p1_225; \
-  float16x4_t __s2_225 = __p2_225; \
-  float16x4_t __ret_225; \
-float16x4_t __reint_225 = __s2_225; \
-uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
-  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
-  __ret_225; \
-})
-#else
-#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
-  float16x4_t __s0_226 = __p0_226; \
-  float16x4_t __s1_226 = __p1_226; \
-  float16x4_t __s2_226 = __p2_226; \
-  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
-  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
-  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
-  float16x4_t __ret_226; \
-float16x4_t __reint_226 = __rev2_226; \
-uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
-  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
-  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
-  __ret_226; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
-  float16x8_t __s0_227 = __p0_227; \
-  float16x8_t __s1_227 = __p1_227; \
-  float16x4_t __s2_227 = __p2_227; \
-  float16x8_t __ret_227; \
-float16x4_t __reint_227 = __s2_227; \
-uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
-  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
-  __ret_227; \
-})
-#else
-#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
-  float16x8_t __s0_228 = __p0_228; \
-  float16x8_t __s1_228 = __p1_228; \
-  float16x4_t __s2_228 = __p2_228; \
-  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
-  float16x8_t __ret_228; \
-float16x4_t __reint_228 = __rev2_228; \
-uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
-  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
-  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_228; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
-  float16x4_t __s0_229 = __p0_229; \
-  float16x4_t __s1_229 = __p1_229; \
-  float16x8_t __s2_229 = __p2_229; \
-  float16x4_t __ret_229; \
-float16x8_t __reint_229 = __s2_229; \
-uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
-  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
-  __ret_229; \
-})
-#else
-#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
-  float16x4_t __s0_230 = __p0_230; \
-  float16x4_t __s1_230 = __p1_230; \
-  float16x8_t __s2_230 = __p2_230; \
-  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
-  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
-  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_230; \
-float16x8_t __reint_230 = __rev2_230; \
-uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
-  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
-  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
-  __ret_230; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
-  float16x8_t __s0_231 = __p0_231; \
-  float16x8_t __s1_231 = __p1_231; \
-  float16x8_t __s2_231 = __p2_231; \
-  float16x8_t __ret_231; \
-float16x8_t __reint_231 = __s2_231; \
-uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
-  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
-  __ret_231; \
-})
-#else
-#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
-  float16x8_t __s0_232 = __p0_232; \
-  float16x8_t __s1_232 = __p1_232; \
-  float16x8_t __s2_232 = __p2_232; \
-  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_232; \
-float16x8_t __reint_232 = __rev2_232; \
-uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
-  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
-  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_232; \
-})
-#endif
-
-#endif
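/*
 * The vcmla*_rot{90,180,270} wrappers above map to the ARMv8.3-A FCMLA
 * instruction, which treats even lanes as real parts and odd lanes as
 * imaginary parts; the lane/laneq forms reinterpret a {re, im} half-float
 * pair as one uint32 lane so a single complex element can be broadcast.
 * Rotation #0 accumulates a.re*b and #90 accumulates i*a.im*b, so issuing
 * the pair performs a full complex multiply-accumulate. A minimal sketch
 * (hypothetical helper name; assumes a target compiled with FP16 and the
 * complex-number extension, e.g. -march=armv8.3-a+fp16):
 */
static inline float16x8_t complex_mla_f16(float16x8_t acc,
                                          float16x8_t a, float16x8_t b) {
  acc = vcmlaq_f16(acc, a, b);       /* acc.re += a.re*b.re; acc.im += a.re*b.im */
  acc = vcmlaq_rot90_f16(acc, a, b); /* acc.re -= a.im*b.im; acc.im += a.im*b.re */
  return acc;                        /* net effect: acc += a * b, per complex pair */
}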
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
-  float64x1_t __s0_233 = __p0_233; \
-  float64x1_t __s1_233 = __p1_233; \
-  float64x1_t __s2_233 = __p2_233; \
-  float64x1_t __ret_233; \
-float64x1_t __reint_233 = __s2_233; \
-uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
-  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
-  __ret_233; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
-  float64x2_t __s0_234 = __p0_234; \
-  float64x2_t __s1_234 = __p1_234; \
-  float64x1_t __s2_234 = __p2_234; \
-  float64x2_t __ret_234; \
-float64x1_t __reint_234 = __s2_234; \
-uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
-  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
-  __ret_234; \
-})
-#else
-#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
-  float64x2_t __s0_235 = __p0_235; \
-  float64x2_t __s1_235 = __p1_235; \
-  float64x1_t __s2_235 = __p2_235; \
-  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
-  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
-  float64x2_t __ret_235; \
-float64x1_t __reint_235 = __s2_235; \
-uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
-  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
-  __ret_235; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
-  float64x1_t __s0_236 = __p0_236; \
-  float64x1_t __s1_236 = __p1_236; \
-  float64x2_t __s2_236 = __p2_236; \
-  float64x1_t __ret_236; \
-float64x2_t __reint_236 = __s2_236; \
-uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
-  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
-  __ret_236; \
-})
-#else
-#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
-  float64x1_t __s0_237 = __p0_237; \
-  float64x1_t __s1_237 = __p1_237; \
-  float64x2_t __s2_237 = __p2_237; \
-  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
-  float64x1_t __ret_237; \
-float64x2_t __reint_237 = __rev2_237; \
-uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
-  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
-  float64x2_t __s0_238 = __p0_238; \
-  float64x2_t __s1_238 = __p1_238; \
-  float64x2_t __s2_238 = __p2_238; \
-  float64x2_t __ret_238; \
-float64x2_t __reint_238 = __s2_238; \
-uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
-  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
-  __ret_238; \
-})
-#else
-#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
-  float64x2_t __s0_239 = __p0_239; \
-  float64x2_t __s1_239 = __p1_239; \
-  float64x2_t __s2_239 = __p2_239; \
-  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
-  float64x2_t __ret_239; \
-float64x2_t __reint_239 = __rev2_239; \
-uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
-  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
-  float64x1_t __s0_240 = __p0_240; \
-  float64x1_t __s1_240 = __p1_240; \
-  float64x1_t __s2_240 = __p2_240; \
-  float64x1_t __ret_240; \
-float64x1_t __reint_240 = __s2_240; \
-uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
-  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
-  __ret_240; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
-  float64x2_t __s0_241 = __p0_241; \
-  float64x2_t __s1_241 = __p1_241; \
-  float64x1_t __s2_241 = __p2_241; \
-  float64x2_t __ret_241; \
-float64x1_t __reint_241 = __s2_241; \
-uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
-  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
-  __ret_241; \
-})
-#else
-#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
-  float64x2_t __s0_242 = __p0_242; \
-  float64x2_t __s1_242 = __p1_242; \
-  float64x1_t __s2_242 = __p2_242; \
-  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
-  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
-  float64x2_t __ret_242; \
-float64x1_t __reint_242 = __s2_242; \
-uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
-  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
-  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
-  __ret_242; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
-  float64x1_t __s0_243 = __p0_243; \
-  float64x1_t __s1_243 = __p1_243; \
-  float64x2_t __s2_243 = __p2_243; \
-  float64x1_t __ret_243; \
-float64x2_t __reint_243 = __s2_243; \
-uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
-  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
-  __ret_243; \
-})
-#else
-#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
-  float64x1_t __s0_244 = __p0_244; \
-  float64x1_t __s1_244 = __p1_244; \
-  float64x2_t __s2_244 = __p2_244; \
-  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
-  float64x1_t __ret_244; \
-float64x2_t __reint_244 = __rev2_244; \
-uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
-  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
-  __ret_244; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
-  float64x2_t __s0_245 = __p0_245; \
-  float64x2_t __s1_245 = __p1_245; \
-  float64x2_t __s2_245 = __p2_245; \
-  float64x2_t __ret_245; \
-float64x2_t __reint_245 = __s2_245; \
-uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
-  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
-  __ret_245; \
-})
-#else
-#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
-  float64x2_t __s0_246 = __p0_246; \
-  float64x2_t __s1_246 = __p1_246; \
-  float64x2_t __s2_246 = __p2_246; \
-  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
-  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
-  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
-  float64x2_t __ret_246; \
-float64x2_t __reint_246 = __rev2_246; \
-uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
-  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
-  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
-  __ret_246; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
-  float64x1_t __s0_247 = __p0_247; \
-  float64x1_t __s1_247 = __p1_247; \
-  float64x1_t __s2_247 = __p2_247; \
-  float64x1_t __ret_247; \
-float64x1_t __reint_247 = __s2_247; \
-uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
-  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
-  __ret_247; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
-  float64x2_t __s0_248 = __p0_248; \
-  float64x2_t __s1_248 = __p1_248; \
-  float64x1_t __s2_248 = __p2_248; \
-  float64x2_t __ret_248; \
-float64x1_t __reint_248 = __s2_248; \
-uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
-  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
-  __ret_248; \
-})
-#else
-#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
-  float64x2_t __s0_249 = __p0_249; \
-  float64x2_t __s1_249 = __p1_249; \
-  float64x1_t __s2_249 = __p2_249; \
-  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
-  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
-  float64x2_t __ret_249; \
-float64x1_t __reint_249 = __s2_249; \
-uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
-  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
-  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
-  __ret_249; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
-  float64x1_t __s0_250 = __p0_250; \
-  float64x1_t __s1_250 = __p1_250; \
-  float64x2_t __s2_250 = __p2_250; \
-  float64x1_t __ret_250; \
-float64x2_t __reint_250 = __s2_250; \
-uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
-  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
-  __ret_250; \
-})
-#else
-#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
-  float64x1_t __s0_251 = __p0_251; \
-  float64x1_t __s1_251 = __p1_251; \
-  float64x2_t __s2_251 = __p2_251; \
-  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
-  float64x1_t __ret_251; \
-float64x2_t __reint_251 = __rev2_251; \
-uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
-  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
-  __ret_251; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
-  float64x2_t __s0_252 = __p0_252; \
-  float64x2_t __s1_252 = __p1_252; \
-  float64x2_t __s2_252 = __p2_252; \
-  float64x2_t __ret_252; \
-float64x2_t __reint_252 = __s2_252; \
-uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
-  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
-  __ret_252; \
-})
-#else
-#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
-  float64x2_t __s0_253 = __p0_253; \
-  float64x2_t __s1_253 = __p1_253; \
-  float64x2_t __s2_253 = __p2_253; \
-  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
-  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
-  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
-  float64x2_t __ret_253; \
-float64x2_t __reint_253 = __rev2_253; \
-uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
-  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
-  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
-  __ret_253; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
-  float64x1_t __s0_254 = __p0_254; \
-  float64x1_t __s1_254 = __p1_254; \
-  float64x1_t __s2_254 = __p2_254; \
-  float64x1_t __ret_254; \
-float64x1_t __reint_254 = __s2_254; \
-uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
-  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
-  __ret_254; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
-  float64x2_t __s0_255 = __p0_255; \
-  float64x2_t __s1_255 = __p1_255; \
-  float64x1_t __s2_255 = __p2_255; \
-  float64x2_t __ret_255; \
-float64x1_t __reint_255 = __s2_255; \
-uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
-  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
-  __ret_255; \
-})
-#else
-#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
-  float64x2_t __s0_256 = __p0_256; \
-  float64x2_t __s1_256 = __p1_256; \
-  float64x1_t __s2_256 = __p2_256; \
-  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
-  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
-  float64x2_t __ret_256; \
-float64x1_t __reint_256 = __s2_256; \
-uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
-  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
-  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
-  __ret_256; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
-  float64x1_t __s0_257 = __p0_257; \
-  float64x1_t __s1_257 = __p1_257; \
-  float64x2_t __s2_257 = __p2_257; \
-  float64x1_t __ret_257; \
-float64x2_t __reint_257 = __s2_257; \
-uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
-  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
-  __ret_257; \
-})
-#else
-#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float64x1_t __s0_258 = __p0_258; \
-  float64x1_t __s1_258 = __p1_258; \
-  float64x2_t __s2_258 = __p2_258; \
-  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
-  float64x1_t __ret_258; \
-float64x2_t __reint_258 = __rev2_258; \
-uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
-  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
-  __ret_258; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float64x2_t __s0_259 = __p0_259; \
-  float64x2_t __s1_259 = __p1_259; \
-  float64x2_t __s2_259 = __p2_259; \
-  float64x2_t __ret_259; \
-float64x2_t __reint_259 = __s2_259; \
-uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
-  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
-  __ret_259; \
-})
-#else
-#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float64x2_t __s0_260 = __p0_260; \
-  float64x2_t __s1_260 = __p1_260; \
-  float64x2_t __s2_260 = __p2_260; \
-  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
-  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
-  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
-  float64x2_t __ret_260; \
-float64x2_t __reint_260 = __rev2_260; \
-uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
-  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
-  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
-  __ret_260; \
-})
-#endif
-
-#endif
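/*
 * The float64 forms above are AArch64-only: a float64x2_t holds exactly one
 * complex double, so the rot0/rot90 pairing accumulates a single complex
 * product per vector. A minimal sketch (hypothetical helper name; assumes
 * -march=armv8.3-a or later):
 */
static inline float64x2_t complex_mla_f64(float64x2_t acc,
                                          float64x2_t a, float64x2_t b) {
  /* acc += a * b for one {re, im} pair. */
  acc = vcmlaq_f64(acc, a, b);
  acc = vcmlaq_rot90_f64(acc, a, b);
  return acc;
}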
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  uint32x4_t __s0_261 = __p0_261; \
-  uint8x16_t __s1_261 = __p1_261; \
-  uint8x8_t __s2_261 = __p2_261; \
-  uint32x4_t __ret_261; \
-uint8x8_t __reint_261 = __s2_261; \
-uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
-  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
-  __ret_261; \
-})
-#else
-#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  uint32x4_t __s0_262 = __p0_262; \
-  uint8x16_t __s1_262 = __p1_262; \
-  uint8x8_t __s2_262 = __p2_262; \
-  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
-  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_262; \
-uint8x8_t __reint_262 = __rev2_262; \
-uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
-  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
-  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
-  __ret_262; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  int32x4_t __s0_263 = __p0_263; \
-  int8x16_t __s1_263 = __p1_263; \
-  int8x8_t __s2_263 = __p2_263; \
-  int32x4_t __ret_263; \
-int8x8_t __reint_263 = __s2_263; \
-int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
-  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
-  __ret_263; \
-})
-#else
-#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  int32x4_t __s0_264 = __p0_264; \
-  int8x16_t __s1_264 = __p1_264; \
-  int8x8_t __s2_264 = __p2_264; \
-  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
-  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_264; \
-int8x8_t __reint_264 = __rev2_264; \
-int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
-  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
-  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
-  __ret_264; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  uint32x2_t __s0_265 = __p0_265; \
-  uint8x8_t __s1_265 = __p1_265; \
-  uint8x8_t __s2_265 = __p2_265; \
-  uint32x2_t __ret_265; \
-uint8x8_t __reint_265 = __s2_265; \
-uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
-  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
-  __ret_265; \
-})
-#else
-#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  uint32x2_t __s0_266 = __p0_266; \
-  uint8x8_t __s1_266 = __p1_266; \
-  uint8x8_t __s2_266 = __p2_266; \
-  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
-  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_266; \
-uint8x8_t __reint_266 = __rev2_266; \
-uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
-  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
-  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
-  __ret_266; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  int32x2_t __s0_267 = __p0_267; \
-  int8x8_t __s1_267 = __p1_267; \
-  int8x8_t __s2_267 = __p2_267; \
-  int32x2_t __ret_267; \
-int8x8_t __reint_267 = __s2_267; \
-int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
-  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
-  __ret_267; \
-})
-#else
-#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  int32x2_t __s0_268 = __p0_268; \
-  int8x8_t __s1_268 = __p1_268; \
-  int8x8_t __s2_268 = __p2_268; \
-  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
-  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_268; \
-int8x8_t __reint_268 = __rev2_268; \
-int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
-  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
-  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
-  __ret_268; \
-})
-#endif
-
-#endif
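/*
 * vdot/vdotq lower to the UDOT/SDOT instructions: each 32-bit lane of the
 * accumulator receives the dot product of the corresponding four 8-bit
 * elements of the two source vectors. A minimal sketch (hypothetical helper
 * name; assumes a target with the dot-product extension, e.g.
 * -march=armv8.2-a+dotprod):
 */
static inline uint32_t dot_u8x16(const uint8_t *x, const uint8_t *y) {
  /* Four partial sums, one per group of four bytes. */
  uint32x4_t acc = vdotq_u32(vdupq_n_u32(0), vld1q_u8(x), vld1q_u8(y));
#if defined(__aarch64__)
  return vaddvq_u32(acc);                 /* horizontal add of the partials */
#else
  uint32x2_t s = vadd_u32(vget_low_u32(acc), vget_high_u32(acc));
  return vget_lane_u32(vpadd_u32(s, s), 0);
#endif
}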
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  uint32x4_t __s0_269 = __p0_269; \
-  uint8x16_t __s1_269 = __p1_269; \
-  uint8x16_t __s2_269 = __p2_269; \
-  uint32x4_t __ret_269; \
-uint8x16_t __reint_269 = __s2_269; \
-uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
-  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
-  __ret_269; \
-})
-#else
-#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  uint32x4_t __s0_270 = __p0_270; \
-  uint8x16_t __s1_270 = __p1_270; \
-  uint8x16_t __s2_270 = __p2_270; \
-  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
-  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_270; \
-uint8x16_t __reint_270 = __rev2_270; \
-uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
-  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
-  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
-  __ret_270; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  int32x4_t __s0_271 = __p0_271; \
-  int8x16_t __s1_271 = __p1_271; \
-  int8x16_t __s2_271 = __p2_271; \
-  int32x4_t __ret_271; \
-int8x16_t __reint_271 = __s2_271; \
-int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
-  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
-  __ret_271; \
-})
-#else
-#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  int32x4_t __s0_272 = __p0_272; \
-  int8x16_t __s1_272 = __p1_272; \
-  int8x16_t __s2_272 = __p2_272; \
-  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
-  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_272; \
-int8x16_t __reint_272 = __rev2_272; \
-int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
-  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
-  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
-  __ret_272; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  uint32x2_t __s0_273 = __p0_273; \
-  uint8x8_t __s1_273 = __p1_273; \
-  uint8x16_t __s2_273 = __p2_273; \
-  uint32x2_t __ret_273; \
-uint8x16_t __reint_273 = __s2_273; \
-uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
-  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
-  __ret_273; \
-})
-#else
-#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  uint32x2_t __s0_274 = __p0_274; \
-  uint8x8_t __s1_274 = __p1_274; \
-  uint8x16_t __s2_274 = __p2_274; \
-  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
-  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_274; \
-uint8x16_t __reint_274 = __rev2_274; \
-uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
-  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
-  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
-  __ret_274; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  int32x2_t __s0_275 = __p0_275; \
-  int8x8_t __s1_275 = __p1_275; \
-  int8x16_t __s2_275 = __p2_275; \
-  int32x2_t __ret_275; \
-  int8x16_t __reint_275 = __s2_275; \
-  int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
-  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
-  __ret_275; \
-})
-#else
-#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  int32x2_t __s0_276 = __p0_276; \
-  int8x8_t __s1_276 = __p1_276; \
-  int8x16_t __s2_276 = __p2_276; \
-  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
-  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_276; \
-  int8x16_t __reint_276 = __rev2_276; \
-  int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
-  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
-  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
-  __ret_276; \
-})
-#endif
-
-#endif
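The deleted block above closes the dot-product group: each vdot*_lane*_ macro reinterprets the third operand as 32-bit lanes, splats the selected lane, and feeds it to the vdot builtin, with byte-reversal shims on big-endian. A minimal usage sketch, assuming a dotprod-capable target; the helper name dot_rows is illustrative:

#include <arm_neon.h>

/* acc[i] += rows[4i+j] * c4[j] for j = 0..3, where c4 is 32-bit lane 0 of
 * cols reinterpreted as four u8 values and splatted across the vector. */
uint32x4_t dot_rows(uint32x4_t acc, uint8x16_t rows, uint8x16_t cols) {
  return vdotq_laneq_u32(acc, rows, cols, 0);
}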
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
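The __ARM_FEATURE_FMA group above defines fused multiply-add (vfma*) and its negated-multiplicand form (vfms*, implemented as vfma with -p1). A short sketch, assuming an FMA-capable target; the helper names are illustrative:

#include <arm_neon.h>

float32x4_t axpy4(float32x4_t y, float32x4_t a, float32x4_t x) {
  return vfmaq_f32(y, a, x);  /* y + a*x with a single rounding step */
}

float32x4_t axmy4(float32x4_t y, float32x4_t a, float32x4_t x) {
  return vfmsq_f32(y, a, x);  /* y - a*x, i.e. vfmaq_f32(y, -a, x) */
}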
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
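The FP16_FML group widens float16 lanes to float32 and multiply-accumulates: the _low variants consume the lower half of the f16 inputs, the _high variants the upper half, and vfmlsl* subtracts instead of adding. A sketch assuming __ARM_FEATURE_FP16_FML on AArch64 (helper name illustrative):

#include <arm_neon.h>

/* acc += a*b over all eight f16 lanes, accumulated in f32. */
float32x4_t fml_acc(float32x4_t acc, float16x8_t a, float16x8_t b) {
  acc = vfmlalq_low_f16(acc, a, b);   /* lanes 0..3 */
  acc = vfmlalq_high_f16(acc, a, b);  /* lanes 4..7 */
  return acc;
}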
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
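vbsl* is a pure bitwise select: result bits come from the second operand where the mask bit is 1 and from the third where it is 0, so the all-ones/all-zeros masks produced by the comparison intrinsics turn it into a per-lane select. Illustrative sketch, assuming FP16 vector arithmetic support:

#include <arm_neon.h>

float16x8_t select_f16(uint16x8_t mask, float16x8_t a, float16x8_t b) {
  return vbslq_f16(mask, a, b);  /* bits of a where mask set, else b */
}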
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
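vcage/vcagt/vcale/vcalt compare absolute values, returning an all-ones lane (0xFFFF) where the relation holds and zero otherwise. Sketch (hypothetical helper name):

#include <arm_neon.h>

uint16x8_t abs_ge(float16x8_t a, float16x8_t b) {
  return vcageq_f16(a, b);  /* 0xFFFF where |a[i]| >= |b[i]|, else 0 */
}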
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
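The ordinary compares above (vceq*/vcge*/vcgt*/vcle*/vclt* and their *z compare-with-zero forms) all yield uint16 lane masks that compose with vbsl. A sketch that clamps negative lanes to zero, assuming vdupq_n_f16 is available (a standard arm_neon.h intrinsic, not defined in this hunk):

#include <arm_neon.h>

float16x8_t relu_f16(float16x8_t x) {
  uint16x8_t neg = vcltzq_f16(x);            /* mask of lanes < 0 */
  return vbslq_f16(neg, vdupq_n_f16(0), x);  /* zero where negative */
}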
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
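In the vcvt*_n_* macros above, the immediate operand is the number of fractional bits of the fixed-point value (valid range 1..16). Sketch for Q8.8 data, with illustrative helper names:

#include <arm_neon.h>

float16x8_t q88_to_f16(int16x8_t raw) {
  return vcvtq_n_f16_s16(raw, 8);  /* value = raw / 256.0 */
}

int16x8_t f16_to_q88(float16x8_t x) {
  return vcvtq_n_s16_f16(x, 8);    /* raw = trunc(x * 256.0) */
}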
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
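vext concatenates its two operands and extracts a window starting at the immediate lane, which is why the big-endian path reverses lanes before and after the builtin. Sketch (helper name illustrative):

#include <arm_neon.h>

float16x4_t shift_window(float16x4_t a, float16x4_t b) {
  return vext_f16(a, b, 3);  /* yields {a[3], b[0], b[1], b[2]} */
}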
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
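vfmaq_f16/vfma_f16 compute acc + x*y fused in a single rounding step, and the vfms* forms above are implemented as vfma with a negated multiplicand, giving acc - x*y. Illustrative sketch (helper names are hypothetical):

    #include <arm_neon.h>

    /* Fused multiply-accumulate: one rounding, unlike separate mul+add. */
    float16x8_t madd8(float16x8_t acc, float16x8_t x, float16x8_t y) {
      return vfmaq_f16(acc, x, y);   /* acc + x*y */
    }
    float16x8_t msub8(float16x8_t acc, float16x8_t x, float16x8_t y) {
      return vfmsq_f16(acc, x, y);   /* acc - x*y */
    }
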
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
-  float16x8_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x8_t __ret_277; \
-  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
-  __ret_277; \
-})
-#else
-#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
-  float16x8_t __s0_278 = __p0_278; \
-  float16x4_t __s1_278 = __p1_278; \
-  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
-  float16x8_t __ret_278; \
-  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
-  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_278; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
-  float16x4_t __s0_279 = __p0_279; \
-  float16x4_t __s1_279 = __p1_279; \
-  float16x4_t __ret_279; \
-  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
-  __ret_279; \
-})
-#else
-#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
-  float16x4_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
-  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
-  float16x4_t __ret_280; \
-  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
-  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
-  __ret_280; \
-})
-#endif
-
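The *_lane multiply forms above broadcast one compile-time lane of the second operand (via the splat*_lane_f16 helpers) and multiply elementwise, avoiding an explicit vdup at the call site. Sketch:

    #include <arm_neon.h>

    /* Every lane of v times coeffs[2]; the lane index must be a
       constant in 0..3. */
    float16x8_t scale_by_lane2(float16x8_t v, float16x4_t coeffs) {
      return vmulq_lane_f16(v, coeffs, 2);
    }
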
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
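vrecpe* yields a low-precision reciprocal estimate (roughly 8 significant bits) and vrecps* computes the Newton-Raphson correction factor 2 - a*b (FRECPS), so each x = x * vrecps(d, x) step roughly doubles the accurate bits. A sketch of the standard iteration (helper name hypothetical):

    #include <arm_neon.h>

    /* Approximate 1/d: estimate plus one refinement already exceeds
       float16 precision; repeat the step for the general pattern. */
    float16x4_t recip_f16(float16x4_t d) {
      float16x4_t x = vrecpe_f16(d);
      x = vmul_f16(x, vrecps_f16(d, x));   /* x *= (2 - d*x) */
      return x;
    }
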
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
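Similarly, vrsqrte* estimates 1/sqrt(d) and vrsqrts* returns (3 - a*b)/2 (FRSQRTS), giving the Newton-Raphson step x = x * vrsqrts(d*x, x). Sketch:

    #include <arm_neon.h>

    /* Approximate 1/sqrt(d): one refinement, x *= (3 - d*x*x)/2,
       is enough for float16's 11-bit significand. */
    float16x4_t rsqrt_f16(float16x4_t d) {
      float16x4_t x = vrsqrte_f16(d);
      x = vmul_f16(x, vrsqrts_f16(vmul_f16(d, x), x));
      return x;
    }
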
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
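
The vzip/vuzp/vtrn families that close the block above return a two-vector struct carrying both interleave outputs. For a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}: vzip_f16 gives val[0]={a0,b0,a1,b1}, val[1]={a2,b2,a3,b3}; vuzp_f16 gives the even-indexed then the odd-indexed lanes; vtrn_f16 transposes 2x2 blocks. Sketch (hypothetical helper):

    #include <arm_neon.h>

    /* Interleave two rows, e.g. to turn planar data into packed pairs. */
    float16x4x2_t interleave(float16x4_t a, float16x4_t b) {
      return vzip_f16(a, b);
    }

The block that follows is guarded on __aarch64__ as well, so the remaining fp16 intrinsics are AArch64-only.
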
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
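vduph_lane_f16/vduph_laneq_f16 read one compile-time lane out to a float16_t scalar (the h suffix marks a half-precision scalar result). Sketch:

    #include <arm_neon.h>

    /* Scalar extract of lane 3; the index must be a constant. */
    float16_t lane3(float16x4_t v) {
      return vduph_lane_f16(v, 3);
    }
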
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float16_t __s0_281 = __p0_281; \
-  float16_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float16_t __ret_281; \
-  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
-  __ret_281; \
-})
-#else
-#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float16_t __s0_282 = __p0_282; \
-  float16_t __s1_282 = __p1_282; \
-  float16x4_t __s2_282 = __p2_282; \
-  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
-  float16_t __ret_282; \
-  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
-  __ret_282; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float16x8_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x4_t __s2_283 = __p2_283; \
-  float16x8_t __ret_283; \
-  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
-  __ret_283; \
-})
-#else
-#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float16x8_t __s0_284 = __p0_284; \
-  float16x8_t __s1_284 = __p1_284; \
-  float16x4_t __s2_284 = __p2_284; \
-  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
-  float16x8_t __ret_284; \
-  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
-  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_284; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float16x4_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x4_t __s2_285 = __p2_285; \
-  float16x4_t __ret_285; \
-  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
-  __ret_285; \
-})
-#else
-#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float16x4_t __s0_286 = __p0_286; \
-  float16x4_t __s1_286 = __p1_286; \
-  float16x4_t __s2_286 = __p2_286; \
-  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
-  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
-  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
-  float16x4_t __ret_286; \
-  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
-  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
-  __ret_286; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float16_t __s0_287 = __p0_287; \
-  float16_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float16_t __ret_287; \
-  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
-  __ret_287; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float16_t __s0_288 = __p0_288; \
-  float16_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_288; \
-  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
-  __ret_288; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float16x8_t __s0_289 = __p0_289; \
-  float16x8_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float16x8_t __ret_289; \
-  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
-  __ret_289; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
-  float16x8_t __s0_290 = __p0_290; \
-  float16x8_t __s1_290 = __p1_290; \
-  float16x8_t __s2_290 = __p2_290; \
-  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_290; \
-  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
-  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_290; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
-  float16x4_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x8_t __s2_291 = __p2_291; \
-  float16x4_t __ret_291; \
-  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
-  __ret_291; \
-})
-#else
-#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
-  float16x4_t __s0_292 = __p0_292; \
-  float16x4_t __s1_292 = __p1_292; \
-  float16x8_t __s2_292 = __p2_292; \
-  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
-  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
-  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_292; \
-  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
-  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
-  __ret_292; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
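The v{max,min}v* and v{max,min}nmv* macros above reduce a whole vector to a single float16_t. The nm forms follow IEEE maxNum/minNum semantics, so a quiet-NaN lane is ignored when another lane is numeric, whereas vmaxv/vminv propagate NaN like FMAX/FMIN. Sketch:

    #include <arm_neon.h>

    /* NaN-tolerant horizontal maximum over all 8 lanes. */
    float16_t row_max(float16x8_t v) {
      return vmaxnmvq_f16(v);
    }
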
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16x8_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __ret_293; \
-  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
-  __ret_293; \
-})
-#else
-#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
-  float16x8_t __s0_294 = __p0_294; \
-  float16x8_t __s1_294 = __p1_294; \
-  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_294; \
-  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
-  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_294; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
-  float16x4_t __s0_295 = __p0_295; \
-  float16x8_t __s1_295 = __p1_295; \
-  float16x4_t __ret_295; \
-  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
-  __ret_295; \
-})
-#else
-#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
-  float16x4_t __s0_296 = __p0_296; \
-  float16x8_t __s1_296 = __p1_296; \
-  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
-  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_296; \
-  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
-  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
-  __ret_296; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
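vmulxq_f16/vmulx_f16 map to FPMULX: an ordinary multiply except that 0 * +/-infinity returns +/-2.0 (sign taken as the XOR of the operand signs) rather than NaN, which keeps reciprocal-style algorithms well defined at the extremes. Sketch:

    #include <arm_neon.h>

    /* Like vmul_f16, but 0 * inf lanes yield +/-2.0 instead of NaN. */
    float16x4_t mulx4(float16x4_t a, float16x4_t b) {
      return vmulx_f16(a, b);
    }
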
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
-  float16x8_t __s0_297 = __p0_297; \
-  float16x4_t __s1_297 = __p1_297; \
-  float16x8_t __ret_297; \
-  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
-  __ret_297; \
-})
-#else
-#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
-  float16x8_t __s0_298 = __p0_298; \
-  float16x4_t __s1_298 = __p1_298; \
-  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
-  float16x8_t __ret_298; \
-  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
-  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_298; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
-  float16x4_t __s0_299 = __p0_299; \
-  float16x4_t __s1_299 = __p1_299; \
-  float16x4_t __ret_299; \
-  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
-  __ret_299; \
-})
-#else
-#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
-  float16x4_t __s0_300 = __p0_300; \
-  float16x4_t __s1_300 = __p1_300; \
-  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
-  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
-  float16x4_t __ret_300; \
-  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
-  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
-  __ret_300; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
-  float16x8_t __s0_301 = __p0_301; \
-  float16x8_t __s1_301 = __p1_301; \
-  float16x8_t __ret_301; \
-  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
-  __ret_301; \
-})
-#else
-#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
-  float16x8_t __s0_302 = __p0_302; \
-  float16x8_t __s1_302 = __p1_302; \
-  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_302; \
-  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
-  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_302; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
-  float16x4_t __s0_303 = __p0_303; \
-  float16x8_t __s1_303 = __p1_303; \
-  float16x4_t __ret_303; \
-  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
-  __ret_303; \
-})
-#else
-#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
-  float16x4_t __s0_304 = __p0_304; \
-  float16x8_t __s1_304 = __p1_304; \
-  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
-  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_304; \
-  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
-  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
-  __ret_304; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
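-
-// Every intrinsic above follows the same endianness-dispatch pattern:
-// the __LITTLE_ENDIAN__ branch calls the builtin (or the plain macro
-// expansion) directly, while the big-endian branch first reverses the
-// lane order of each operand with __builtin_shufflevector, applies the
-// underlying builtin or a __noswap_ helper that assumes little-endian
-// lane numbering, and then reverses the result back. A minimal sketch
-// of the reversal step (the helper name is illustrative, not part of
-// this header):
-//
-//   static inline float16x8_t rev_lanes_f16x8(float16x8_t v) {
-//     // Reverse the eight 16-bit lanes: lane 0 <-> lane 7, and so on.
-//     return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
-//   }
-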
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int32x4_t __s0_305 = __p0_305; \
-  uint8x16_t __s1_305 = __p1_305; \
-  int8x8_t __s2_305 = __p2_305; \
-  int32x4_t __ret_305; \
-  int8x8_t __reint_305 = __s2_305; \
-  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
-  __ret_305; \
-})
-#else
-#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32x4_t __s0_306 = __p0_306; \
-  uint8x16_t __s1_306 = __p1_306; \
-  int8x8_t __s2_306 = __p2_306; \
-  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
-  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_306; \
-  int8x8_t __reint_306 = __rev2_306; \
-  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
-  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
-  __ret_306; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32x2_t __s0_307 = __p0_307; \
-  uint8x8_t __s1_307 = __p1_307; \
-  int8x8_t __s2_307 = __p2_307; \
-  int32x2_t __ret_307; \
-  int8x8_t __reint_307 = __s2_307; \
-  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
-  __ret_307; \
-})
-#else
-#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int32x2_t __s0_308 = __p0_308; \
-  uint8x8_t __s1_308 = __p1_308; \
-  int8x8_t __s2_308 = __p2_308; \
-  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
-  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_308; \
-  int8x8_t __reint_308 = __rev2_308; \
-  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
-  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
-  __ret_308; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
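-
-// The __ARM_FEATURE_MATMUL_INT8 intrinsics above map to the i8mm
-// matrix-multiply and mixed-sign dot-product instructions (SMMLA,
-// UMMLA, USMMLA, USDOT). vmmlaq_s32 treats its two int8x16_t operands
-// as 2x8 and 8x2 int8 matrices and accumulates their 2x2 int32 product
-// into __p0; vusdotq_s32 accumulates, per int32 lane, the dot product
-// of four unsigned bytes with four signed bytes. A minimal usage
-// sketch (function and parameter names are illustrative):
-//
-//   static inline int32x4_t usdot_accumulate(int32x4_t acc,
-//                                            uint8x16_t u, int8x16_t s) {
-//     // Each of the four acc lanes gains the dot product of one
-//     // 4-byte group of u with the corresponding group of s.
-//     return vusdotq_s32(acc, u, s);
-//   }
-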
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int32x4_t __s0_309 = __p0_309; \
-  int32x4_t __s1_309 = __p1_309; \
-  int32x2_t __s2_309 = __p2_309; \
-  int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
-  __ret_309; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  int32x4_t __s0_310 = __p0_310; \
-  int32x4_t __s1_310 = __p1_310; \
-  int32x2_t __s2_310 = __p2_310; \
-  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
-  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
-  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
-  int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
-  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
-  __ret_310; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  int16x8_t __s0_311 = __p0_311; \
-  int16x8_t __s1_311 = __p1_311; \
-  int16x4_t __s2_311 = __p2_311; \
-  int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
-  __ret_311; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  int16x8_t __s0_312 = __p0_312; \
-  int16x8_t __s1_312 = __p1_312; \
-  int16x4_t __s2_312 = __p2_312; \
-  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
-  int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
-  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_312; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  int32x2_t __s0_313 = __p0_313; \
-  int32x2_t __s1_313 = __p1_313; \
-  int32x2_t __s2_313 = __p2_313; \
-  int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
-  __ret_313; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  int32x2_t __s0_314 = __p0_314; \
-  int32x2_t __s1_314 = __p1_314; \
-  int32x2_t __s2_314 = __p2_314; \
-  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
-  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
-  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
-  int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
-  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
-  __ret_314; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  int16x4_t __s0_315 = __p0_315; \
-  int16x4_t __s1_315 = __p1_315; \
-  int16x4_t __s2_315 = __p2_315; \
-  int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
-  __ret_315; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  int16x4_t __s0_316 = __p0_316; \
-  int16x4_t __s1_316 = __p1_316; \
-  int16x4_t __s2_316 = __p2_316; \
-  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
-  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
-  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
-  int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
-  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
-  __ret_316; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  int32x4_t __s0_317 = __p0_317; \
-  int32x4_t __s1_317 = __p1_317; \
-  int32x2_t __s2_317 = __p2_317; \
-  int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
-  __ret_317; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  int32x4_t __s0_318 = __p0_318; \
-  int32x4_t __s1_318 = __p1_318; \
-  int32x2_t __s2_318 = __p2_318; \
-  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
-  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
-  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
-  int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
-  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
-  __ret_318; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  int16x8_t __s0_319 = __p0_319; \
-  int16x8_t __s1_319 = __p1_319; \
-  int16x4_t __s2_319 = __p2_319; \
-  int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
-  __ret_319; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  int16x8_t __s0_320 = __p0_320; \
-  int16x8_t __s1_320 = __p1_320; \
-  int16x4_t __s2_320 = __p2_320; \
-  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
-  int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
-  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_320; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  int32x2_t __s0_321 = __p0_321; \
-  int32x2_t __s1_321 = __p1_321; \
-  int32x2_t __s2_321 = __p2_321; \
-  int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
-  __ret_321; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  int32x2_t __s0_322 = __p0_322; \
-  int32x2_t __s1_322 = __p1_322; \
-  int32x2_t __s2_322 = __p2_322; \
-  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
-  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
-  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
-  int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
-  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
-  __ret_322; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  int16x4_t __s0_323 = __p0_323; \
-  int16x4_t __s1_323 = __p1_323; \
-  int16x4_t __s2_323 = __p2_323; \
-  int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
-  __ret_323; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
-  int16x4_t __s0_324 = __p0_324; \
-  int16x4_t __s1_324 = __p1_324; \
-  int16x4_t __s2_324 = __p2_324; \
-  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
-  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
-  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
-  int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
-  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
-  __ret_324; \
-})
-#endif
-
-#endif
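-
-// The __ARM_FEATURE_QRDMX intrinsics above express SQRDMLAH/SQRDMLSH:
-// a saturating rounding doubling multiply returning the high half
-// (vqrdmulh), composed with a saturating add or subtract. In Q15
-// fixed-point terms, vqrdmlahq_s16(a, b, c) computes roughly
-// a + b * c with rounding and saturation. A minimal usage sketch
-// (names illustrative):
-//
-//   // One step of a Q15 multiply-accumulate: acc += sample * coeff.
-//   static inline int16x8_t q15_mac(int16x8_t acc, int16x8_t sample,
-//                                   int16x8_t coeff) {
-//     return vqrdmlahq_s16(acc, sample, coeff);
-//   }
-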
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
-  __ret_325; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
-  __ret_326; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
-  __ret_327; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_328; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
-  int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
-  __ret_329; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
-  int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
-  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
-  __ret_330; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
-  int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
-  __ret_331; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
-  int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
-  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
-  __ret_332; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
-  int32x4_t __s0_333 = __p0_333; \
-  int32x4_t __s1_333 = __p1_333; \
-  int32x4_t __s2_333 = __p2_333; \
-  int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
-  __ret_333; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
-  int32x4_t __s0_334 = __p0_334; \
-  int32x4_t __s1_334 = __p1_334; \
-  int32x4_t __s2_334 = __p2_334; \
-  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
-  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
-  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
-  int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
-  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
-  __ret_334; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
-  int16x8_t __s0_335 = __p0_335; \
-  int16x8_t __s1_335 = __p1_335; \
-  int16x8_t __s2_335 = __p2_335; \
-  int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
-  __ret_335; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
-  int16x8_t __s0_336 = __p0_336; \
-  int16x8_t __s1_336 = __p1_336; \
-  int16x8_t __s2_336 = __p2_336; \
-  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
-  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_336; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
-  int32x2_t __s0_337 = __p0_337; \
-  int32x2_t __s1_337 = __p1_337; \
-  int32x4_t __s2_337 = __p2_337; \
-  int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
-  __ret_337; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
-  int32x2_t __s0_338 = __p0_338; \
-  int32x2_t __s1_338 = __p1_338; \
-  int32x4_t __s2_338 = __p2_338; \
-  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
-  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
-  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
-  int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
-  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
-  __ret_338; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
-  int16x4_t __s0_339 = __p0_339; \
-  int16x4_t __s1_339 = __p1_339; \
-  int16x8_t __s2_339 = __p2_339; \
-  int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
-  __ret_339; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
-  int16x4_t __s0_340 = __p0_340; \
-  int16x4_t __s1_340 = __p1_340; \
-  int16x8_t __s2_340 = __p2_340; \
-  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
-  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
-  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
-  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
-  __ret_340; \
-})
-#endif
-
-#endif
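-
-// The _laneq variants above differ from the _lane forms only in where
-// the multiplier comes from: the lane is selected out of a 128-bit
-// vector (e.g. an int16x8_t, lanes 0-7) rather than a 64-bit one, and
-// the lane index must be a compile-time constant. A minimal usage
-// sketch (names illustrative):
-//
-//   // Multiply-accumulate against lane 3 of an 8-lane coefficient set.
-//   static inline int16x4_t q15_mac_lane3(int16x4_t acc, int16x4_t x,
-//                                         int16x8_t coeffs) {
-//     return vqrdmlah_laneq_s16(acc, x, coeffs, 3);
-//   }
-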
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
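-// vaddv*: "add across vector". Horizontal sum of all lanes, returned as a
-// scalar of the same width as one element.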
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
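-// vbsl*: bitwise select. Each result bit comes from the second operand
-// where the corresponding mask bit of the first operand is set, and from
-// the third operand otherwise. The trailing integer constant passed to the
-// polymorphic __builtin_neon_*_v builtins encodes the concrete vector type.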
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
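-// vcage*, vcagt*, vcale*, vcalt*: floating-point absolute-value compares.
-// A lane is set to all ones when |p0| >= |p1| (age), |p0| > |p1| (agt),
-// |p0| <= |p1| (ale) or |p0| < |p1| (alt), and cleared otherwise. The
-// _d/_s suffixes are the float64/float32 scalar forms.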
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
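-// vceq*: lane-wise equality tests. Vector forms use the C == operator
-// directly, which produces an all-ones mask in each true lane; the _d/_s
-// forms compare a single 64-bit or 32-bit scalar.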
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
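-// vceqz*: test each lane for equality with zero.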
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
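-// vcge*: lane-wise greater-than-or-equal, plus _d/_s scalar forms.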
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
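-// vcgez*: greater-than-or-equal to zero, defined for signed integer and
-// floating-point element types only.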
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
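-// vcgt*: lane-wise greater-than.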
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
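-// vcgtz*: greater-than zero (signed integer and floating-point types).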
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgtzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
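-// vcle*: lane-wise less-than-or-equal.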
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
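-// vclez*: less-than-or-equal to zero (signed integer and floating-point types).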
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vclezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
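-// vclt*: lane-wise less-than.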
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcltzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
-  __ret_357; \
-})
-#else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
-  __ret_358; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
-  __ret_359; \
-})
-#else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
-  __ret_360; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
-  __ret_361; \
-})
-#else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_362; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
-  __ret_363; \
-})
-#else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_364; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
-  __ret_365; \
-})
-#else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
-  __ret_366; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
-  __ret_367; \
-})
-#else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
-  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_368; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
-  __ret_369; \
-})
-#else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
-  __ret_370; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
-  __ret_371; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
-  __ret_372; \
-})
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
-  __ret_373; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
-  __ret_374; \
-})
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_375; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
-  __ret_376; \
-})
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
-  __ret_377; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
-  __ret_378; \
-})
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
-  __ret_379; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
-  __ret_381; \
-})
-#else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
-  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
-  __ret_382; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
-  __ret_383; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_384; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
-  __ret_385; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_386; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
-  __ret_387; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_388; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
-  __ret_389; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
-  __ret_390; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
-  __ret_391; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
-  __ret_392; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
-  __ret_393; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_394; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
-  __ret_395; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
-  __ret_397; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
-  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
-  __ret_398; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
-  __ret_399; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
-  __ret_400; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
-  __ret_401; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
-  __ret_402; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
-  __ret_403; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_404; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
-  __ret_405; \
-})
-#else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_406; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
-  __ret_407; \
-})
-#else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
-  __ret_408; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
-  __ret_409; \
-})
-#else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
-  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_410; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
-  __ret_411; \
-})
-#else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
-  __ret_412; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
-  __ret_413; \
-})
-#else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
-  __ret_414; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
-  __ret_415; \
-})
-#else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
-  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
-  __ret_416; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
-  __ret_417; \
-})
-#else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_418; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
-  __ret_419; \
-})
-#else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
-  __ret_420; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
-  __ret_421; \
-})
-#else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
-  __ret_422; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
-  __ret_423; \
-})
-#else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
-  __ret_424; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
-  __ret_425; \
-})
-#else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
-  __ret_426; \
-})
-#endif
-
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
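
The reverse direction, `vcvt*_n_s*`/`vcvt*_n_u*`, multiplies by 2^n and then rounds toward zero. A sketch under the same assumptions (hypothetical helper name):

    #include <arm_neon.h>

    /* Convert float to Q16.16 fixed point: scale by 2^16, truncate. */
    static inline int32_t float_to_q16_16(float32_t f) {
      return vcvts_n_s32_f32(f, 16);   /* 1.5f -> 0x00018000 */
    }
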
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
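
The plain `vcvt*_s*`/`vcvt*_u*` conversions above round toward zero regardless of the current FP rounding mode. An illustrative sketch (editorial annotation, not header content):

    #include <arm_neon.h>

    static inline void truncate_demo(void) {
      int32_t a = vcvts_s32_f32(-1.7f);                 /* -1: toward zero */
      uint64x1_t b = vcvt_u64_f64((float64x1_t){2.9});  /* {2} */
      (void)a; (void)b;
    }
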
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
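
The a/m/n/p suffixes above select an explicit rounding mode: `vcvta*` rounds to nearest with ties away from zero, `vcvtm*` toward negative infinity, `vcvtn*` to nearest with ties to even, `vcvtp*` toward positive infinity. A side-by-side sketch (illustrative values):

    #include <arm_neon.h>

    static inline void rounding_demo(void) {
      int32_t a = vcvtas_s32_f32(2.5f);  /* 3: nearest, ties away from zero */
      int32_t m = vcvtms_s32_f32(2.5f);  /* 2: toward negative infinity */
      int32_t n = vcvtns_s32_f32(2.5f);  /* 2: nearest, ties to even */
      int32_t p = vcvtps_s32_f32(2.5f);  /* 3: toward positive infinity */
      (void)a; (void)m; (void)n; (void)p;
    }
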
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
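
The `vcvtx*` family narrows f64 to f32 with round-to-odd (the FCVTX/FCVTXN behavior), which avoids double-rounding error if the result is narrowed again later; the `_high` form packs the narrowed pair into the top half of a float32x4_t. A sketch assuming an AArch64 target:

    #include <arm_neon.h>

    /* Narrow two doubles with round-to-odd and append them above `lo`. */
    static inline float32x4_t narrow_demo(float32x2_t lo, float64x2_t hi) {
      return vcvtx_high_f32_f64(lo, hi);
    }
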
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
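
`vdiv*` is AArch64-only elementwise FP division. Its `#else` bodies also show the big-endian pattern used throughout this header: reverse each operand's lanes, call the builtin (which assumes little-endian lane order), then reverse the result back so user-visible lane order is unchanged. A minimal sketch (hypothetical helper):

    #include <arm_neon.h>

    static inline float64x2_t halve(float64x2_t v) {
      return vdivq_f64(v, vdupq_n_f64(2.0));   /* {v[0]/2, v[1]/2} */
    }
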
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
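
The `vdup<b|h|s|d>_lane_*` macros above read a single lane of a 64-bit vector out as a scalar; the lane index must be a compile-time constant. Illustrative sketch:

    #include <arm_neon.h>

    static inline int16_t third_lane(int16x4_t v) {
      return vduph_lane_s16(v, 2);   /* v[2] as a scalar */
    }
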
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
-})
-#else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
-})
-#else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
-})
-#else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
-})
-#endif
-
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
-})
-#else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
-})
-#endif
-
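
The vector-result `vdup_lane`/`vdupq_lane` forms broadcast one lane of a 64-bit source across a whole vector, delegating to the `splat*_lane` helpers defined earlier in the header. Sketch:

    #include <arm_neon.h>

    /* Broadcast lane 0 of a 1-element vector across both 128-bit lanes. */
    static inline float64x2_t splat_pair(float64x1_t s) {
      return vdupq_lane_f64(s, 0);   /* {s[0], s[0]} */
    }
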
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
-  __ret_453; \
-})
-#else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
-  __ret_454; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
-  __ret_455; \
-})
-#else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_456; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
-  __ret_457; \
-})
-#else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_458; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
-  __ret_459; \
-})
-#else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
-  __ret_460; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
-  __ret_461; \
-})
-#else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
-  __ret_462; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
-  __ret_463; \
-})
-#else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
-  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_464; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
-  __ret_465; \
-})
-#else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
-  __ret_466; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
-  __ret_467; \
-})
-#else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
-  __ret_468; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
-  __ret_469; \
-})
-#else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_470; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
-  __ret_471; \
-})
-#else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
-  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
-  __ret_473; \
-})
-#else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
-  __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
-  __ret_475; \
-})
-#else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
-  __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
-  __ret_477; \
-})
-#else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
-  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
-  __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
-  __ret_479; \
-})
-#else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
-  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
-  __ret_481; \
-})
-#else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
-  __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
-  __ret_483; \
-})
-#else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
-  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
-  __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
-  __ret_485; \
-})
-#else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
-  __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
-  __ret_487; \
-})
-#else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
-  __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
-  __ret_489; \
-})
-#else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
-  __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
-  __ret_491; \
-})
-#else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
-  __ret_492; \
-})
-#endif
-
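
The `_laneq` variants above index into a 128-bit source instead of a 64-bit one, so every source lane is addressable; the result width is still set by the `vdup`/`vdupq` prefix. Sketch (hypothetical helper name):

    #include <arm_neon.h>

    static inline int32x2_t top_lane_pair(int32x4_t q) {
      return vdup_laneq_s32(q, 3);   /* {q[3], q[3]} */
    }
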
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
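
`vdup_n*`/`vdupq_n*` broadcast a scalar into every lane; note the big-endian bodies apply a final lane reverse purely to keep the internal representation consistent, the observable value is identical. Illustrative sketch:

    #include <arm_neon.h>

    static inline float64x2_t two_pi_lanes(void) {
      return vdupq_n_f64(6.283185307179586);   /* both lanes = 2*pi */
    }
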
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
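
`vext*` concatenates its two operands and extracts a window starting at the immediate element offset, so `vextq_f64(a, b, 1)` yields `{a[1], b[0]}`. A sketch using it as a one-element rotate across a vector pair:

    #include <arm_neon.h>

    static inline float64x2_t rotate_pair(float64x2_t a, float64x2_t b) {
      return vextq_f64(a, b, 1);   /* {a[1], b[0]} */
    }
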
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
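
`vfma*(a, b, c)` computes a + b*c as a fused multiply-add, with a single rounding at the end. Sketch (hypothetical helper, AArch64 assumed):

    #include <arm_neon.h>

    static inline float64x2_t axpy(float64x2_t acc, float64x2_t x,
                                   float64x2_t y) {
      return vfmaq_f64(acc, x, y);   /* acc + x*y, one rounding step */
    }
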
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
-  __ret_509; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
-  __ret_510; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
-  __ret_511; \
-})
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
-  __ret_512; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
-})
-#else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
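/*
 * vldrq_p128 above loads one 128-bit polynomial value from memory;
 * unlike the vld4 forms it has no lane structure, so no endian-specific
 * variant is emitted (annotation, not original text).
 */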
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
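/*
 * Illustrative use (a sketch, not from the original header): vmaxq_f64
 * takes the lane-wise maximum of two vectors.
 *
 *   float64x2_t a = {1.0, 8.0};
 *   float64x2_t b = {5.0, 2.0};
 *   float64x2_t m = vmaxq_f64(a, b);   // m == {5.0, 8.0}
 */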
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
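/*
 * The vmaxv and vminv families around this point are across-vector
 * reductions: they collapse every lane to a single scalar, which is why
 * the big-endian variants reverse only the input and not the result.
 * A sketch (not from the original header):
 *
 *   int32x4_t v = {7, -1, 9, 3};
 *   int32_t hi = vmaxvq_s32(v);   // hi == 9
 */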
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
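/*
 * Illustrative use (a sketch, not from the original header): vmlaq_f64
 * is multiply-accumulate, expressed above as plain vector arithmetic
 * (__p0 + __p1 * __p2).
 *
 *   float64x2_t acc = {1.0, 1.0};
 *   float64x2_t x   = {2.0, 3.0};
 *   float64x2_t y   = {4.0, 5.0};
 *   acc = vmlaq_f64(acc, x, y);   // acc == {9.0, 16.0}
 */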
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
-  __ret_531; \
-})
-#else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
-  __ret_532; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
-  __ret_533; \
-})
-#else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
-  __ret_534; \
-})
-#endif
-
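/*
 * Note on the _laneq multiply-accumulate macros above (annotation, not
 * original text): splatq_laneq_* and splat_laneq_* broadcast lane __p3
 * of the 128-bit third operand, so each macro computes
 * accumulator + vector * broadcast-lane. The big-endian branches call
 * the __noswap_ splat helpers because their operands were already
 * lane-reversed on entry.
 */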
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
-  __ret_535; \
-})
-#else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
-  __ret_536; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
-  __ret_537; \
-})
-#else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
-  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
-  __ret_538; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
-  __ret_539; \
-})
-#else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
-  __ret_540; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
-  __ret_541; \
-})
-#else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
-  __ret_542; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
-  __ret_543; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
-  __ret_544; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
-  __ret_545; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
-  __ret_546; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
-  int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
-  __ret_547; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
-  int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
-  int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
-  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
-  __ret_548; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
-  int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
-  __ret_549; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
-  int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
-  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
-  __ret_550; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
-  uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
-  uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
-  __ret_551; \
-})
-#else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
-  uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
-  uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
-  uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
-  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
-  __ret_552; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
-  uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
-  uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
-  __ret_553; \
-})
-#else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
-  uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
-  uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
-  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
-  __ret_554; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
-  int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
-  int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
-  __ret_555; \
-})
-#else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
-  int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
-  int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
-  int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
-  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
-  __ret_556; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
-  int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
-  int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
-  __ret_557; \
-})
-#else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
-  int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
-  int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
-  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
-  __ret_558; \
-})
-#endif
-
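/*
 * Illustrative use of the widening forms above (a sketch, not from the
 * original header): vmlal_high_lane_u32 multiplies the high half of a
 * uint32x4_t by one broadcast lane and accumulates into 64-bit lanes.
 *
 *   uint64x2_t acc = {0, 0};
 *   uint32x4_t a   = {1, 2, 3, 4};
 *   uint32x2_t b   = {10, 20};
 *   acc = vmlal_high_lane_u32(acc, a, b, 1);   // acc == {60, 80}
 */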
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
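/*
 * The vmls forms above and their _laneq variants below mirror vmla with
 * subtraction in place of addition: result = accumulator - operand1 *
 * operand2, including the same lane-broadcast and big-endian-reversal
 * pattern (annotation, not original text).
 */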
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
-  __ret_575; \
-})
-#else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
-  __ret_576; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
-  __ret_577; \
-})
-#else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
-  __ret_578; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
-  __ret_579; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
-  __ret_580; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
-  __ret_581; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
-  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
-  __ret_582; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
-  __ret_583; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
-  __ret_584; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
-  __ret_585; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
-  __ret_586; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
-  __ret_587; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
-  __ret_588; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
-  __ret_589; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
-  __ret_590; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
-  int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
-  __ret_591; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
-  int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
-  int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
-  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
-  __ret_592; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
-  int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
-  __ret_593; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
-  int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
-  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
-  __ret_594; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
-  uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
-  uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
-  __ret_595; \
-})
-#else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
-  uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
-  uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
-  uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
-  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
-  __ret_596; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
-  uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
-  uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
-  __ret_597; \
-})
-#else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
-  uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
-  uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
-  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
-  __ret_598; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
-  int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
-  int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
-  __ret_599; \
-})
-#else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
-  int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
-  int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
-  int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
-  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
-  __ret_600; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
-  int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
-  int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
-  __ret_601; \
-})
-#else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
-  int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
-  int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
-  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
-  __ret_602; \
-})
-#endif
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
-  return __ret_619;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
-  return __ret_620;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
-})
-#else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
-})
-#else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
-})
-#else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
-})
-#else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
-  __ret_646; \
-})
-#else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
-  __ret_647; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
-  __ret_648; \
-})
-#else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
-  __ret_649; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
-  __ret_650; \
-})
-#else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
-  __ret_651; \
-})
-#endif
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
-})
-#else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
-})
-#else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
-})
-#else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
-})
-#else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
-})
-#else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
-})
-#else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
-})
-#else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
-})
-#else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
-})
-#else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
-})
-#else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
-})
-#else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
-})
-#else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
-  __ret_692; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
-  __ret_693; \
-})
-#else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
-  __ret_694; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
-})
-#else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
-})
-#else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint16_t vqmovuns_s32(int32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovund_s64(int64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovunh_s16(int16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
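The vqtbx2* family above is the extended two-register table lookup (AArch64 TBX): each index byte in __p2 selects one of the 32 table bytes in __p1, and an out-of-range index leaves the corresponding lane of __p0 untouched. A minimal usage sketch, assuming an AArch64 target where <arm_neon.h> provides these intrinsics (the helper name and values are illustrative only):

#include <arm_neon.h>

/* 32-entry byte lookup with per-lane fallback: lanes of idx in [0,31]
 * pick a table byte; lanes >= 32 keep the byte already in dflt. */
uint8x16_t lookup32_or_default(uint8x16_t dflt, uint8x16x2_t table, uint8x16_t idx) {
  return vqtbx2q_u8(dflt, table, idx);
}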
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
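Every big-endian variant in this header follows the same mechanical shape: reverse the lane order of each vector argument with __builtin_shufflevector, call the little-endian builtin, then reverse the result back. A sketch of that reversal step in isolation (this helper is hypothetical, not part of the header):

#include <arm_neon.h>

/* Lane reversal as used by the big-endian wrappers: the index list 7..0
 * maps lane i of the source to lane (7 - i) of the result. */
uint8x8_t rev_lanes_u8x8(uint8x8_t v) {
  return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
}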
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
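As the little-endian definitions above show, vraddhn_high_* is simply vcombine of the existing narrow low half with the rounding-add-narrow of the two wide operands. A minimal sketch, assuming an AArch64 target (helper name and values hypothetical):

#include <arm_neon.h>

/* Rounding-add a and b, narrow the high half of each 32-bit sum to 16
 * bits, and pack above lo: result == vcombine_u16(lo, vraddhn_u32(a, b)). */
uint16x8_t pack_narrowed_sum(uint16x4_t lo, uint32x4_t a, uint32x4_t b) {
  return vraddhn_high_u32(lo, a, b);
}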
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
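The vrbit* intrinsics above reverse the bit order within each byte lane (vector RBIT). A small sketch, assuming an AArch64 target (helper name illustrative):

#include <arm_neon.h>

/* Reverse the bits inside every byte: a lane holding 0x01 becomes 0x80,
 * 0x0F becomes 0xF0, and so on. */
uint8x16_t bitrev_each_byte(uint8x16_t v) {
  return vrbitq_u8(v);
}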
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
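vrecpe* yields only a coarse hardware reciprocal estimate; vrecps*(d, x) computes 2.0 - d*x, the Newton-Raphson step factor used to refine it. A minimal sketch of the standard refinement sequence, assuming an AArch64 target (vmulq_f64 is the ordinary vector multiply defined elsewhere in this header; the helper name is illustrative):

#include <arm_neon.h>

/* Approximate 1/d: start from the hardware estimate, then apply two
 * Newton-Raphson steps, each roughly doubling the bits of precision. */
float64x2_t recip_f64(float64x2_t d) {
  float64x2_t x = vrecpeq_f64(d);        /* coarse estimate of 1/d   */
  x = vmulq_f64(x, vrecpsq_f64(d, x));   /* x *= (2 - d*x), step one */
  x = vmulq_f64(x, vrecpsq_f64(d, x));   /* step two                 */
  return x;
}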
 __ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
@@ -62045,140 +61802,140 @@
   return __ret;
 }
 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
   __ret; \
 })
 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   int64_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
+#define vrshrn_high_n_u32(__p0_803, __p1_803, __p2_803) __extension__ ({ \
+  uint16x8_t __ret_803; \
+  uint16x4_t __s0_803 = __p0_803; \
+  uint32x4_t __s1_803 = __p1_803; \
+  __ret_803 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_803), (uint16x4_t)(vrshrn_n_u32(__s1_803, __p2_803)))); \
+  __ret_803; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
+#define vrshrn_high_n_u32(__p0_804, __p1_804, __p2_804) __extension__ ({ \
+  uint16x8_t __ret_804; \
+  uint16x4_t __s0_804 = __p0_804; \
+  uint32x4_t __s1_804 = __p1_804; \
+  uint16x4_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 3, 2, 1, 0); \
+  uint32x4_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 3, 2, 1, 0); \
+  __ret_804 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_804), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_804, __p2_804)))); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
+#define vrshrn_high_n_u64(__p0_805, __p1_805, __p2_805) __extension__ ({ \
+  uint32x4_t __ret_805; \
+  uint32x2_t __s0_805 = __p0_805; \
+  uint64x2_t __s1_805 = __p1_805; \
+  __ret_805 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_805), (uint32x2_t)(vrshrn_n_u64(__s1_805, __p2_805)))); \
+  __ret_805; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
+#define vrshrn_high_n_u64(__p0_806, __p1_806, __p2_806) __extension__ ({ \
+  uint32x4_t __ret_806; \
+  uint32x2_t __s0_806 = __p0_806; \
+  uint64x2_t __s1_806 = __p1_806; \
+  uint32x2_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 1, 0); \
+  uint64x2_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 1, 0); \
+  __ret_806 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_806), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_806, __p2_806)))); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
+#define vrshrn_high_n_u16(__p0_807, __p1_807, __p2_807) __extension__ ({ \
+  uint8x16_t __ret_807; \
+  uint8x8_t __s0_807 = __p0_807; \
+  uint16x8_t __s1_807 = __p1_807; \
+  __ret_807 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_807), (uint8x8_t)(vrshrn_n_u16(__s1_807, __p2_807)))); \
+  __ret_807; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
+#define vrshrn_high_n_u16(__p0_808, __p1_808, __p2_808) __extension__ ({ \
+  uint8x16_t __ret_808; \
+  uint8x8_t __s0_808 = __p0_808; \
+  uint16x8_t __s1_808 = __p1_808; \
+  uint8x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_808), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_808, __p2_808)))); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
+#define vrshrn_high_n_s32(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  int16x8_t __ret_809; \
+  int16x4_t __s0_809 = __p0_809; \
+  int32x4_t __s1_809 = __p1_809; \
+  __ret_809 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_809), (int16x4_t)(vrshrn_n_s32(__s1_809, __p2_809)))); \
+  __ret_809; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
+#define vrshrn_high_n_s32(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  int16x8_t __ret_810; \
+  int16x4_t __s0_810 = __p0_810; \
+  int32x4_t __s1_810 = __p1_810; \
+  int16x4_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 3, 2, 1, 0); \
+  int32x4_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 3, 2, 1, 0); \
+  __ret_810 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_810), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_810, __p2_810)))); \
+  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
+#define vrshrn_high_n_s64(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  int32x4_t __ret_811; \
+  int32x2_t __s0_811 = __p0_811; \
+  int64x2_t __s1_811 = __p1_811; \
+  __ret_811 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_811), (int32x2_t)(vrshrn_n_s64(__s1_811, __p2_811)))); \
+  __ret_811; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
+#define vrshrn_high_n_s64(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  int32x4_t __ret_812; \
+  int32x2_t __s0_812 = __p0_812; \
+  int64x2_t __s1_812 = __p1_812; \
+  int32x2_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 1, 0); \
+  int64x2_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 1, 0); \
+  __ret_812 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_812), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_812, __p2_812)))); \
+  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \
+  __ret_812; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
+#define vrshrn_high_n_s16(__p0_813, __p1_813, __p2_813) __extension__ ({ \
+  int8x16_t __ret_813; \
+  int8x8_t __s0_813 = __p0_813; \
+  int16x8_t __s1_813 = __p1_813; \
+  __ret_813 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_813), (int8x8_t)(vrshrn_n_s16(__s1_813, __p2_813)))); \
+  __ret_813; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
+#define vrshrn_high_n_s16(__p0_814, __p1_814, __p2_814) __extension__ ({ \
+  int8x16_t __ret_814; \
+  int8x8_t __s0_814 = __p0_814; \
+  int16x8_t __s1_814 = __p1_814; \
+  int8x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_814), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_814, __p2_814)))); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814; \
 })
 #endif
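The vrshrn_high_n_* macros above combine an existing narrow low half with a rounding-shift-right-narrow of the wide operand. A minimal sketch, assuming an AArch64 target (the shift count 8 is an arbitrary in-range example; it must be a compile-time constant in [1, 8] for the u16 form, and the helper name is illustrative):

#include <arm_neon.h>

/* Round-shift each 16-bit lane of hi right by 8, narrow to 8 bits, and
 * pack the result above the existing low half lo. */
uint8x16_t rshrn_pack(uint8x8_t lo, uint16x8_t hi) {
  return vrshrn_high_n_u16(lo, hi, 8);
}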
 
@@ -62190,8 +61947,8 @@
 }
 #else
 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62221,9 +61978,9 @@
 }
 #else
 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62246,16 +62003,16 @@
   return __ret;
 }
 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
@@ -62267,10 +62024,10 @@
 }
 #else
 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62285,10 +62042,10 @@
 }
 #else
 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62303,10 +62060,10 @@
 }
 #else
 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62321,10 +62078,10 @@
 }
 #else
 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62339,10 +62096,10 @@
 }
 #else
 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62357,10 +62114,10 @@
 }
 #else
 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62368,34 +62125,34 @@
 #endif
 
 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
@@ -62403,35 +62160,35 @@
 
 #ifdef __LITTLE_ENDIAN__
 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
 #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x2_t __ret; \
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
 
 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+  float64x1_t __ret; \
   float64_t __s0 = __p0; \
   float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
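The vset_lane/vsetq_lane forms above insert a scalar into one lane of a vector; the lane index must be a compile-time constant, which is why they are macros rather than functions. A minimal sketch, assuming an AArch64 target (helper name illustrative):

#include <arm_neon.h>

/* Replace lane 1 (the upper double) of v with x. */
float64x2_t set_upper(float64x2_t v, float64_t x) {
  return vsetq_lane_f64(x, v, 1);
}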
@@ -62446,299 +62203,299 @@
   return __ret;
 }
 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
   uint64_t __ret; \
+  uint64_t __s0 = __p0; \
   __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
   __ret; \
 })
 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
   int64_t __ret; \
+  int64_t __s0 = __p0; \
   __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
-})
-#else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
-})
-#else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
-})
-#else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
-})
-#else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
-})
-#else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
-})
-#else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
+#define vshll_high_n_u8(__p0_815, __p1_815) __extension__ ({ \
   uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
+  uint8x16_t __s0_815 = __p0_815; \
+  __ret_815 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_815), __p1_815)); \
   __ret_815; \
 })
 #else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
+#define vshll_high_n_u8(__p0_816, __p1_816) __extension__ ({ \
   uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
+  uint8x16_t __s0_816 = __p0_816; \
+  uint8x16_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_816 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_816), __p1_816)); \
   __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_816; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
+#define vshll_high_n_u32(__p0_817, __p1_817) __extension__ ({ \
+  uint64x2_t __ret_817; \
+  uint32x4_t __s0_817 = __p0_817; \
+  __ret_817 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_817), __p1_817)); \
   __ret_817; \
 })
 #else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
+#define vshll_high_n_u32(__p0_818, __p1_818) __extension__ ({ \
+  uint64x2_t __ret_818; \
+  uint32x4_t __s0_818 = __p0_818; \
+  uint32x4_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \
+  __ret_818 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_818), __p1_818)); \
+  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 1, 0); \
   __ret_818; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
+#define vshll_high_n_u16(__p0_819, __p1_819) __extension__ ({ \
+  uint32x4_t __ret_819; \
+  uint16x8_t __s0_819 = __p0_819; \
+  __ret_819 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_819), __p1_819)); \
   __ret_819; \
 })
 #else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vshll_high_n_u16(__p0_820, __p1_820) __extension__ ({ \
+  uint32x4_t __ret_820; \
+  uint16x8_t __s0_820 = __p0_820; \
+  uint16x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_820 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_820), __p1_820)); \
+  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 3, 2, 1, 0); \
   __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
+#define vshll_high_n_s8(__p0_821, __p1_821) __extension__ ({ \
   int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
+  int8x16_t __s0_821 = __p0_821; \
+  __ret_821 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_821), __p1_821)); \
   __ret_821; \
 })
 #else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
+#define vshll_high_n_s8(__p0_822, __p1_822) __extension__ ({ \
   int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
+  int8x16_t __s0_822 = __p0_822; \
+  int8x16_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_822 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_822), __p1_822)); \
   __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
+#define vshll_high_n_s32(__p0_823, __p1_823) __extension__ ({ \
+  int64x2_t __ret_823; \
+  int32x4_t __s0_823 = __p0_823; \
+  __ret_823 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_823), __p1_823)); \
   __ret_823; \
 })
 #else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
+#define vshll_high_n_s32(__p0_824, __p1_824) __extension__ ({ \
+  int64x2_t __ret_824; \
+  int32x4_t __s0_824 = __p0_824; \
+  int32x4_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 3, 2, 1, 0); \
+  __ret_824 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_824), __p1_824)); \
+  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 1, 0); \
   __ret_824; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
+#define vshll_high_n_s16(__p0_825, __p1_825) __extension__ ({ \
+  int32x4_t __ret_825; \
+  int16x8_t __s0_825 = __p0_825; \
+  __ret_825 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_825), __p1_825)); \
   __ret_825; \
 })
 #else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vshll_high_n_s16(__p0_826, __p1_826) __extension__ ({ \
+  int32x4_t __ret_826; \
+  int16x8_t __s0_826 = __p0_826; \
+  int16x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_826 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_826), __p1_826)); \
+  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 3, 2, 1, 0); \
   __ret_826; \
 })
 #endif
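// Illustrative sketch (editorial note, not part of the generated header;
// assumes an AArch64 target providing <arm_neon.h>): the vshll_high_n_*
// macros above widen the upper half of the input vector and shift each
// lane left by the immediate, e.g. for the u8 variant:
//
//   uint16x8_t widen_high(uint8x16_t v) {
//       // each of the top eight u8 lanes becomes (uint16_t)lane << 4
//       return vshll_high_n_u8(v, 4);
//   }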
 
+#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __ret; \
+  uint64_t __s0 = __p0; \
+  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
+  __ret; \
+})
+#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
+  int64_t __ret; \
+  int64_t __s0 = __p0; \
+  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
+  __ret; \
+})
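// Illustrative sketch (editorial note; behavior per the ACLE scalar-shift
// intrinsics): vshrd_n_u64 is a logical right shift of a single 64-bit
// value, and vshrd_n_s64 is the arithmetic (sign-preserving) form; the
// immediate must lie in [1, 64]. For example, vshrd_n_u64(0x100u, 4)
// yields 0x10, while vshrd_n_s64(-16, 2) yields -4.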
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u32(__p0_827, __p1_827, __p2_827) __extension__ ({ \
+  uint16x8_t __ret_827; \
+  uint16x4_t __s0_827 = __p0_827; \
+  uint32x4_t __s1_827 = __p1_827; \
+  __ret_827 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_827), (uint16x4_t)(vshrn_n_u32(__s1_827, __p2_827)))); \
+  __ret_827; \
+})
+#else
+#define vshrn_high_n_u32(__p0_828, __p1_828, __p2_828) __extension__ ({ \
+  uint16x8_t __ret_828; \
+  uint16x4_t __s0_828 = __p0_828; \
+  uint32x4_t __s1_828 = __p1_828; \
+  uint16x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
+  uint32x4_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 3, 2, 1, 0); \
+  __ret_828 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_828), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_828, __p2_828)))); \
+  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_828; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u64(__p0_829, __p1_829, __p2_829) __extension__ ({ \
+  uint32x4_t __ret_829; \
+  uint32x2_t __s0_829 = __p0_829; \
+  uint64x2_t __s1_829 = __p1_829; \
+  __ret_829 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_829), (uint32x2_t)(vshrn_n_u64(__s1_829, __p2_829)))); \
+  __ret_829; \
+})
+#else
+#define vshrn_high_n_u64(__p0_830, __p1_830, __p2_830) __extension__ ({ \
+  uint32x4_t __ret_830; \
+  uint32x2_t __s0_830 = __p0_830; \
+  uint64x2_t __s1_830 = __p1_830; \
+  uint32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
+  uint64x2_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 1, 0); \
+  __ret_830 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_830), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_830, __p2_830)))); \
+  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \
+  __ret_830; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u16(__p0_831, __p1_831, __p2_831) __extension__ ({ \
+  uint8x16_t __ret_831; \
+  uint8x8_t __s0_831 = __p0_831; \
+  uint16x8_t __s1_831 = __p1_831; \
+  __ret_831 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_831), (uint8x8_t)(vshrn_n_u16(__s1_831, __p2_831)))); \
+  __ret_831; \
+})
+#else
+#define vshrn_high_n_u16(__p0_832, __p1_832, __p2_832) __extension__ ({ \
+  uint8x16_t __ret_832; \
+  uint8x8_t __s0_832 = __p0_832; \
+  uint16x8_t __s1_832 = __p1_832; \
+  uint8x8_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_832), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_832, __p2_832)))); \
+  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s32(__p0_833, __p1_833, __p2_833) __extension__ ({ \
+  int16x8_t __ret_833; \
+  int16x4_t __s0_833 = __p0_833; \
+  int32x4_t __s1_833 = __p1_833; \
+  __ret_833 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_833), (int16x4_t)(vshrn_n_s32(__s1_833, __p2_833)))); \
+  __ret_833; \
+})
+#else
+#define vshrn_high_n_s32(__p0_834, __p1_834, __p2_834) __extension__ ({ \
+  int16x8_t __ret_834; \
+  int16x4_t __s0_834 = __p0_834; \
+  int32x4_t __s1_834 = __p1_834; \
+  int16x4_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 3, 2, 1, 0); \
+  int32x4_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 3, 2, 1, 0); \
+  __ret_834 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_834), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_834, __p2_834)))); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_834; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s64(__p0_835, __p1_835, __p2_835) __extension__ ({ \
+  int32x4_t __ret_835; \
+  int32x2_t __s0_835 = __p0_835; \
+  int64x2_t __s1_835 = __p1_835; \
+  __ret_835 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_835), (int32x2_t)(vshrn_n_s64(__s1_835, __p2_835)))); \
+  __ret_835; \
+})
+#else
+#define vshrn_high_n_s64(__p0_836, __p1_836, __p2_836) __extension__ ({ \
+  int32x4_t __ret_836; \
+  int32x2_t __s0_836 = __p0_836; \
+  int64x2_t __s1_836 = __p1_836; \
+  int32x2_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 1, 0); \
+  int64x2_t __rev1_836;  __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 1, 0); \
+  __ret_836 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_836), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_836, __p2_836)))); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 3, 2, 1, 0); \
+  __ret_836; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s16(__p0_837, __p1_837, __p2_837) __extension__ ({ \
+  int8x16_t __ret_837; \
+  int8x8_t __s0_837 = __p0_837; \
+  int16x8_t __s1_837 = __p1_837; \
+  __ret_837 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_837), (int8x8_t)(vshrn_n_s16(__s1_837, __p2_837)))); \
+  __ret_837; \
+})
+#else
+#define vshrn_high_n_s16(__p0_838, __p1_838, __p2_838) __extension__ ({ \
+  int8x16_t __ret_838; \
+  int8x8_t __s0_838 = __p0_838; \
+  int16x8_t __s1_838 = __p1_838; \
+  int8x8_t __rev0_838;  __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_838;  __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_838), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_838, __p2_838)))); \
+  __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838; \
+})
+#endif
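// Illustrative sketch (editorial note): the vshrn_high_n_* macros shift
// each wide lane right by the immediate, truncate to the narrow type, and
// pack the result into the top half of the output, with the first operand
// supplying the bottom half:
//
//   uint16x8_t narrow_pair(uint16x4_t lo, uint32x4_t wide) {
//       // lanes 0..3 come from lo; lanes 4..7 are (uint16_t)(wide >> 8)
//       return vshrn_high_n_u32(lo, wide, 8);
//   }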
+
 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
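// Illustrative sketch (editorial note): vslid_n_u64 implements the SLI
// (shift-left-insert) operation on a scalar: the second operand is shifted
// left by the immediate and inserted into the first, whose low bits are
// preserved. Conceptually, vslid_n_u64(a, b, n) == (b << n) | (a & ((1ULL << n) - 1)),
// so vslid_n_u64(0xFFu, 0x1u, 8) yields 0x1FF.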
 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x1_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
   __ret; \
 })
 #else
 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -62773,9 +62530,9 @@
 }
 #else
 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62790,9 +62547,9 @@
 }
 #else
 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62807,9 +62564,9 @@
 }
 #else
 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62824,9 +62581,9 @@
 }
 #else
 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62841,9 +62598,9 @@
 }
 #else
 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -62858,9 +62615,9 @@
 }
 #else
 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62880,9 +62637,9 @@
 }
 #else
 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
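// Illustrative sketch (editorial note): the vsqadd* intrinsics map to
// USQADD, which adds a signed vector into an unsigned one with unsigned
// saturation. For instance, with u8 lanes of 250 and s8 lanes of 10,
// vsqaddq_u8 saturates every lane to 255; with s8 lanes of -10 it yields 240.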
@@ -62897,8 +62654,8 @@
 }
 #else
 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62913,8 +62670,8 @@
 }
 #else
 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -62934,8 +62691,8 @@
 }
 #else
 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -62943,55 +62700,55 @@
 #endif
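// Illustrative sketch (editorial note): vsqrtq_f64/vsqrtq_f32 map to the
// per-lane FSQRT instruction, e.g.:
//
//   float64x2_t roots(void) {
//       return vsqrtq_f64(vdupq_n_f64(9.0));  // both lanes become 3.0
//   }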
 
 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __ret; \
   uint64_t __s0 = __p0; \
   uint64_t __s1 = __p1; \
-  uint64_t __ret; \
   __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
   __ret; \
 })
 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __ret; \
   int64_t __s0 = __p0; \
   int64_t __s1 = __p1; \
-  int64_t __ret; \
   __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
   __ret; \
 })
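// Illustrative sketch (editorial note): vsrad_n_* is a shift-right
// accumulate (USRA/SSRA): the second operand is shifted right by the
// immediate and added to the first, so vsrad_n_u64(1u, 0x100u, 4) yields
// 0x11. vsrid_n_* is a shift-right insert (SRI): the shifted value is
// merged into the first operand, whose top n bits are preserved.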
 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x1_t __ret; \
   poly64x1_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
   __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
   __ret; \
 })
 #else
 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+  poly64x2_t __ret; \
   poly64x2_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
@@ -63808,9 +63565,9 @@
 }
 #else
 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __rev0 - __rev1;
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -63830,10 +63587,10 @@
 }
 #else
 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint16x8_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63848,10 +63605,10 @@
 }
 #else
 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint32x4_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63866,10 +63623,10 @@
 }
 #else
 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint8x16_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63884,10 +63641,10 @@
 }
 #else
 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int16x8_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63902,10 +63659,10 @@
 }
 #else
 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int32x4_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63920,10 +63677,10 @@
 }
 #else
 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int8x16_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63938,9 +63695,9 @@
 }
 #else
 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -63955,9 +63712,9 @@
 }
 #else
 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -63972,9 +63729,9 @@
 }
 #else
 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -63989,9 +63746,9 @@
 }
 #else
 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64006,9 +63763,9 @@
 }
 #else
 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64023,9 +63780,9 @@
 }
 #else
 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64040,9 +63797,9 @@
 }
 #else
 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64057,9 +63814,9 @@
 }
 #else
 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64074,9 +63831,9 @@
 }
 #else
 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64091,9 +63848,9 @@
 }
 #else
 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64108,9 +63865,9 @@
 }
 #else
 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64125,9 +63882,9 @@
 }
 #else
 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64135,54 +63892,54 @@
 #endif
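// Illustrative sketch (editorial note): these AArch64 "high" forms operate
// on the upper halves of 128-bit inputs. vsubl_high_* widens the top
// halves of both operands before subtracting, vsubw_high_* widens only the
// second operand's top half, and vsubhn_high_* keeps the high half of each
// wide difference and packs it above the first operand:
//
//   uint16x8_t diff_hi(uint16x4_t lo, uint32x4_t a, uint32x4_t b) {
//       // lanes 4..7 are (uint16_t)((a - b) >> 16); lanes 0..3 are lo
//       return vsubhn_high_u32(lo, a, b);
//   }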
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
+#define vsudotq_laneq_s32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
+  int32x4_t __ret_839; \
+  int32x4_t __s0_839 = __p0_839; \
+  int8x16_t __s1_839 = __p1_839; \
+  uint8x16_t __s2_839 = __p2_839; \
+uint8x16_t __reint_839 = __s2_839; \
+  __ret_839 = vusdotq_s32(__s0_839, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_839, __p3_839)), __s1_839); \
+  __ret_839; \
 })
 #else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
+#define vsudotq_laneq_s32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
+  int32x4_t __ret_840; \
+  int32x4_t __s0_840 = __p0_840; \
+  int8x16_t __s1_840 = __p1_840; \
+  uint8x16_t __s2_840 = __p2_840; \
+  int32x4_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 3, 2, 1, 0); \
+  int8x16_t __rev1_840;  __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_840;  __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_840 = __rev2_840; \
+  __ret_840 = __noswap_vusdotq_s32(__rev0_840, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_840, __p3_840)), __rev1_840); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
+#define vsudot_laneq_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
+  int32x2_t __ret_841; \
+  int32x2_t __s0_841 = __p0_841; \
+  int8x8_t __s1_841 = __p1_841; \
+  uint8x16_t __s2_841 = __p2_841; \
+uint8x16_t __reint_841 = __s2_841; \
+  __ret_841 = vusdot_s32(__s0_841, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_841, __p3_841)), __s1_841); \
+  __ret_841; \
 })
 #else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
+#define vsudot_laneq_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
+  int32x2_t __ret_842; \
+  int32x2_t __s0_842 = __p0_842; \
+  int8x8_t __s1_842 = __p1_842; \
+  uint8x16_t __s2_842 = __p2_842; \
+  int32x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
+  int8x8_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+uint8x16_t __reint_842 = __rev2_842; \
+  __ret_842 = __noswap_vusdot_s32(__rev0_842, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_842, __p3_842)), __rev1_842); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
+  __ret_842; \
 })
 #endif
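// Illustrative sketch (editorial note; these intrinsics require the i8mm
// extension): vsudot(q)_laneq_s32 accumulates a mixed-sign dot product:
// each 32-bit accumulator lane gains the sum of four signed bytes from the
// second operand times four unsigned bytes taken from the selected 32-bit
// group of the third. The macros realize this by splatting the chosen lane
// and delegating to vusdot(q)_s32 with the byte operands swapped.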
 
@@ -64194,9 +63951,9 @@
 }
 #else
 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64211,9 +63968,9 @@
 }
 #else
 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64228,9 +63985,9 @@
 }
 #else
 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64245,9 +64002,9 @@
 }
 #else
 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64262,9 +64019,9 @@
 }
 #else
 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64279,9 +64036,9 @@
 }
 #else
 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64296,9 +64053,9 @@
 }
 #else
 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64313,9 +64070,9 @@
 }
 #else
 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64330,9 +64087,9 @@
 }
 #else
 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64347,9 +64104,9 @@
 }
 #else
 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64364,9 +64121,9 @@
 }
 #else
 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64381,9 +64138,9 @@
 }
 #else
 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64398,9 +64155,9 @@
 }
 #else
 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64415,9 +64172,9 @@
 }
 #else
 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64432,9 +64189,9 @@
 }
 #else
 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64449,9 +64206,9 @@
 }
 #else
 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64466,9 +64223,9 @@
 }
 #else
 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64483,9 +64240,9 @@
 }
 #else
 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64500,9 +64257,9 @@
 }
 #else
 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64517,9 +64274,9 @@
 }
 #else
 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64534,9 +64291,9 @@
 }
 #else
 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64551,9 +64308,9 @@
 }
 #else
 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64568,9 +64325,9 @@
 }
 #else
 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64585,9 +64342,9 @@
 }
 #else
 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64602,9 +64359,9 @@
 }
 #else
 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64619,9 +64376,9 @@
 }
 #else
 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64636,9 +64393,9 @@
 }
 #else
 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64653,9 +64410,9 @@
 }
 #else
 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64670,9 +64427,9 @@
 }
 #else
 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64687,9 +64444,9 @@
 }
 #else
 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64704,9 +64461,9 @@
 }
 #else
 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64721,9 +64478,9 @@
 }
 #else
 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64738,9 +64495,9 @@
 }
 #else
 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64755,9 +64512,9 @@
 }
 #else
 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64772,9 +64529,9 @@
 }
 #else
 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64789,9 +64546,9 @@
 }
 #else
 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64806,9 +64563,9 @@
 }
 #else
 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64823,9 +64580,9 @@
 }
 #else
 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64840,9 +64597,9 @@
 }
 #else
 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64857,9 +64614,9 @@
 }
 #else
 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -64874,9 +64631,9 @@
 }
 #else
 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -64891,9 +64648,9 @@
 }
 #else
 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64908,9 +64665,9 @@
 }
 #else
 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64925,9 +64682,9 @@
 }
 #else
 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
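// Illustrative sketch (editorial note): vtrn1 interleaves the even-indexed
// lanes of its two inputs and vtrn2 the odd-indexed ones, e.g. with
// uint32x2_t a = {a0, a1} and b = {b0, b1}, vtrn1_u32(a, b) yields
// {a0, b0} and vtrn2_u32(a, b) yields {a1, b1}.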
@@ -64947,9 +64704,9 @@
 }
 #else
 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  uint64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64964,9 +64721,9 @@
 }
 #else
 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -64981,9 +64738,9 @@
 }
 #else
 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65038,9 +64795,9 @@
 }
 #else
 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65055,9 +64812,9 @@
 }
 #else
 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65072,9 +64829,9 @@
 }
 #else
 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65089,9 +64846,9 @@
 }
 #else
 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65106,9 +64863,9 @@
 }
 #else
 __ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65123,9 +64880,9 @@
 }
 #else
 __ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65145,9 +64902,9 @@
 }
 #else
 __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65155,54 +64912,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
+#define vusdotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  int32x4_t __ret_843; \
+  int32x4_t __s0_843 = __p0_843; \
+  uint8x16_t __s1_843 = __p1_843; \
+  int8x16_t __s2_843 = __p2_843; \
+int8x16_t __reint_843 = __s2_843; \
+  __ret_843 = vusdotq_s32(__s0_843, __s1_843, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843))); \
+  __ret_843; \
 })
 #else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
+#define vusdotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  int32x4_t __ret_844; \
+  int32x4_t __s0_844 = __p0_844; \
+  uint8x16_t __s1_844 = __p1_844; \
+  int8x16_t __s2_844 = __p2_844; \
+  int32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
+  uint8x16_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_844 = __rev2_844; \
+  __ret_844 = __noswap_vusdotq_s32(__rev0_844, __rev1_844, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844))); \
+  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
+#define vusdot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  int32x2_t __ret_845; \
+  int32x2_t __s0_845 = __p0_845; \
+  uint8x8_t __s1_845 = __p1_845; \
+  int8x16_t __s2_845 = __p2_845; \
+int8x16_t __reint_845 = __s2_845; \
+  __ret_845 = vusdot_s32(__s0_845, __s1_845, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845))); \
+  __ret_845; \
 })
 #else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
+#define vusdot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  int32x2_t __ret_846; \
+  int32x2_t __s0_846 = __p0_846; \
+  uint8x8_t __s1_846 = __p1_846; \
+  int8x16_t __s2_846 = __p2_846; \
+  int32x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
+  uint8x8_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+int8x16_t __reint_846 = __rev2_846; \
+  __ret_846 = __noswap_vusdot_s32(__rev0_846, __rev1_846, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846))); \
+  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
+  __ret_846; \
 })
 #endif
 
@@ -65214,9 +64971,9 @@
 }
 #else
 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65231,9 +64988,9 @@
 }
 #else
 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65248,9 +65005,9 @@
 }
 #else
 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65265,9 +65022,9 @@
 }
 #else
 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65282,9 +65039,9 @@
 }
 #else
 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65299,9 +65056,9 @@
 }
 #else
 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65316,9 +65073,9 @@
 }
 #else
 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65333,9 +65090,9 @@
 }
 #else
 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65350,9 +65107,9 @@
 }
 #else
 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65367,9 +65124,9 @@
 }
 #else
 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65384,9 +65141,9 @@
 }
 #else
 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65401,9 +65158,9 @@
 }
 #else
 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65418,9 +65175,9 @@
 }
 #else
 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65435,9 +65192,9 @@
 }
 #else
 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65452,9 +65209,9 @@
 }
 #else
 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65469,9 +65226,9 @@
 }
 #else
 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65486,9 +65243,9 @@
 }
 #else
 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65503,9 +65260,9 @@
 }
 #else
 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65520,9 +65277,9 @@
 }
 #else
 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65537,9 +65294,9 @@
 }
 #else
 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65554,9 +65311,9 @@
 }
 #else
 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65571,9 +65328,9 @@
 }
 #else
 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65588,9 +65345,9 @@
 }
 #else
 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65605,9 +65362,9 @@
 }
 #else
 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65622,9 +65379,9 @@
 }
 #else
 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65639,9 +65396,9 @@
 }
 #else
 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65656,9 +65413,9 @@
 }
 #else
 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65673,9 +65430,9 @@
 }
 #else
 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65690,9 +65447,9 @@
 }
 #else
 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65707,9 +65464,9 @@
 }
 #else
 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65724,9 +65481,9 @@
 }
 #else
 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65741,9 +65498,9 @@
 }
 #else
 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65758,9 +65515,9 @@
 }
 #else
 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65775,9 +65532,9 @@
 }
 #else
 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65792,9 +65549,9 @@
 }
 #else
 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65809,9 +65566,9 @@
 }
 #else
 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65826,9 +65583,9 @@
 }
 #else
 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65843,9 +65600,9 @@
 }
 #else
 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65860,9 +65617,9 @@
 }
 #else
 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65877,9 +65634,9 @@
 }
 #else
 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65894,9 +65651,9 @@
 }
 #else
 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65911,9 +65668,9 @@
 }
 #else
 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65928,9 +65685,9 @@
 }
 #else
 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -65945,9 +65702,9 @@
 }
 #else
 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65962,9 +65719,9 @@
 }
 #else
 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -65979,9 +65736,9 @@
 }
 #else
 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -65996,9 +65753,9 @@
 }
 #else
 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66013,9 +65770,9 @@
 }
 #else
 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66030,9 +65787,9 @@
 }
 #else
 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66047,9 +65804,9 @@
 }
 #else
 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66064,9 +65821,9 @@
 }
 #else
 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66081,9 +65838,9 @@
 }
 #else
 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66098,9 +65855,9 @@
 }
 #else
 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66115,9 +65872,9 @@
 }
 #else
 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66132,9 +65889,9 @@
 }
 #else
 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66149,9 +65906,9 @@
 }
 #else
 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66166,9 +65923,9 @@
 }
 #else
 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66183,9 +65940,9 @@
 }
 #else
 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66200,9 +65957,9 @@
 }
 #else
 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66217,9 +65974,9 @@
 }
 #else
 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66234,9 +65991,9 @@
 }
 #else
 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66251,9 +66008,9 @@
 }
 #else
 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66268,9 +66025,9 @@
 }
 #else
 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66285,9 +66042,9 @@
 }
 #else
 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66302,9 +66059,9 @@
 }
 #else
 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66319,9 +66076,9 @@
 }
 #else
 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66336,9 +66093,9 @@
 }
 #else
 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66353,9 +66110,9 @@
 }
 #else
 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66370,9 +66127,9 @@
 }
 #else
 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66387,9 +66144,9 @@
 }
 #else
 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66404,9 +66161,9 @@
 }
 #else
 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66421,9 +66178,9 @@
 }
 #else
 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66438,9 +66195,9 @@
 }
 #else
 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66455,9 +66212,9 @@
 }
 #else
 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66472,9 +66229,9 @@
 }
 #else
 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66489,9 +66246,9 @@
 }
 #else
 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66506,9 +66263,9 @@
 }
 #else
 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66523,9 +66280,9 @@
 }
 #else
 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66540,9 +66297,9 @@
 }
 #else
 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66557,9 +66314,9 @@
 }
 #else
 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66574,9 +66331,9 @@
 }
 #else
 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66591,9 +66348,9 @@
 }
 #else
 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66608,9 +66365,9 @@
 }
 #else
 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66625,9 +66382,9 @@
 }
 #else
 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66642,9 +66399,9 @@
 }
 #else
 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66659,9 +66416,9 @@
 }
 #else
 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66676,9 +66433,9 @@
 }
 #else
 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66693,9 +66450,9 @@
 }
 #else
 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66703,6 +66460,331 @@
 #endif
 
 #endif
+#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrnd_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrnda_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndi_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndm_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndn_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndp_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vrndx_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#endif
+#if defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
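+// vmaxnm/vminnm follow IEEE 754-2008 maxNum/minNum semantics: when exactly one
+// operand is a quiet NaN, the numeric operand is returned. Illustrative sketch
+// (not part of the generated header):
+//   float64x2_t a = vdupq_n_f64(1.5);
+//   float64x2_t b = vdupq_n_f64(__builtin_nan(""));
+//   float64x2_t m = vmaxnmq_f64(a, b);  // both lanes == 1.5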
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#endif
 #ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint8x16_t __ret;
@@ -66711,10 +66793,10 @@
 }
 #else
 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
   __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66729,10 +66811,10 @@
 }
 #else
 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66747,10 +66829,10 @@
 }
 #else
 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66765,10 +66847,10 @@
 }
 #else
 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
   __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66783,10 +66865,10 @@
 }
 #else
 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66801,10 +66883,10 @@
 }
 #else
 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66819,10 +66901,10 @@
 }
 #else
 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
   __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66837,10 +66919,10 @@
 }
 #else
 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
   __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66855,10 +66937,10 @@
 }
 #else
 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
   __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66873,10 +66955,10 @@
 }
 #else
 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
   __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66891,10 +66973,10 @@
 }
 #else
 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
   __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66909,10 +66991,10 @@
 }
 #else
 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
   __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66927,9 +67009,9 @@
 }
 #else
 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -66949,9 +67031,9 @@
 }
 #else
 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -66971,9 +67053,9 @@
 }
 #else
 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -66993,9 +67075,9 @@
 }
 #else
 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67015,9 +67097,9 @@
 }
 #else
 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67037,9 +67119,9 @@
 }
 #else
 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67059,9 +67141,9 @@
 }
 #else
 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67076,9 +67158,9 @@
 }
 #else
 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67093,9 +67175,9 @@
 }
 #else
 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67110,9 +67192,9 @@
 }
 #else
 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67127,9 +67209,9 @@
 }
 #else
 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67144,9 +67226,9 @@
 }
 #else
 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67161,9 +67243,9 @@
 }
 #else
 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67178,9 +67260,9 @@
 }
 #else
 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67195,9 +67277,9 @@
 }
 #else
 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67212,9 +67294,9 @@
 }
 #else
 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67229,9 +67311,9 @@
 }
 #else
 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67246,9 +67328,9 @@
 }
 #else
 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67256,60 +67338,60 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
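+// vget_lane_f16 is a macro (not a function) so the lane index stays a
+// compile-time constant; the float16 vector is reinterpreted as int16x4_t, the
+// integer lane accessor does the extraction, and the result bits are
+// reinterpreted back to float16_t.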
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
+#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \
+  float16_t __ret_847; \
+  float16x4_t __s0_847 = __p0_847; \
+float16x4_t __reint_847 = __s0_847; \
+int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \
+  __ret_847 = *(float16_t *) &__reint1_847; \
+  __ret_847; \
 })
 #else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
+#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \
+  float16_t __ret_848; \
+  float16x4_t __s0_848 = __p0_848; \
+  float16x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
+float16x4_t __reint_848 = __rev0_848; \
+int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \
+  __ret_848 = *(float16_t *) &__reint1_848; \
+  __ret_848; \
 })
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
+#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \
+  float16_t __ret_849; \
+  float16x4_t __s0_849 = __p0_849; \
+float16x4_t __reint_849 = __s0_849; \
+int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \
+  __ret_849 = *(float16_t *) &__reint1_849; \
+  __ret_849; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
+#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \
+  float16_t __ret_850; \
+  float16x8_t __s0_850 = __p0_850; \
+float16x8_t __reint_850 = __s0_850; \
+int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \
+  __ret_850 = *(float16_t *) &__reint1_850; \
+  __ret_850; \
 })
 #else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
+#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \
+  float16_t __ret_851; \
+  float16x8_t __s0_851 = __p0_851; \
+  float16x8_t __rev0_851;  __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16x8_t __reint_851 = __rev0_851; \
+int16_t __reint1_851 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \
+  __ret_851 = *(float16_t *) &__reint1_851; \
+  __ret_851; \
 })
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
+#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \
+  float16_t __ret_852; \
+  float16x8_t __s0_852 = __p0_852; \
+float16x8_t __reint_852 = __s0_852; \
+int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \
+  __ret_852 = *(float16_t *) &__reint1_852; \
+  __ret_852; \
 })
 #endif
 
@@ -67321,10 +67403,10 @@
 }
 #else
 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67344,10 +67426,10 @@
 }
 #else
 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67367,10 +67449,10 @@
 }
 #else
 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67390,10 +67472,10 @@
 }
 #else
 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67413,10 +67495,10 @@
 }
 #else
 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67436,10 +67518,10 @@
 }
 #else
 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67452,98 +67534,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
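+// vmlal_lane_*: widening multiply-accumulate against a single lane. The macro
+// splats the selected lane of the third operand, performs the widening vmull,
+// and adds the accumulator; the #else branch wraps the same expansion in the
+// usual lane reversals via the __noswap_ helpers.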
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
+#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
+  uint64x2_t __ret_853; \
+  uint64x2_t __s0_853 = __p0_853; \
+  uint32x2_t __s1_853 = __p1_853; \
+  uint32x2_t __s2_853 = __p2_853; \
+  __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \
+  __ret_853; \
 })
 #else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
+#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
+  uint64x2_t __ret_854; \
+  uint64x2_t __s0_854 = __p0_854; \
+  uint32x2_t __s1_854 = __p1_854; \
+  uint32x2_t __s2_854 = __p2_854; \
+  uint64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
+  uint32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
+  uint32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
+  __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \
+  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
+  __ret_854; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
+#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
+  uint32x4_t __ret_855; \
+  uint32x4_t __s0_855 = __p0_855; \
+  uint16x4_t __s1_855 = __p1_855; \
+  uint16x4_t __s2_855 = __p2_855; \
+  __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \
+  __ret_855; \
 })
 #else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
+#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
+  uint32x4_t __ret_856; \
+  uint32x4_t __s0_856 = __p0_856; \
+  uint16x4_t __s1_856 = __p1_856; \
+  uint16x4_t __s2_856 = __p2_856; \
+  uint32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
+  uint16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
+  uint16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
+  __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \
+  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
+  __ret_856; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
+#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \
+  int64x2_t __ret_857; \
+  int64x2_t __s0_857 = __p0_857; \
+  int32x2_t __s1_857 = __p1_857; \
+  int32x2_t __s2_857 = __p2_857; \
+  __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \
+  __ret_857; \
 })
 #else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
+#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \
+  int64x2_t __ret_858; \
+  int64x2_t __s0_858 = __p0_858; \
+  int32x2_t __s1_858 = __p1_858; \
+  int32x2_t __s2_858 = __p2_858; \
+  int64x2_t __rev0_858;  __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \
+  int32x2_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \
+  int32x2_t __rev2_858;  __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \
+  __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \
+  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \
+  __ret_858; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
+#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \
+  int32x4_t __ret_859; \
+  int32x4_t __s0_859 = __p0_859; \
+  int16x4_t __s1_859 = __p1_859; \
+  int16x4_t __s2_859 = __p2_859; \
+  __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \
+  __ret_859; \
 })
 #else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
+#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \
+  int32x4_t __ret_860; \
+  int32x4_t __s0_860 = __p0_860; \
+  int16x4_t __s1_860 = __p1_860; \
+  int16x4_t __s2_860 = __p2_860; \
+  int32x4_t __rev0_860;  __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \
+  int16x4_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \
+  int16x4_t __rev2_860;  __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \
+  __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \
+  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \
+  __ret_860; \
 })
 #endif
 
@@ -67555,9 +67637,9 @@
 }
 #else
 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67577,9 +67659,9 @@
 }
 #else
 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67599,9 +67681,9 @@
 }
 #else
 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67621,9 +67703,9 @@
 }
 #else
 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67643,10 +67725,10 @@
 }
 #else
 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67666,10 +67748,10 @@
 }
 #else
 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67689,10 +67771,10 @@
 }
 #else
 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67712,10 +67794,10 @@
 }
 #else
 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -67735,10 +67817,10 @@
 }
 #else
 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67758,10 +67840,10 @@
 }
 #else
 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67774,98 +67856,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
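+// vmlsl_lane_*: same expansion shape as vmlal_lane_* above, with the widened
+// product subtracted from the accumulator instead of added.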
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
+#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
+  uint64x2_t __ret_861; \
+  uint64x2_t __s0_861 = __p0_861; \
+  uint32x2_t __s1_861 = __p1_861; \
+  uint32x2_t __s2_861 = __p2_861; \
+  __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \
+  __ret_861; \
 })
 #else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
+#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
+  uint64x2_t __ret_862; \
+  uint64x2_t __s0_862 = __p0_862; \
+  uint32x2_t __s1_862 = __p1_862; \
+  uint32x2_t __s2_862 = __p2_862; \
+  uint64x2_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \
+  uint32x2_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \
+  uint32x2_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \
+  __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \
+  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \
+  __ret_862; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
+#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
+  uint32x4_t __ret_863; \
+  uint32x4_t __s0_863 = __p0_863; \
+  uint16x4_t __s1_863 = __p1_863; \
+  uint16x4_t __s2_863 = __p2_863; \
+  __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \
+  __ret_863; \
 })
 #else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
+#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
+  uint32x4_t __ret_864; \
+  uint32x4_t __s0_864 = __p0_864; \
+  uint16x4_t __s1_864 = __p1_864; \
+  uint16x4_t __s2_864 = __p2_864; \
+  uint32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
+  uint16x4_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \
+  uint16x4_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \
+  __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \
+  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
+  __ret_864; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
+#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
+  int64x2_t __ret_865; \
+  int64x2_t __s0_865 = __p0_865; \
+  int32x2_t __s1_865 = __p1_865; \
+  int32x2_t __s2_865 = __p2_865; \
+  __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \
+  __ret_865; \
 })
 #else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
+#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
+  int64x2_t __ret_866; \
+  int64x2_t __s0_866 = __p0_866; \
+  int32x2_t __s1_866 = __p1_866; \
+  int32x2_t __s2_866 = __p2_866; \
+  int64x2_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \
+  int32x2_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \
+  int32x2_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \
+  __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \
+  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \
+  __ret_866; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
+#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
+  int32x4_t __ret_867; \
+  int32x4_t __s0_867 = __p0_867; \
+  int16x4_t __s1_867 = __p1_867; \
+  int16x4_t __s2_867 = __p2_867; \
+  __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \
+  __ret_867; \
 })
 #else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
+#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
+  int32x4_t __ret_868; \
+  int32x4_t __s0_868 = __p0_868; \
+  int16x4_t __s1_868 = __p1_868; \
+  int16x4_t __s2_868 = __p2_868; \
+  int32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
+  int16x4_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \
+  int16x4_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \
+  __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \
+  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
+  __ret_868; \
 })
 #endif
 
@@ -67877,9 +67959,9 @@
 }
 #else
 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67899,9 +67981,9 @@
 }
 #else
 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67921,9 +68003,9 @@
 }
 #else
 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -67943,9 +68025,9 @@
 }
 #else
 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -67958,151 +68040,151 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
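+// vset_lane_f16 mirrors vget_lane_f16 above: scalar and vector are
+// reinterpreted through int16 types so the integer lane-insert builtin can be
+// reused, then the result is reinterpreted back to a float16 vector.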
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
+#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \
+  float16x4_t __ret_869; \
+  float16_t __s0_869 = __p0_869; \
+  float16x4_t __s1_869 = __p1_869; \
+float16_t __reint_869 = __s0_869; \
+float16x4_t __reint1_869 = __s1_869; \
+int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \
+  __ret_869 = *(float16x4_t *) &__reint2_869; \
+  __ret_869; \
 })
 #else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
+#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \
+  float16x4_t __ret_870; \
+  float16_t __s0_870 = __p0_870; \
+  float16x4_t __s1_870 = __p1_870; \
+  float16x4_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \
+float16_t __reint_870 = __s0_870; \
+float16x4_t __reint1_870 = __rev1_870; \
+int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \
+  __ret_870 = *(float16x4_t *) &__reint2_870; \
+  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
+  __ret_870; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
+#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \
+  float16x8_t __ret_871; \
+  float16_t __s0_871 = __p0_871; \
+  float16x8_t __s1_871 = __p1_871; \
+float16_t __reint_871 = __s0_871; \
+float16x8_t __reint1_871 = __s1_871; \
+int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \
+  __ret_871 = *(float16x8_t *) &__reint2_871; \
+  __ret_871; \
 })
 #else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
+#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \
+  float16x8_t __ret_872; \
+  float16_t __s0_872 = __p0_872; \
+  float16x8_t __s1_872 = __p1_872; \
+  float16x8_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); \
+float16_t __reint_872 = __s0_872; \
+float16x8_t __reint1_872 = __rev1_872; \
+int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \
+  __ret_872 = *(float16x8_t *) &__reint2_872; \
+  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_872; \
 })
 #endif
 
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
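+// vbfmlalbq_lane_f32 broadcasts one bfloat16 lane across a bfloat16x8_t and
+// feeds it to vbfmlalbq_f32 (BFMLALB: multiply the even-indexed bf16 elements,
+// widen to float32, accumulate). Description inferred from the expansion below.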
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
+#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
+  float32x4_t __ret_873; \
+  float32x4_t __s0_873 = __p0_873; \
+  bfloat16x8_t __s1_873 = __p1_873; \
+  bfloat16x4_t __s2_873 = __p2_873; \
+  __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \
+  __ret_873; \
 })
 #else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
+#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
+  float32x4_t __ret_874; \
+  float32x4_t __s0_874 = __p0_874; \
+  bfloat16x8_t __s1_874 = __p1_874; \
+  bfloat16x4_t __s2_874 = __p2_874; \
+  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
+  __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \
+  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
+  __ret_874; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
+#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
+  float32x4_t __ret_875; \
+  float32x4_t __s0_875 = __p0_875; \
+  bfloat16x8_t __s1_875 = __p1_875; \
+  bfloat16x8_t __s2_875 = __p2_875; \
+  __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \
+  __ret_875; \
 })
 #else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
+#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
+  float32x4_t __ret_876; \
+  float32x4_t __s0_876 = __p0_876; \
+  bfloat16x8_t __s1_876 = __p1_876; \
+  bfloat16x8_t __s2_876 = __p2_876; \
+  float32x4_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \
+  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \
+  __ret_876; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
+#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
+  float32x4_t __ret_877; \
+  float32x4_t __s0_877 = __p0_877; \
+  bfloat16x8_t __s1_877 = __p1_877; \
+  bfloat16x4_t __s2_877 = __p2_877; \
+  __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \
+  __ret_877; \
 })
 #else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
+#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
+  float32x4_t __ret_878; \
+  float32x4_t __s0_878 = __p0_878; \
+  bfloat16x8_t __s1_878 = __p1_878; \
+  bfloat16x4_t __s2_878 = __p2_878; \
+  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \
+  __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \
+  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
+  __ret_878; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
+#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
+  float32x4_t __ret_879; \
+  float32x4_t __s0_879 = __p0_879; \
+  bfloat16x8_t __s1_879 = __p1_879; \
+  bfloat16x8_t __s2_879 = __p2_879; \
+  __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \
+  __ret_879; \
 })
 #else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
+#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
+  float32x4_t __ret_880; \
+  float32x4_t __s0_880 = __p0_880; \
+  bfloat16x8_t __s1_880 = __p1_880; \
+  bfloat16x8_t __s2_880 = __p2_880; \
+  float32x4_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \
+  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \
+  __ret_880; \
 })
 #endif
 
@@ -68114,8 +68196,8 @@
 }
 #else
 __ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float32x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68130,8 +68212,8 @@
 }
 #else
 __ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   float32x4_t __ret;
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68141,673 +68223,483 @@
 #endif
 #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+  float32x4_t __ret_881; \
   float32x4_t __s0_881 = __p0_881; \
   float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
+  float16x4_t __s2_881 = __p2_881; \
+  __ret_881 = vfmlalq_high_f16(__s0_881, __s1_881, (float16x8_t) {vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881), vget_lane_f16(__s2_881, __p3_881)}); \
   __ret_881; \
 })
 #else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+  float32x4_t __ret_882; \
   float32x4_t __s0_882 = __p0_882; \
   float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
+  float16x4_t __s2_882 = __p2_882; \
   float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
   float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
+  float16x4_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \
+  __ret_882 = __noswap_vfmlalq_high_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882), __noswap_vget_lane_f16(__rev2_882, __p3_882)}); \
   __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
   __ret_882; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+  float32x2_t __ret_883; \
   float32x2_t __s0_883 = __p0_883; \
   float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
+  float16x4_t __s2_883 = __p2_883; \
+  __ret_883 = vfmlal_high_f16(__s0_883, __s1_883, (float16x4_t) {vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883), vget_lane_f16(__s2_883, __p3_883)}); \
   __ret_883; \
 })
 #else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+  float32x2_t __ret_884; \
   float32x2_t __s0_884 = __p0_884; \
   float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
+  float16x4_t __s2_884 = __p2_884; \
   float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
   float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
+  float16x4_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 3, 2, 1, 0); \
+  __ret_884 = __noswap_vfmlal_high_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884), __noswap_vget_lane_f16(__rev2_884, __p3_884)}); \
   __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
   __ret_884; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+  float32x4_t __ret_885; \
   float32x4_t __s0_885 = __p0_885; \
   float16x8_t __s1_885 = __p1_885; \
   float16x4_t __s2_885 = __p2_885; \
-  float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
+  __ret_885 = vfmlalq_low_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
   __ret_885; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+  float32x4_t __ret_886; \
   float32x4_t __s0_886 = __p0_886; \
   float16x8_t __s1_886 = __p1_886; \
   float16x4_t __s2_886 = __p2_886; \
   float32x4_t __rev0_886;  __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \
   float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
-  float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
+  __ret_886 = __noswap_vfmlalq_low_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
   __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
   __ret_886; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+  float32x2_t __ret_887; \
   float32x2_t __s0_887 = __p0_887; \
   float16x4_t __s1_887 = __p1_887; \
   float16x4_t __s2_887 = __p2_887; \
-  float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
+  __ret_887 = vfmlal_low_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
   __ret_887; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+  float32x2_t __ret_888; \
   float32x2_t __s0_888 = __p0_888; \
   float16x4_t __s1_888 = __p1_888; \
   float16x4_t __s2_888 = __p2_888; \
   float32x2_t __rev0_888;  __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \
   float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
   float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
-  float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
+  __ret_888 = __noswap_vfmlal_low_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
   __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
   __ret_888; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+  float32x4_t __ret_889; \
   float32x4_t __s0_889 = __p0_889; \
   float16x8_t __s1_889 = __p1_889; \
-  float16x4_t __s2_889 = __p2_889; \
-  float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
+  float16x8_t __s2_889 = __p2_889; \
+  __ret_889 = vfmlalq_high_f16(__s0_889, __s1_889, (float16x8_t) {vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889), vgetq_lane_f16(__s2_889, __p3_889)}); \
   __ret_889; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+  float32x4_t __ret_890; \
   float32x4_t __s0_890 = __p0_890; \
   float16x8_t __s1_890 = __p1_890; \
-  float16x4_t __s2_890 = __p2_890; \
+  float16x8_t __s2_890 = __p2_890; \
   float32x4_t __rev0_890;  __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \
   float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
-  float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
+  float16x8_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_890 = __noswap_vfmlalq_high_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890), __noswap_vgetq_lane_f16(__rev2_890, __p3_890)}); \
   __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
   __ret_890; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+  float32x2_t __ret_891; \
   float32x2_t __s0_891 = __p0_891; \
   float16x4_t __s1_891 = __p1_891; \
-  float16x4_t __s2_891 = __p2_891; \
-  float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
+  float16x8_t __s2_891 = __p2_891; \
+  __ret_891 = vfmlal_high_f16(__s0_891, __s1_891, (float16x4_t) {vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891), vgetq_lane_f16(__s2_891, __p3_891)}); \
   __ret_891; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+  float32x2_t __ret_892; \
   float32x2_t __s0_892 = __p0_892; \
   float16x4_t __s1_892 = __p1_892; \
-  float16x4_t __s2_892 = __p2_892; \
+  float16x8_t __s2_892 = __p2_892; \
   float32x2_t __rev0_892;  __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \
   float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
-  float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
-  float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
+  float16x8_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_892 = __noswap_vfmlal_high_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892), __noswap_vgetq_lane_f16(__rev2_892, __p3_892)}); \
   __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
   __ret_892; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+  float32x4_t __ret_893; \
   float32x4_t __s0_893 = __p0_893; \
   float16x8_t __s1_893 = __p1_893; \
   float16x8_t __s2_893 = __p2_893; \
-  float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
+  __ret_893 = vfmlalq_low_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
   __ret_893; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+  float32x4_t __ret_894; \
   float32x4_t __s0_894 = __p0_894; \
   float16x8_t __s1_894 = __p1_894; \
   float16x8_t __s2_894 = __p2_894; \
   float32x4_t __rev0_894;  __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \
   float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
+  __ret_894 = __noswap_vfmlalq_low_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
   __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
   __ret_894; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+  float32x2_t __ret_895; \
   float32x2_t __s0_895 = __p0_895; \
   float16x4_t __s1_895 = __p1_895; \
   float16x8_t __s2_895 = __p2_895; \
-  float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
+  __ret_895 = vfmlal_low_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
   __ret_895; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+  float32x2_t __ret_896; \
   float32x2_t __s0_896 = __p0_896; \
   float16x4_t __s1_896 = __p1_896; \
   float16x8_t __s2_896 = __p2_896; \
   float32x2_t __rev0_896;  __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \
   float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
   float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
+  __ret_896 = __noswap_vfmlal_low_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
   __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
   __ret_896; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+#define vfmlslq_lane_high_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+  float32x4_t __ret_897; \
   float32x4_t __s0_897 = __p0_897; \
   float16x8_t __s1_897 = __p1_897; \
-  float16x8_t __s2_897 = __p2_897; \
-  float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
+  float16x4_t __s2_897 = __p2_897; \
+  __ret_897 = vfmlslq_high_f16(__s0_897, __s1_897, (float16x8_t) {vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897), vget_lane_f16(__s2_897, __p3_897)}); \
   __ret_897; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+#define vfmlslq_lane_high_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+  float32x4_t __ret_898; \
   float32x4_t __s0_898 = __p0_898; \
   float16x8_t __s1_898 = __p1_898; \
-  float16x8_t __s2_898 = __p2_898; \
+  float16x4_t __s2_898 = __p2_898; \
   float32x4_t __rev0_898;  __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \
   float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
+  float16x4_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 3, 2, 1, 0); \
+  __ret_898 = __noswap_vfmlslq_high_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898), __noswap_vget_lane_f16(__rev2_898, __p3_898)}); \
   __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
   __ret_898; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+#define vfmlsl_lane_high_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+  float32x2_t __ret_899; \
   float32x2_t __s0_899 = __p0_899; \
   float16x4_t __s1_899 = __p1_899; \
-  float16x8_t __s2_899 = __p2_899; \
-  float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
+  float16x4_t __s2_899 = __p2_899; \
+  __ret_899 = vfmlsl_high_f16(__s0_899, __s1_899, (float16x4_t) {vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899), vget_lane_f16(__s2_899, __p3_899)}); \
   __ret_899; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+#define vfmlsl_lane_high_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+  float32x2_t __ret_900; \
   float32x2_t __s0_900 = __p0_900; \
   float16x4_t __s1_900 = __p1_900; \
-  float16x8_t __s2_900 = __p2_900; \
+  float16x4_t __s2_900 = __p2_900; \
   float32x2_t __rev0_900;  __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \
   float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
-  float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
+  float16x4_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 3, 2, 1, 0); \
+  __ret_900 = __noswap_vfmlsl_high_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900), __noswap_vget_lane_f16(__rev2_900, __p3_900)}); \
   __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
   __ret_900; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
+#define vfmlslq_lane_low_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \
+  float32x4_t __ret_901; \
+  float32x4_t __s0_901 = __p0_901; \
+  float16x8_t __s1_901 = __p1_901; \
+  float16x4_t __s2_901 = __p2_901; \
+  __ret_901 = vfmlslq_low_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \
   __ret_901; \
 })
 #else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
+#define vfmlslq_lane_low_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \
+  float32x4_t __ret_902; \
+  float32x4_t __s0_902 = __p0_902; \
+  float16x8_t __s1_902 = __p1_902; \
+  float16x4_t __s2_902 = __p2_902; \
+  float32x4_t __rev0_902;  __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \
+  float16x8_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_902;  __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \
+  __ret_902 = __noswap_vfmlslq_low_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \
+  __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \
   __ret_902; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
+#define vfmlsl_lane_low_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \
+  float32x2_t __ret_903; \
+  float32x2_t __s0_903 = __p0_903; \
+  float16x4_t __s1_903 = __p1_903; \
+  float16x4_t __s2_903 = __p2_903; \
+  __ret_903 = vfmlsl_low_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \
   __ret_903; \
 })
 #else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
+#define vfmlsl_lane_low_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \
+  float32x2_t __ret_904; \
+  float32x2_t __s0_904 = __p0_904; \
+  float16x4_t __s1_904 = __p1_904; \
+  float16x4_t __s2_904 = __p2_904; \
+  float32x2_t __rev0_904;  __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \
+  float16x4_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \
+  float16x4_t __rev2_904;  __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \
+  __ret_904 = __noswap_vfmlsl_low_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \
+  __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \
   __ret_904; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
+#define vfmlslq_laneq_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
+  float32x4_t __ret_905; \
+  float32x4_t __s0_905 = __p0_905; \
+  float16x8_t __s1_905 = __p1_905; \
+  float16x8_t __s2_905 = __p2_905; \
+  __ret_905 = vfmlslq_high_f16(__s0_905, __s1_905, (float16x8_t) {vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905), vgetq_lane_f16(__s2_905, __p3_905)}); \
   __ret_905; \
 })
 #else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
+#define vfmlslq_laneq_high_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
+  float32x4_t __ret_906; \
+  float32x4_t __s0_906 = __p0_906; \
+  float16x8_t __s1_906 = __p1_906; \
+  float16x8_t __s2_906 = __p2_906; \
+  float32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
+  float16x8_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_906 = __noswap_vfmlslq_high_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906), __noswap_vgetq_lane_f16(__rev2_906, __p3_906)}); \
   __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
   __ret_906; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
+#define vfmlsl_laneq_high_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
+  float32x2_t __ret_907; \
+  float32x2_t __s0_907 = __p0_907; \
+  float16x4_t __s1_907 = __p1_907; \
+  float16x8_t __s2_907 = __p2_907; \
+  __ret_907 = vfmlsl_high_f16(__s0_907, __s1_907, (float16x4_t) {vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907), vgetq_lane_f16(__s2_907, __p3_907)}); \
   __ret_907; \
 })
 #else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
+#define vfmlsl_laneq_high_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
+  float32x2_t __ret_908; \
+  float32x2_t __s0_908 = __p0_908; \
+  float16x4_t __s1_908 = __p1_908; \
+  float16x8_t __s2_908 = __p2_908; \
+  float32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
+  float16x4_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \
+  float16x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_908 = __noswap_vfmlsl_high_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908), __noswap_vgetq_lane_f16(__rev2_908, __p3_908)}); \
   __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
   __ret_908; \
 })
 #endif
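
The replacement macros above are the Armv8.2-A FP16FML "vfmlsl" family: a fused
multiply-subtract that widens float16 products into a float32 accumulator, where
_high/_low select which half of the float16 inputs is used and _laneq broadcasts
one lane of a 128-bit vector. A minimal usage sketch (hypothetical wrapper name;
assumes compiling for AArch64 with +fp16fml):

    #include <arm_neon.h>

    // acc[i] -= (float)a[i+4] * (float)b[3], for i = 0..3
    float32x4_t fmlsl_high_lane3(float32x4_t acc, float16x8_t a, float16x8_t b) {
      return vfmlslq_laneq_high_f16(acc, a, b, 3);
    }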
 
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
+#define vfmlslq_laneq_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
+  float32x4_t __ret_909; \
+  float32x4_t __s0_909 = __p0_909; \
+  float16x8_t __s1_909 = __p1_909; \
+  float16x8_t __s2_909 = __p2_909; \
+  __ret_909 = vfmlslq_low_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \
   __ret_909; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
+#define vfmlslq_laneq_low_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
+  float32x4_t __ret_910; \
+  float32x4_t __s0_910 = __p0_910; \
+  float16x8_t __s1_910 = __p1_910; \
+  float16x8_t __s2_910 = __p2_910; \
+  float32x4_t __rev0_910;  __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \
+  float16x8_t __rev1_910;  __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_910 = __noswap_vfmlslq_low_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \
+  __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \
   __ret_910; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
+#define vfmlsl_laneq_low_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
+  float32x2_t __ret_911; \
+  float32x2_t __s0_911 = __p0_911; \
+  float16x4_t __s1_911 = __p1_911; \
+  float16x8_t __s2_911 = __p2_911; \
+  __ret_911 = vfmlsl_low_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \
   __ret_911; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
+#define vfmlsl_laneq_low_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
+  float32x2_t __ret_912; \
+  float32x2_t __s0_912 = __p0_912; \
+  float16x4_t __s1_912 = __p1_912; \
+  float16x8_t __s2_912 = __p2_912; \
+  float32x2_t __rev0_912;  __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \
+  float16x4_t __rev1_912;  __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \
+  float16x8_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_912 = __noswap_vfmlsl_low_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \
+  __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \
   __ret_912; \
 })
 #endif
 
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
+#define vmulh_lane_f16(__p0_913, __p1_913, __p2_913) __extension__ ({ \
+  float16_t __ret_913; \
+  float16_t __s0_913 = __p0_913; \
+  float16x4_t __s1_913 = __p1_913; \
+  __ret_913 = __s0_913 * vget_lane_f16(__s1_913, __p2_913); \
   __ret_913; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
+#define vmulh_lane_f16(__p0_914, __p1_914, __p2_914) __extension__ ({ \
+  float16_t __ret_914; \
+  float16_t __s0_914 = __p0_914; \
+  float16x4_t __s1_914 = __p1_914; \
+  float16x4_t __rev1_914;  __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 3, 2, 1, 0); \
+  __ret_914 = __s0_914 * __noswap_vget_lane_f16(__rev1_914, __p2_914); \
   __ret_914; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
+#define vmulh_laneq_f16(__p0_915, __p1_915, __p2_915) __extension__ ({ \
+  float16_t __ret_915; \
+  float16_t __s0_915 = __p0_915; \
+  float16x8_t __s1_915 = __p1_915; \
+  __ret_915 = __s0_915 * vgetq_lane_f16(__s1_915, __p2_915); \
   __ret_915; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
+#define vmulh_laneq_f16(__p0_916, __p1_916, __p2_916) __extension__ ({ \
+  float16_t __ret_916; \
+  float16_t __s0_916 = __p0_916; \
+  float16x8_t __s1_916 = __p1_916; \
+  float16x8_t __rev1_916;  __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_916 = __s0_916 * __noswap_vgetq_lane_f16(__rev1_916, __p2_916); \
   __ret_916; \
 })
 #endif
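
vmulh_lane_f16/vmulh_laneq_f16 above multiply a scalar float16 by one lane of a
vector, guarded by __ARM_FEATURE_FP16_VECTOR_ARITHMETIC on AArch64. A minimal
sketch (hypothetical wrapper name; assumes +fp16):

    #include <arm_neon.h>

    // s * v[2], computed entirely in half precision
    float16_t mulh_lane2(float16_t s, float16x4_t v) {
      return vmulh_lane_f16(s, v, 2);
    }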
 
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
+#endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
+#define vsudotq_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
+  int32x4_t __ret_917; \
+  int32x4_t __s0_917 = __p0_917; \
+  int8x16_t __s1_917 = __p1_917; \
+  uint8x8_t __s2_917 = __p2_917; \
+  uint8x8_t __reint_917 = __s2_917; \
+  __ret_917 = vusdotq_s32(__s0_917, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_917, __p3_917)), __s1_917); \
   __ret_917; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
+#define vsudotq_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
+  int32x4_t __ret_918; \
+  int32x4_t __s0_918 = __p0_918; \
+  int8x16_t __s1_918 = __p1_918; \
+  uint8x8_t __s2_918 = __p2_918; \
+  int32x4_t __rev0_918;  __rev0_918 = __builtin_shufflevector(__s0_918, __s0_918, 3, 2, 1, 0); \
+  int8x16_t __rev1_918;  __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_918 = __rev2_918; \
+  __ret_918 = __noswap_vusdotq_s32(__rev0_918, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_918, __p3_918)), __rev1_918); \
+  __ret_918 = __builtin_shufflevector(__ret_918, __ret_918, 3, 2, 1, 0); \
   __ret_918; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
+#define vsudot_lane_s32(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
+  int32x2_t __ret_919; \
+  int32x2_t __s0_919 = __p0_919; \
+  int8x8_t __s1_919 = __p1_919; \
+  uint8x8_t __s2_919 = __p2_919; \
+  uint8x8_t __reint_919 = __s2_919; \
+  __ret_919 = vusdot_s32(__s0_919, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_919, __p3_919)), __s1_919); \
   __ret_919; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
+#define vsudot_lane_s32(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
+  int32x2_t __ret_920; \
+  int32x2_t __s0_920 = __p0_920; \
+  int8x8_t __s1_920 = __p1_920; \
+  uint8x8_t __s2_920 = __p2_920; \
+  int32x2_t __rev0_920;  __rev0_920 = __builtin_shufflevector(__s0_920, __s0_920, 1, 0); \
+  int8x8_t __rev1_920;  __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __reint_920 = __rev2_920; \
+  __ret_920 = __noswap_vusdot_s32(__rev0_920, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_920, __p3_920)), __rev1_920); \
+  __ret_920 = __builtin_shufflevector(__ret_920, __ret_920, 1, 0); \
   __ret_920; \
 })
 #endif
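
The vsudotq_lane_s32/vsudot_lane_s32 macros above come from the i8mm extension:
a signed-by-unsigned dot product that, per 32-bit lane, accumulates four int8 x
uint8 products. The uint8 vector is reinterpreted as int32x2_t so that splatting
one "lane" broadcasts a group of four bytes, and the operands are swapped into
vusdot_s32, since sudot(r, s, u) == usdot(r, u, s). A minimal sketch
(hypothetical wrapper name; assumes +i8mm):

    #include <arm_neon.h>

    // acc[i] += sum of a[4i..4i+3] (signed) * b[0..3] (unsigned), for i = 0..1
    int32x2_t sudot_lane0(int32x2_t acc, int8x8_t a, uint8x8_t b) {
      return vsudot_lane_s32(acc, a, b, 0);
    }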
 
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
-  __ret_921; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
-  __ret_922; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
-  __ret_923; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
-  __ret_924; \
-})
-#endif
-
 #endif
 #if defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
@@ -68818,9 +68710,9 @@
 }
 #else
 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68835,9 +68727,9 @@
 }
 #else
 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68852,9 +68744,9 @@
 }
 #else
 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68869,9 +68761,9 @@
 }
 #else
 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68886,9 +68778,9 @@
 }
 #else
 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68903,9 +68795,9 @@
 }
 #else
 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68920,9 +68812,9 @@
 }
 #else
 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68937,9 +68829,9 @@
 }
 #else
 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -68954,9 +68846,9 @@
 }
 #else
 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -68971,9 +68863,9 @@
 }
 #else
 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -68988,9 +68880,9 @@
 }
 #else
 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69005,9 +68897,9 @@
 }
 #else
 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69022,9 +68914,9 @@
 }
 #else
 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69039,9 +68931,9 @@
 }
 #else
 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69056,9 +68948,9 @@
 }
 #else
 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69073,9 +68965,9 @@
 }
 #else
 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69090,9 +68982,9 @@
 }
 #else
 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69107,9 +68999,9 @@
 }
 #else
 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69117,140 +69009,140 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
-  poly64x2_t __s0_925 = __p0_925; \
-  poly64x1_t __s2_925 = __p2_925; \
-  poly64x2_t __ret_925; \
-  __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
-  __ret_925; \
+#define vcopyq_lane_p64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
+  poly64x2_t __ret_921; \
+  poly64x2_t __s0_921 = __p0_921; \
+  poly64x1_t __s2_921 = __p2_921; \
+  __ret_921 = vsetq_lane_p64(vget_lane_p64(__s2_921, __p3_921), __s0_921, __p1_921); \
+  __ret_921; \
 })
 #else
-#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
-  poly64x2_t __s0_926 = __p0_926; \
-  poly64x1_t __s2_926 = __p2_926; \
-  poly64x2_t __rev0_926;  __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \
-  poly64x2_t __ret_926; \
-  __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \
-  __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \
-  __ret_926; \
+#define vcopyq_lane_p64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
+  poly64x2_t __ret_922; \
+  poly64x2_t __s0_922 = __p0_922; \
+  poly64x1_t __s2_922 = __p2_922; \
+  poly64x2_t __rev0_922;  __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 1, 0); \
+  __ret_922 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_922, __p3_922), __rev0_922, __p1_922); \
+  __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 1, 0); \
+  __ret_922; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
-  float64x2_t __s0_927 = __p0_927; \
-  float64x1_t __s2_927 = __p2_927; \
-  float64x2_t __ret_927; \
-  __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \
+#define vcopyq_lane_f64(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
+  float64x2_t __ret_923; \
+  float64x2_t __s0_923 = __p0_923; \
+  float64x1_t __s2_923 = __p2_923; \
+  __ret_923 = vsetq_lane_f64(vget_lane_f64(__s2_923, __p3_923), __s0_923, __p1_923); \
+  __ret_923; \
+})
+#else
+#define vcopyq_lane_f64(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
+  float64x2_t __ret_924; \
+  float64x2_t __s0_924 = __p0_924; \
+  float64x1_t __s2_924 = __p2_924; \
+  float64x2_t __rev0_924;  __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \
+  __ret_924 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_924, __p3_924), __rev0_924, __p1_924); \
+  __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \
+  __ret_924; \
+})
+#endif
+
+#define vcopy_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
+  poly64x1_t __ret_925; \
+  poly64x1_t __s0_925 = __p0_925; \
+  poly64x1_t __s2_925 = __p2_925; \
+  __ret_925 = vset_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
+  __ret_925; \
+})
+#define vcopy_lane_f64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
+  float64x1_t __ret_926; \
+  float64x1_t __s0_926 = __p0_926; \
+  float64x1_t __s2_926 = __p2_926; \
+  __ret_926 = vset_lane_f64(vget_lane_f64(__s2_926, __p3_926), __s0_926, __p1_926); \
+  __ret_926; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
+  poly64x2_t __ret_927; \
+  poly64x2_t __s0_927 = __p0_927; \
+  poly64x2_t __s2_927 = __p2_927; \
+  __ret_927 = vsetq_lane_p64(vgetq_lane_p64(__s2_927, __p3_927), __s0_927, __p1_927); \
   __ret_927; \
 })
 #else
-#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
-  float64x2_t __s0_928 = __p0_928; \
-  float64x1_t __s2_928 = __p2_928; \
-  float64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
-  float64x2_t __ret_928; \
-  __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \
+#define vcopyq_laneq_p64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
+  poly64x2_t __ret_928; \
+  poly64x2_t __s0_928 = __p0_928; \
+  poly64x2_t __s2_928 = __p2_928; \
+  poly64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
+  poly64x2_t __rev2_928;  __rev2_928 = __builtin_shufflevector(__s2_928, __s2_928, 1, 0); \
+  __ret_928 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_928, __p3_928), __rev0_928, __p1_928); \
   __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \
   __ret_928; \
 })
 #endif
 
-#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
-  poly64x1_t __s0_929 = __p0_929; \
-  poly64x1_t __s2_929 = __p2_929; \
-  poly64x1_t __ret_929; \
-  __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
+  float64x2_t __ret_929; \
+  float64x2_t __s0_929 = __p0_929; \
+  float64x2_t __s2_929 = __p2_929; \
+  __ret_929 = vsetq_lane_f64(vgetq_lane_f64(__s2_929, __p3_929), __s0_929, __p1_929); \
   __ret_929; \
 })
-#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
-  float64x1_t __s0_930 = __p0_930; \
-  float64x1_t __s2_930 = __p2_930; \
-  float64x1_t __ret_930; \
-  __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \
+#else
+#define vcopyq_laneq_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
+  float64x2_t __ret_930; \
+  float64x2_t __s0_930 = __p0_930; \
+  float64x2_t __s2_930 = __p2_930; \
+  float64x2_t __rev0_930;  __rev0_930 = __builtin_shufflevector(__s0_930, __s0_930, 1, 0); \
+  float64x2_t __rev2_930;  __rev2_930 = __builtin_shufflevector(__s2_930, __s2_930, 1, 0); \
+  __ret_930 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_930, __p3_930), __rev0_930, __p1_930); \
+  __ret_930 = __builtin_shufflevector(__ret_930, __ret_930, 1, 0); \
   __ret_930; \
 })
+#endif
+
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
-  poly64x2_t __s0_931 = __p0_931; \
+#define vcopy_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
+  poly64x1_t __ret_931; \
+  poly64x1_t __s0_931 = __p0_931; \
   poly64x2_t __s2_931 = __p2_931; \
-  poly64x2_t __ret_931; \
-  __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
+  __ret_931 = vset_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
   __ret_931; \
 })
 #else
-#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
-  poly64x2_t __s0_932 = __p0_932; \
+#define vcopy_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
+  poly64x1_t __ret_932; \
+  poly64x1_t __s0_932 = __p0_932; \
   poly64x2_t __s2_932 = __p2_932; \
-  poly64x2_t __rev0_932;  __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \
   poly64x2_t __rev2_932;  __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \
-  poly64x2_t __ret_932; \
-  __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \
-  __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \
+  __ret_932 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __s0_932, __p1_932); \
   __ret_932; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
-  float64x2_t __s0_933 = __p0_933; \
+#define vcopy_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
+  float64x1_t __ret_933; \
+  float64x1_t __s0_933 = __p0_933; \
   float64x2_t __s2_933 = __p2_933; \
-  float64x2_t __ret_933; \
-  __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
+  __ret_933 = vset_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
   __ret_933; \
 })
 #else
-#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
-  float64x2_t __s0_934 = __p0_934; \
+#define vcopy_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
+  float64x1_t __ret_934; \
+  float64x1_t __s0_934 = __p0_934; \
   float64x2_t __s2_934 = __p2_934; \
-  float64x2_t __rev0_934;  __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \
   float64x2_t __rev2_934;  __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \
-  float64x2_t __ret_934; \
-  __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \
-  __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \
+  __ret_934 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __s0_934, __p1_934); \
   __ret_934; \
 })
 #endif
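
The vcopy_lane/vcopy_laneq macros above insert one lane of a source vector into
a lane of a destination via a vget_lane/vset_lane pair; the 64x1 variants have a
single lane, so they need no endianness split. A minimal sketch (hypothetical
wrapper name):

    #include <arm_neon.h>

    // copy lane 1 of src into lane 0 of dst
    float64x1_t copy_from_q(float64x1_t dst, float64x2_t src) {
      return vcopy_laneq_f64(dst, 0, src, 1);
    }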
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \
-  poly64x1_t __s0_935 = __p0_935; \
-  poly64x2_t __s2_935 = __p2_935; \
-  poly64x1_t __ret_935; \
-  __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \
-  __ret_935; \
-})
-#else
-#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \
-  poly64x1_t __s0_936 = __p0_936; \
-  poly64x2_t __s2_936 = __p2_936; \
-  poly64x2_t __rev2_936;  __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \
-  poly64x1_t __ret_936; \
-  __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \
-  __ret_936; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \
-  float64x1_t __s0_937 = __p0_937; \
-  float64x2_t __s2_937 = __p2_937; \
-  float64x1_t __ret_937; \
-  __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \
-  __ret_937; \
-})
-#else
-#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \
-  float64x1_t __s0_938 = __p0_938; \
-  float64x2_t __s2_938 = __p2_938; \
-  float64x2_t __rev2_938;  __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \
-  float64x1_t __ret_938; \
-  __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \
-  __ret_938; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
   uint16x8_t __ret;
   __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
@@ -69258,10 +69150,10 @@
 }
 #else
 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69276,10 +69168,10 @@
 }
 #else
 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69294,10 +69186,10 @@
 }
 #else
 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69312,10 +69204,10 @@
 }
 #else
 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69330,10 +69222,10 @@
 }
 #else
 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69348,10 +69240,10 @@
 }
 #else
 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69366,9 +69258,9 @@
 }
 #else
 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69383,9 +69275,9 @@
 }
 #else
 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69400,9 +69292,9 @@
 }
 #else
 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69417,9 +69309,9 @@
 }
 #else
 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69434,10 +69326,10 @@
 }
 #else
 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69452,10 +69344,10 @@
 }
 #else
 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69470,10 +69362,10 @@
 }
 #else
 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69488,10 +69380,10 @@
 }
 #else
 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69506,10 +69398,10 @@
 }
 #else
 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69524,10 +69416,10 @@
 }
 #else
 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69542,9 +69434,9 @@
 }
 #else
 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69559,9 +69451,9 @@
 }
 #else
 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69576,9 +69468,9 @@
 }
 #else
 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69593,47 +69485,47 @@
 }
 #else
 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 #endif
 
-#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \
-  float64x1_t __s0_939 = __p0_939; \
-  float64x1_t __s1_939 = __p1_939; \
-  float64x1_t __ret_939; \
-  float64_t __x_939 = vget_lane_f64(__s0_939, 0); \
-  float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \
-  float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \
-  __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \
-  __ret_939; \
+#define vmulx_lane_f64(__p0_935, __p1_935, __p2_935) __extension__ ({ \
+  float64x1_t __ret_935; \
+  float64x1_t __s0_935 = __p0_935; \
+  float64x1_t __s1_935 = __p1_935; \
+  float64_t __x_935 = vget_lane_f64(__s0_935, 0); \
+  float64_t __y_935 = vget_lane_f64(__s1_935, __p2_935); \
+  float64_t __z_935 = vmulxd_f64(__x_935, __y_935); \
+  __ret_935 = vset_lane_f64(__z_935, __s0_935, __p2_935); \
+  __ret_935; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \
-  float64x1_t __s0_940 = __p0_940; \
-  float64x2_t __s1_940 = __p1_940; \
-  float64x1_t __ret_940; \
-  float64_t __x_940 = vget_lane_f64(__s0_940, 0); \
-  float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \
-  float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \
-  __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \
-  __ret_940; \
+#define vmulx_laneq_f64(__p0_936, __p1_936, __p2_936) __extension__ ({ \
+  float64x1_t __ret_936; \
+  float64x1_t __s0_936 = __p0_936; \
+  float64x2_t __s1_936 = __p1_936; \
+  float64_t __x_936 = vget_lane_f64(__s0_936, 0); \
+  float64_t __y_936 = vgetq_lane_f64(__s1_936, __p2_936); \
+  float64_t __z_936 = vmulxd_f64(__x_936, __y_936); \
+  __ret_936 = vset_lane_f64(__z_936, __s0_936, 0); \
+  __ret_936; \
 })
 #else
-#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \
-  float64x1_t __s0_941 = __p0_941; \
-  float64x2_t __s1_941 = __p1_941; \
-  float64x2_t __rev1_941;  __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \
-  float64x1_t __ret_941; \
-  float64_t __x_941 = vget_lane_f64(__s0_941, 0); \
-  float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \
-  float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \
-  __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \
-  __ret_941; \
+#define vmulx_laneq_f64(__p0_937, __p1_937, __p2_937) __extension__ ({ \
+  float64x1_t __ret_937; \
+  float64x1_t __s0_937 = __p0_937; \
+  float64x2_t __s1_937 = __p1_937; \
+  float64x2_t __rev1_937;  __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 1, 0); \
+  float64_t __x_937 = vget_lane_f64(__s0_937, 0); \
+  float64_t __y_937 = __noswap_vgetq_lane_f64(__rev1_937, __p2_937); \
+  float64_t __z_937 = vmulxd_f64(__x_937, __y_937); \
+  __ret_937 = vset_lane_f64(__z_937, __s0_937, 0); \
+  __ret_937; \
 })
 #endif
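
vmulx uses FMULX semantics rather than a plain multiply: (+/-0) * (+/-Inf)
returns +/-2.0 instead of NaN, which keeps Newton-Raphson reciprocal sequences
well-behaved. A minimal sketch (hypothetical wrapper name):

    #include <arm_neon.h>

    // a[0] * b[1] with FMULX special-case handling of 0 * Inf
    float64x1_t mulx_hi(float64x1_t a, float64x2_t b) {
      return vmulx_laneq_f64(a, b, 1);
    }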
 
@@ -69646,10 +69538,10 @@
 }
 #else
 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69669,10 +69561,10 @@
 }
 #else
 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
   __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69692,10 +69584,10 @@
 }
 #else
 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69715,10 +69607,10 @@
 }
 #else
 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69738,10 +69630,10 @@
 }
 #else
 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
   __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69761,10 +69653,10 @@
 }
 #else
 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69785,10 +69677,10 @@
 }
 #else
 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint16x8_t __ret;
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
   __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69803,10 +69695,10 @@
 }
 #else
 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint64x2_t __ret;
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
   __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69821,10 +69713,10 @@
 }
 #else
 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint32x4_t __ret;
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
   __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
@@ -69839,10 +69731,10 @@
 }
 #else
 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int16x8_t __ret;
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
   __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
@@ -69857,10 +69749,10 @@
 }
 #else
 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int64x2_t __ret;
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
   __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
@@ -69875,10 +69767,10 @@
 }
 #else
 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int32x4_t __ret;
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
   __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
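
Every big-endian body in the hunks above follows the same shape: shuffle the
inputs into little-endian lane order, call a __noswap_ helper, and shuffle the
result back; the churn in this diff only hoists the __ret declaration to the top
of each function without changing behavior. For reference, vabal_high_s16 is
equivalent to this hand-written form (hypothetical wrapper name):

    #include <arm_neon.h>

    // acc[i] + |a_high[i] - b_high[i]|, widened from int16 to int32
    int32x4_t abal_high(int32x4_t acc, int16x8_t a, int16x8_t b) {
      return vabal_s16(acc, vget_high_s16(a), vget_high_s16(b));
    }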
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_sve.h b/linux-x86/lib64/clang/16.0.2/include/arm_sve.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/arm_sve.h
copy to linux-x86/lib64/clang/16.0.2/include/arm_sve.h
index 909634a..03dc3ff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_sve.h
+++ b/linux-x86/lib64/clang/16.0.2/include/arm_sve.h
@@ -2407,15 +2407,15 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
 svuint16_t svcnt_s16_z(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb)))
-uint64_t svcntb();
+uint64_t svcntb(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat)))
 uint64_t svcntb_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd)))
-uint64_t svcntd();
+uint64_t svcntd(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat)))
 uint64_t svcntd_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth)))
-uint64_t svcnth();
+uint64_t svcnth(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat)))
 uint64_t svcnth_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8)))
@@ -2427,7 +2427,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16)))
 uint64_t svcntp_b16(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))
-uint64_t svcntw();
+uint64_t svcntw(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))
 uint64_t svcntw_pat(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
@@ -6521,7 +6521,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
 int16_t svorv_s16(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse_b();
+svbool_t svpfalse_b(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
 svbool_t svpfirst_b(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8)))
@@ -6627,13 +6627,13 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16)))
 svbool_t svptrue_pat_b16(enum svpattern);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8)))
-svbool_t svptrue_b8();
+svbool_t svptrue_b8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32)))
-svbool_t svptrue_b32();
+svbool_t svptrue_b32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64)))
-svbool_t svptrue_b64();
+svbool_t svptrue_b64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16)))
-svbool_t svptrue_b16();
+svbool_t svptrue_b16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
 svint8_t svqadd_n_s8(svint8_t, int8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
@@ -7011,7 +7011,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
 svint16_t svrbit_s16_z(svbool_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr)))
-svbool_t svrdffr();
+svbool_t svrdffr(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z)))
 svbool_t svrdffr_z(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
@@ -7411,7 +7411,7 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
 svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr)))
-void svsetffr();
+void svsetffr(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
 svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
@@ -8285,93 +8285,93 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
 svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8)))
-svuint8x2_t svundef2_u8();
+svuint8x2_t svundef2_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32)))
-svuint32x2_t svundef2_u32();
+svuint32x2_t svundef2_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64)))
-svuint64x2_t svundef2_u64();
+svuint64x2_t svundef2_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16)))
-svuint16x2_t svundef2_u16();
+svuint16x2_t svundef2_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8)))
-svint8x2_t svundef2_s8();
+svint8x2_t svundef2_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64)))
-svfloat64x2_t svundef2_f64();
+svfloat64x2_t svundef2_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32)))
-svfloat32x2_t svundef2_f32();
+svfloat32x2_t svundef2_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16)))
-svfloat16x2_t svundef2_f16();
+svfloat16x2_t svundef2_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32)))
-svint32x2_t svundef2_s32();
+svint32x2_t svundef2_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64)))
-svint64x2_t svundef2_s64();
+svint64x2_t svundef2_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16)))
-svint16x2_t svundef2_s16();
+svint16x2_t svundef2_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8)))
-svuint8x3_t svundef3_u8();
+svuint8x3_t svundef3_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32)))
-svuint32x3_t svundef3_u32();
+svuint32x3_t svundef3_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64)))
-svuint64x3_t svundef3_u64();
+svuint64x3_t svundef3_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16)))
-svuint16x3_t svundef3_u16();
+svuint16x3_t svundef3_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8)))
-svint8x3_t svundef3_s8();
+svint8x3_t svundef3_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64)))
-svfloat64x3_t svundef3_f64();
+svfloat64x3_t svundef3_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32)))
-svfloat32x3_t svundef3_f32();
+svfloat32x3_t svundef3_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16)))
-svfloat16x3_t svundef3_f16();
+svfloat16x3_t svundef3_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32)))
-svint32x3_t svundef3_s32();
+svint32x3_t svundef3_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64)))
-svint64x3_t svundef3_s64();
+svint64x3_t svundef3_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16)))
-svint16x3_t svundef3_s16();
+svint16x3_t svundef3_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8)))
-svuint8x4_t svundef4_u8();
+svuint8x4_t svundef4_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32)))
-svuint32x4_t svundef4_u32();
+svuint32x4_t svundef4_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64)))
-svuint64x4_t svundef4_u64();
+svuint64x4_t svundef4_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16)))
-svuint16x4_t svundef4_u16();
+svuint16x4_t svundef4_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8)))
-svint8x4_t svundef4_s8();
+svint8x4_t svundef4_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64)))
-svfloat64x4_t svundef4_f64();
+svfloat64x4_t svundef4_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32)))
-svfloat32x4_t svundef4_f32();
+svfloat32x4_t svundef4_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16)))
-svfloat16x4_t svundef4_f16();
+svfloat16x4_t svundef4_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32)))
-svint32x4_t svundef4_s32();
+svint32x4_t svundef4_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64)))
-svint64x4_t svundef4_s64();
+svint64x4_t svundef4_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16)))
-svint16x4_t svundef4_s16();
+svint16x4_t svundef4_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8)))
-svuint8_t svundef_u8();
+svuint8_t svundef_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32)))
-svuint32_t svundef_u32();
+svuint32_t svundef_u32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64)))
-svuint64_t svundef_u64();
+svuint64_t svundef_u64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16)))
-svuint16_t svundef_u16();
+svuint16_t svundef_u16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8)))
-svint8_t svundef_s8();
+svint8_t svundef_s8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64)))
-svfloat64_t svundef_f64();
+svfloat64_t svundef_f64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32)))
-svfloat32_t svundef_f32();
+svfloat32_t svundef_f32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16)))
-svfloat16_t svundef_f16();
+svfloat16_t svundef_f16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32)))
-svint32_t svundef_s32();
+svint32_t svundef_s32(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64)))
-svint64_t svundef_s64();
+svint64_t svundef_s64(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16)))
-svint16_t svundef_s16();
+svint16_t svundef_s16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
 svbool_t svunpkhi_b(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
@@ -13830,8 +13830,8 @@
 int64_t svorv(svbool_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
 int16_t svorv(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
-svbool_t svpfalse();
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
+svbool_t svpfalse(void);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
 svbool_t svpfirst(svbool_t, svbool_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
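The svpfalse fix above moves it onto the overloaded-alias macro (__aio) used by the other short-form names. That machinery plausibly rests on clang's overloadable attribute, which lets C declare several functions under one name; a minimal sketch under that assumption:

static inline __attribute__((overloadable)) int   twice(int x)   { return 2 * x;    }
static inline __attribute__((overloadable)) float twice(float x) { return 2.0f * x; }
/* twice(3) and twice(3.0f) now resolve by argument type, the way the
 * short-form SVE names (svorv, svpfirst, ...) are dispatched. */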
@@ -23456,13 +23456,13 @@
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
 svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))
-svbfloat16x2_t svundef2_bf16();
+svbfloat16x2_t svundef2_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))
-svbfloat16x3_t svundef3_bf16();
+svbfloat16x3_t svundef3_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))
-svbfloat16x4_t svundef4_bf16();
+svbfloat16x4_t svundef4_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))
-svbfloat16_t svundef_bf16();
+svbfloat16_t svundef_bf16(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
 svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
@@ -24038,6 +24038,10 @@
 } // extern "C"
 #endif
 
+#undef __ai
+
+#undef __aio
+
 #endif /*__ARM_FEATURE_SVE */
 
 #endif /* __ARM_SVE_H */
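The trailing hunk adds matching #undef directives so the header-private __ai/__aio helper macros no longer leak into user translation units, a standard macro-hygiene idiom. A sketch with an illustrative macro name:

#define MY_INLINE static inline __attribute__((always_inline))
MY_INLINE int add1(int x) { return x + 1; }
#undef MY_INLINE  /* past this point the name is free for user code */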
diff --git a/linux-x86/lib64/clang/14.0.2/include/armintr.h b/linux-x86/lib64/clang/16.0.2/include/armintr.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/armintr.h
rename to linux-x86/lib64/clang/16.0.2/include/armintr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx2intrin.h
similarity index 95%
copy from darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx2intrin.h
index 5064c87..f8521e7 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx2intrin.h
@@ -26,19 +26,19 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi8(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi16(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi32(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+    return (__m256i)__builtin_elementwise_abs((__v8si)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
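Throughout this file, target-specific __builtin_ia32_* calls are replaced with clang's generic elementwise builtins, which operate lane-by-lane on any vector type. A standalone sketch (type name illustrative):

typedef signed char s8x16 __attribute__((vector_size(16)));

s8x16 abs_lanes(s8x16 v) {
  /* |v[i]| for every lane; the signed element type is exactly what
   * makes the cast to __v32qs above necessary. */
  return __builtin_elementwise_abs(v);
}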
@@ -92,25 +92,25 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_adds_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
 }
 
 #define _mm256_alignr_epi8(a, b, n) \
@@ -253,73 +253,73 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS256
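Note how the cast chooses the semantics: __builtin_elementwise_max on __v32qs compares lanes as signed and on __v32qu as unsigned, so one builtin replaces both pmaxsb and pmaxub. A sketch of the distinction:

typedef signed char   s8x4 __attribute__((vector_size(4)));
typedef unsigned char u8x4 __attribute__((vector_size(4)));

void demo(void) {
  s8x4 a = {-1, 0, 0, 0},   zs = {0, 0, 0, 0};
  u8x4 b = {0xFF, 0, 0, 0}, zu = {0, 0, 0, 0};
  s8x4 smax = __builtin_elementwise_max(a, zs); /* smax[0] == 0:   -1 < 0  signed   */
  u8x4 umax = __builtin_elementwise_max(b, zu); /* umax[0] == 255: 255 > 0 unsigned */
  (void)smax; (void)umax;
}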
@@ -628,25 +628,25 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_subs_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
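The adds/subs rewrites above use the saturating elementwise builtins, whose results clamp to the lane type's range instead of wrapping. A sketch:

typedef signed char s8x4 __attribute__((vector_size(4)));

s8x4 demo(void) {
  s8x4 a = {127, -128, 100, 0};
  s8x4 b = {  1,   -1,  50, 5};
  /* clamps to {127, -128, 127, 5} rather than wrapping around */
  return __builtin_elementwise_add_sat(a, b);
}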
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512bf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512bf16intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512bitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
index 6aee8ae..aaeb936 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512bwintrin.h
@@ -485,7 +485,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi8 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +507,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi16 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -617,7 +617,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -639,7 +639,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -661,7 +661,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -683,7 +683,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_adds_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +751,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +773,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +796,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +818,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +840,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +862,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +884,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +906,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -950,7 +950,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -972,7 +972,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -994,7 +994,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1016,7 +1016,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_subs_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1506,7 +1506,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1598,7 +1598,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1643,7 +1643,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi16(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
+  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1659,7 +1659,7 @@
 _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
+                                         (__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B),
                                          (__v32hi)_mm512_setzero_si512());
 }
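The (int)__B casts in these slli/srai/srli wrappers don't change behavior; they spell out the unsigned-to-signed conversion the underlying builtin's int parameter was already forcing, keeping the header clean under conversion warnings. A sketch of the shape of the fix (shl stands in for the builtin):

static int shl(int v, int count) { return v << count; }

static int wrapper(int v, unsigned int count) {
  return shl(v, (int)count);  /* conversion now explicit, warning-free */
}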
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512cdintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512cdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512cdintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512cdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512dqintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512dqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512dqintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512dqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512erintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512erintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512erintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512fintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512fintrin.h
index df29864..61bc89c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512fintrin.h
@@ -26,6 +26,10 @@
 typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
 typedef unsigned int __v16su __attribute__((__vector_size__(64)));
 
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
 typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
 typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
 typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
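The new __v64qs typedef exists because plain char has implementation-defined signedness, while the elementwise builtins take their semantics from the lane type; the signed variant guarantees signed comparisons and abs. A sketch:

typedef char        c8x8 __attribute__((vector_size(8)));  /* sign is target-dependent */
typedef signed char s8x8 __attribute__((vector_size(8)));  /* lanes are always signed  */

s8x8 abs8(s8x8 v) {
  return __builtin_elementwise_abs(v);  /* well-defined: lanes are signed */
}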
@@ -1086,7 +1090,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_max_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1112,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1134,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1156,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1321,7 +1325,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_min_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1347,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1369,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1391,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1776,7 +1780,7 @@
 {
   return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                   _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
+                                                  (__v16sf) __A, (unsigned short)-1,
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1794,7 +1798,7 @@
 {
   return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                    _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
+                                                   (__v8df) __A, (unsigned char)-1,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1821,7 +1825,7 @@
 {
   return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                   _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
+                                                  (__v16sf) __A, (unsigned short)-1,
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1830,7 +1834,7 @@
 {
   return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                    _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
+                                                   (__v8df) __A, (unsigned char)-1,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -1846,7 +1850,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi64(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+  return (__m512i)__builtin_elementwise_abs((__v8di)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1872,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi32(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+  return (__m512i)__builtin_elementwise_abs((__v16si) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5113,7 +5117,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5135,7 +5139,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5157,7 +5161,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5179,7 +5183,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srli_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5925,41 +5929,44 @@
                                             (__v8di)_mm512_setzero_si512());
 }
 
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1))
+/// \enum _MM_TERNLOG_ENUM
+///    A helper to represent the ternary logic operations among vector \a A,
+///    \a B and \a C. The representation is passed to \a imm.
+typedef enum {
+  _MM_TERNLOG_A = 0xF0,
+  _MM_TERNLOG_B = 0xCC,
+  _MM_TERNLOG_C = 0xAA
+} _MM_TERNLOG_ENUM;
 
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U)))
+#define _mm512_ternarylogic_epi32(A, B, C, imm)                                \
+  ((__m512i)__builtin_ia32_pternlogd512_mask(                                  \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)-1))
 
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                              (__v16si)(__m512i)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U)))
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm)                        \
+  ((__m512i)__builtin_ia32_pternlogd512_mask(                                  \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)(U)))
 
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm)                       \
+  ((__m512i)__builtin_ia32_pternlogd512_maskz(                                 \
+      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C),     \
+      (unsigned char)(imm), (__mmask16)(U)))
 
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm512_ternarylogic_epi64(A, B, C, imm)                                \
+  ((__m512i)__builtin_ia32_pternlogq512_mask(                                  \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                              (__v8di)(__m512i)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm)                        \
+  ((__m512i)__builtin_ia32_pternlogq512_mask(                                  \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm)                       \
+  ((__m512i)__builtin_ia32_pternlogq512_maskz(                                 \
+      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
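The new _MM_TERNLOG_ENUM constants are the 8-entry truth tables of the three inputs; composing them with ordinary bitwise operators yields the imm8 for any three-input boolean function. A sketch of building an immediate (the static assertion just checks the arithmetic):

enum { TL_A = 0xF0, TL_B = 0xCC, TL_C = 0xAA };  /* mirror _MM_TERNLOG_A/B/C */

/* (A & B) | C  ==>  imm8 = (0xF0 & 0xCC) | 0xAA = 0xEA */
_Static_assert(((TL_A & TL_B) | TL_C) == 0xEA, "imm8 for (A & B) | C");
/* usage: _mm512_ternarylogic_epi32(a, b, c, (TL_A & TL_B) | TL_C); */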
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsd_i64(A, R) \
@@ -6599,7 +6606,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi32(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6622,7 +6629,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi64(__m512i __A, unsigned int __B)
 {
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9312,43 +9319,43 @@
  */
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_add_q512(__W);
+  return __builtin_reduce_add((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_mul_q512(__W);
+  return __builtin_reduce_mul((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_add_q512(__W);
+  return __builtin_reduce_add((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  return __builtin_ia32_reduce_mul_q512(__W);
+  return __builtin_reduce_mul((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
+  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 // -0.0 is used to ignore the start value since it is the neutral value of
@@ -9376,46 +9383,46 @@
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_add_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
+  return __builtin_reduce_add((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_mul_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+  return __builtin_reduce_mul((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
+  return __builtin_reduce_add((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+  return __builtin_reduce_mul((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ float __DEFAULT_FN_ATTRS512
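These hunks swap the ia32-specific reduction builtins for clang's generic __builtin_reduce_* family, which folds a vector horizontally to a single scalar; the parallel ~0ULL → -1LL change is because _mm512_set1_epi64 takes a signed long long, and -1LL is the portable spelling of all-ones there. A sketch of the reduction builtins:

typedef long long i64x4 __attribute__((vector_size(32)));

long long sum4(i64x4 v) { return __builtin_reduce_add(v); } /* v[0]+v[1]+v[2]+v[3] */
long long max4(i64x4 v) { return __builtin_reduce_max(v); } /* largest lane        */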
@@ -9442,89 +9449,89 @@
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
+  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ double __DEFAULT_FN_ATTRS512
@@ -9594,7 +9601,7 @@
 ///
 /// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9602,7 +9609,7 @@
 ///   dst[i+63:i] := MEM[addr+63:addr]
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_i32logather_pd(vindex, base_addr, scale)                        \
   _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
 
@@ -9614,7 +9621,7 @@
 ///
 /// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9626,7 +9633,7 @@
 ///   FI
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale)        \
   _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex),      \
                            (base_addr), (scale))
@@ -9637,7 +9644,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9645,7 +9652,7 @@
 ///   dst[i+63:i] := MEM[addr+63:addr]
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_i32logather_epi64(vindex, base_addr, scale)                     \
   _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
 
@@ -9656,7 +9663,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9668,7 +9675,7 @@
 ///   FI
 /// ENDFOR
 /// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale)     \
   _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex),   \
                               (base_addr), (scale))
@@ -9679,14 +9686,14 @@
 ///
 /// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
 ///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
 ///   MEM[addr+63:addr] := v1[i+63:i]
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale)                   \
   _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
 
@@ -9698,7 +9705,7 @@
 ///
 /// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9707,7 +9714,7 @@
 ///     MEM[addr+63:addr] := a[i+63:i]
 ///   FI
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale)        \
   _mm512_mask_i32scatter_pd((base_addr), (mask),                               \
                             _mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9718,14 +9725,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
 ///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
 ///   MEM[addr+63:addr] := a[i+63:i]
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale)                \
   _mm512_i32scatter_epi64((base_addr),                                         \
                           _mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9737,7 +9744,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// FOR j := 0 to 7
 ///   i := j*64
 ///   m := j*32
@@ -9746,7 +9753,7 @@
 ///     MEM[addr+63:addr] := a[i+63:i]
 ///   FI
 /// ENDFOR
-/// \endoperation
+/// \endcode
 #define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale)     \
   _mm512_mask_i32scatter_epi64((base_addr), (mask),                            \
                                _mm512_castsi512_si256(vindex), (v1), (scale))
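The remaining hunks in this file are documentation-only: the nonstandard \operation ... \endoperation pair is replaced by Doxygen's standard \code block, tagged with the .operation extension, e.g.:

/// \code{.operation}
/// FOR j := 0 to 7
///   dst[j] := src[j]
/// ENDFOR
/// \endcode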
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
index 99409a3..7693857 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512fp16intrin.h
@@ -10,6 +10,8 @@
 #error "Never use <avx512fp16intrin.h> directly; include <immintrin.h> instead."
 #endif
 
+#ifdef __SSE2__
+
 #ifndef __AVX512FP16INTRIN_H
 #define __AVX512FP16INTRIN_H
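The new #ifdef __SSE2__ wraps the entire header (its partner #endif is added at the bottom of the file), so the FP16 intrinsics only exist when the baseline feature is present. The resulting structure:

#ifdef __SSE2__
#ifndef __AVX512FP16INTRIN_H
#define __AVX512FP16INTRIN_H
/* ... intrinsics ... */
#endif /* __AVX512FP16INTRIN_H */
#endif /* __SSE2__ */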
 
@@ -829,7 +831,7 @@
   struct __mm_load_sh_struct {
     _Float16 __u;
   } __attribute__((__packed__, __may_alias__));
-  _Float16 __u = ((struct __mm_load_sh_struct *)__dp)->__u;
+  _Float16 __u = ((const struct __mm_load_sh_struct *)__dp)->__u;
   return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0};
 }
 
@@ -838,13 +840,13 @@
   __m128h src = (__v8hf)__builtin_shufflevector(
       (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8);
 
-  return (__m128h)__builtin_ia32_loadsh128_mask((__v8hf *)__A, src, __U & 1);
+  return (__m128h)__builtin_ia32_loadsh128_mask((const __v8hf *)__A, src, __U & 1);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_load_sh(__mmask8 __U, const void *__A) {
   return (__m128h)__builtin_ia32_loadsh128_mask(
-      (__v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
+      (const __v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
 }
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
@@ -3347,3 +3349,4 @@
 #undef __DEFAULT_FN_ATTRS512
 
 #endif
+#endif
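Besides the guard, this file's hunks add const to the pointer casts in the load helpers: __dp and __A point at const data, so the casts now preserve the qualifier instead of silently dropping it. A sketch of the fix:

static float load_first(const void *p) {
  /* before: (float *)p  -- drops const; after: qualifier preserved */
  return ((const float *)p)[0];
}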
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512ifmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512ifmaintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512ifmavlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512ifmavlintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512pfintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512pfintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512pfintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512pfintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vbmi2intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vbmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vbmiintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vbmivlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
index 6a5a860..1cdbb28 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512vlbf16intrin.h
@@ -417,7 +417,7 @@
   __v4sf __V = {__A, 0, 0, 0};
   __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
       (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
-  return __R[0];
+  return (__bfloat16)__R[0];
 }
 
 /// Convert Packed BF16 Data to Packed float Data.
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlbitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
index 7873516..521ccab 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512vlbwintrin.h
@@ -1942,7 +1942,7 @@
 _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
+                                             (__v8hi)_mm_slli_epi16(__A, (int)__B),
                                              (__v8hi)__W);
 }
 
@@ -1950,7 +1950,7 @@
 _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_slli_epi16(__A, __B),
+                                             (__v8hi)_mm_slli_epi16(__A, (int)__B),
                                              (__v8hi)_mm_setzero_si128());
 }
 
@@ -1959,7 +1959,7 @@
                        unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
+                                         (__v16hi)_mm256_slli_epi16(__A, (int)__B),
                                          (__v16hi)__W);
 }
 
@@ -1967,7 +1967,7 @@
 _mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_slli_epi16(__A, __B),
+                                         (__v16hi)_mm256_slli_epi16(__A, (int)__B),
                                          (__v16hi)_mm256_setzero_si256());
 }
 
@@ -2095,7 +2095,7 @@
 _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
+                                             (__v8hi)_mm_srai_epi16(__A, (int)__B),
                                              (__v8hi)__W);
 }
 
@@ -2103,7 +2103,7 @@
 _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
-                                             (__v8hi)_mm_srai_epi16(__A, __B),
+                                             (__v8hi)_mm_srai_epi16(__A, (int)__B),
                                              (__v8hi)_mm_setzero_si128());
 }
 
@@ -2112,7 +2112,7 @@
                        unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
+                                         (__v16hi)_mm256_srai_epi16(__A, (int)__B),
                                          (__v16hi)__W);
 }
 
@@ -2120,7 +2120,7 @@
 _mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
-                                         (__v16hi)_mm256_srai_epi16(__A, __B),
+                                         (__v16hi)_mm256_srai_epi16(__A, (int)__B),
                                          (__v16hi)_mm256_setzero_si256());
 }
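
The `(int)__B` casts repeated through this file all follow one pattern: the unmasked shift intrinsics (`_mm_slli_epi16`, `_mm_srai_epi16`, and friends) take a signed `int` count, while these masked wrappers take `unsigned int`, so the cast silences `-Wsign-conversion` without changing the generated code. A minimal usage sketch of one wrapper, illustrative only and assuming AVX-512BW plus AVX-512VL are available:

#include <immintrin.h>

/* Shift the 16-bit lanes of `a` left by 3 where the mask bit is set;
 * unselected lanes keep their value from `w`. */
__m128i shift_even_lanes(__m128i w, __m128i a) {
  const __mmask8 even = 0x55; /* lanes 0, 2, 4, 6 */
  return _mm_mask_slli_epi16(w, even, a, 3);
}
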
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlcdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vlcdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vldqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
index 3d27853..d4a7d1b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512vlfp16intrin.h
@@ -11,6 +11,8 @@
     "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead."
 #endif
 
+#ifdef __SSE2__
+
 #ifndef __AVX512VLFP16INTRIN_H
 #define __AVX512VLFP16INTRIN_H
 
@@ -2066,3 +2068,4 @@
 #undef __DEFAULT_FN_ATTRS256
 
 #endif
+#endif
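
The paired `+#ifdef __SSE2__` near the top and this `+#endif` at the bottom wrap the entire header, so its FP16 intrinsics only exist when the SSE2 baseline is in play. The resulting nesting looks like this (paraphrased sketch, not verbatim header text):

#ifdef __SSE2__                /* new outer guard */
#ifndef __AVX512VLFP16INTRIN_H /* existing include guard */
#define __AVX512VLFP16INTRIN_H
/* ... FP16 VL intrinsics ... */
#endif /* __AVX512VLFP16INTRIN_H */
#endif /* __SSE2__ */
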
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
index 0519dba..3e8355f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512vlintrin.h
@@ -2988,7 +2988,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3007,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3054,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3073,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3120,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3139,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3186,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3205,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3252,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3271,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
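
These hunks swap target-specific builtins such as `__builtin_ia32_pabsq128` for clang's generic `__builtin_elementwise_abs`/`_min`/`_max`, which typically lower to the same instructions but are also visible to target-independent optimizations. Note that the unsigned variants now cast to `__v2du`/`__v4du` first: the generic builtins take their signedness from the element type. A standalone sketch of the pattern, assuming a clang new enough to provide these builtins:

typedef long long v2i64 __attribute__((__vector_size__(16)));
typedef unsigned long long v2u64 __attribute__((__vector_size__(16)));

v2i64 abs_lanes(v2i64 a) { return __builtin_elementwise_abs(a); }

/* The unsigned element type selects an unsigned compare, mirroring the
 * __v2du casts in the epu64 intrinsics above. */
v2u64 umax_lanes(v2u64 a, v2u64 b) { return __builtin_elementwise_max(a, b); }
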
@@ -4525,7 +4525,7 @@
 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
+                                             (__v4si)_mm_slli_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -4533,7 +4533,7 @@
 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
+                                             (__v4si)_mm_slli_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -4541,7 +4541,7 @@
 _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
+                                             (__v8si)_mm256_slli_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -4549,7 +4549,7 @@
 _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
+                                             (__v8si)_mm256_slli_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -4589,7 +4589,7 @@
 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
+                                             (__v2di)_mm_slli_epi64(__A, (int)__B),
                                              (__v2di)__W);
 }
 
@@ -4597,7 +4597,7 @@
 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
+                                             (__v2di)_mm_slli_epi64(__A, (int)__B),
                                              (__v2di)_mm_setzero_si128());
 }
 
@@ -4605,7 +4605,7 @@
 _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
+                                             (__v4di)_mm256_slli_epi64(__A, (int)__B),
                                              (__v4di)__W);
 }
 
@@ -4613,7 +4613,7 @@
 _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
+                                             (__v4di)_mm256_slli_epi64(__A, (int)__B),
                                              (__v4di)_mm256_setzero_si256());
 }
 
@@ -4869,7 +4869,7 @@
 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
+                                             (__v4si)_mm_srli_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -4877,7 +4877,7 @@
 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
+                                             (__v4si)_mm_srli_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -4885,7 +4885,7 @@
 _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
+                                             (__v8si)_mm256_srli_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -4893,7 +4893,7 @@
 _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
+                                             (__v8si)_mm256_srli_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -4933,7 +4933,7 @@
 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
+                                             (__v2di)_mm_srli_epi64(__A, (int)__B),
                                              (__v2di)__W);
 }
 
@@ -4941,7 +4941,7 @@
 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
+                                             (__v2di)_mm_srli_epi64(__A, (int)__B),
                                              (__v2di)_mm_setzero_si128());
 }
 
@@ -4949,7 +4949,7 @@
 _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
+                                             (__v4di)_mm256_srli_epi64(__A, (int)__B),
                                              (__v4di)__W);
 }
 
@@ -4957,7 +4957,7 @@
 _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
+                                             (__v4di)_mm256_srli_epi64(__A, (int)__B),
                                              (__v4di)_mm256_setzero_si256());
 }
 
@@ -6408,7 +6408,7 @@
 _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
+                                             (__v4si)_mm_srai_epi32(__A, (int)__B),
                                              (__v4si)__W);
 }
 
@@ -6416,7 +6416,7 @@
 _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
+                                             (__v4si)_mm_srai_epi32(__A, (int)__B),
                                              (__v4si)_mm_setzero_si128());
 }
 
@@ -6424,7 +6424,7 @@
 _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
+                                             (__v8si)_mm256_srai_epi32(__A, (int)__B),
                                              (__v8si)__W);
 }
 
@@ -6432,7 +6432,7 @@
 _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
+                                             (__v8si)_mm256_srai_epi32(__A, (int)__B),
                                              (__v8si)_mm256_setzero_si256());
 }
 
@@ -6483,7 +6483,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_srai_epi64(__m128i __A, unsigned int __imm)
 {
-  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -6505,7 +6505,7 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_srai_epi64(__m256i __A, unsigned int __imm)
 {
-  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -6525,79 +6525,65 @@
                                         (__v4di)_mm256_setzero_si256());
 }
 
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm_ternarylogic_epi32(A, B, C, imm)                                   \
+  ((__m128i)__builtin_ia32_pternlogd128_mask(                                  \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm)                           \
+  ((__m128i)__builtin_ia32_pternlogd128_mask(                                  \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
-                                              (__v4si)(__m128i)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm)                          \
+  ((__m128i)__builtin_ia32_pternlogd128_maskz(                                 \
+      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm256_ternarylogic_epi32(A, B, C, imm)                                \
+  ((__m256i)__builtin_ia32_pternlogd256_mask(                                  \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm)                        \
+  ((__m256i)__builtin_ia32_pternlogd256_mask(                                  \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
-                                              (__v8si)(__m256i)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm)                       \
+  ((__m256i)__builtin_ia32_pternlogd256_maskz(                                 \
+      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm_ternarylogic_epi64(A, B, C, imm)                                   \
+  ((__m128i)__builtin_ia32_pternlogq128_mask(                                  \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm)                           \
+  ((__m128i)__builtin_ia32_pternlogq128_mask(                                  \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
-                                              (__v2di)(__m128i)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm)                          \
+  ((__m128i)__builtin_ia32_pternlogq128_maskz(                                 \
+      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
+#define _mm256_ternarylogic_epi64(A, B, C, imm)                                \
+  ((__m256i)__builtin_ia32_pternlogq256_mask(                                  \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)-1))
 
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm)                        \
+  ((__m256i)__builtin_ia32_pternlogq256_mask(                                  \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
 
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
-                                              (__v4di)(__m256i)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm)                       \
+  ((__m256i)__builtin_ia32_pternlogq256_maskz(                                 \
+      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C),        \
+      (unsigned char)(imm), (__mmask8)(U)))
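
Beyond the reflow, the immediate in these macros is now cast to `(unsigned char)` rather than `(int)`, matching the builtin's imm8 operand. That imm8 is a 3-input truth table: bit `(a<<2)|(b<<1)|c` of the immediate is the output for that combination of input bits. A usage sketch, illustrative only and requiring AVX-512F plus AVX-512VL, where 0x96 is the table for three-way XOR:

#include <immintrin.h>

/* Computes a ^ b ^ c lane-wise: 0x96 == 0b10010110, the XOR3 table. */
__m128i xor3(__m128i a, __m128i b, __m128i c) {
  return _mm_ternarylogic_epi32(a, b, c, 0x96);
}
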
 
 #define _mm256_shuffle_f32x4(A, B, imm) \
   ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
index 0fb29af..8bc0694 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avx512vlvnniintrin.h
@@ -25,7 +25,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -34,7 +34,7 @@
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpbusd_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -45,7 +45,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -54,7 +54,7 @@
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpbusds_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -65,14 +65,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpwssd_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -83,14 +83,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 #define _mm256_dpwssds_epi32(S, A, B) \
   ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
@@ -101,7 +101,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -110,7 +110,7 @@
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpbusd_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -121,7 +121,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -130,7 +130,7 @@
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpbusds_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -141,14 +141,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpwssd_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
@@ -159,14 +159,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
 ///      tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 #define _mm_dpwssds_epi32(S, A, B) \
   ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
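
The `\operation` to `\code{.operation}` changes here (and in avxvnniintrin.h below) are Doxygen fixes: `\operation` is not a real Doxygen command, while `\code{.operation}` renders the pseudocode verbatim. As a cross-check of what that pseudocode for `_mm_dpbusd_epi32` says, a hypothetical scalar model, not part of the header; since every 16-bit intermediate product fits (|product| <= 128 * 255 < 32768), plain 32-bit arithmetic is equivalent:

#include <stdint.h>

/* s[j] += sum of four (unsigned A byte * signed B byte) products for
 * each dword lane, with no intermediate saturation. */
static void dpbusd_model(int32_t s[4], const uint8_t a[16], const int8_t b[16]) {
  for (int j = 0; j < 4; ++j) {
    int32_t sum = 0;
    for (int k = 0; k < 4; ++k)
      sum += (int32_t)a[4 * j + k] * (int32_t)b[4 * j + k];
    s[j] += sum;
  }
}
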
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vlvp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vnniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vpopcntdqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h b/linux-x86/lib64/clang/16.0.2/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avxintrin.h b/linux-x86/lib64/clang/16.0.2/include/avxintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/avxintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/avxintrin.h
index 17fe636..a8f953c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avxintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avxintrin.h
@@ -1504,7 +1504,10 @@
 ///    00: Bits [31:0] and [159:128] are copied from the selected operand. \n
 ///    01: Bits [63:32] and [191:160] are copied from the selected operand. \n
 ///    10: Bits [95:64] and [223:192] are copied from the selected operand. \n
-///    11: Bits [127:96] and [255:224] are copied from the selected operand.
+///    11: Bits [127:96] and [255:224] are copied from the selected operand. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 256-bit vector of [8 x float] containing the shuffled values.
 #define _mm256_shuffle_ps(a, b, mask) \
   ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
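
The note added above points to `_MM_SHUFFLE` as the intended way to build this immediate; `_MM_SHUFFLE(z, y, x, w)` packs its arguments as `(z<<6)|(y<<4)|(x<<2)|w`. A usage sketch, illustrative only:

#include <immintrin.h>

/* Per 128-bit half, selects {a0, a1, b0, b1}:
 * _MM_SHUFFLE(1, 0, 1, 0) == 0x44. */
__m256 interleave_low_pairs(__m256 a, __m256 b) {
  return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0));
}
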
@@ -1953,12 +1956,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi32(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit vector of [8 x i32].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [2:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 32 bits of extended
@@ -1971,12 +1978,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi16(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [16 x i16].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [3:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 16 bits of zero-extended
@@ -1990,12 +2001,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm256_extract_epi8(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [32 x i8].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [4:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 32-bit integer containing the extracted 8 bits of zero-extended
@@ -2010,12 +2025,16 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// long long _mm256_extract_epi64(__m256i X, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A 256-bit integer vector of [4 x i64].
-/// \param __imm
+/// \param N
 ///    An immediate integer operand with bits [1:0] determining which vector
 ///    element is extracted and returned.
 /// \returns A 64-bit integer containing the extracted 64 bits of extended
@@ -2030,18 +2049,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [8 x i32] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi32(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
                                         (int)(I), (int)(N)))
@@ -2053,18 +2076,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [16 x i16] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An i16 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi16(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
                                          (int)(I), (int)(N)))
@@ -2075,18 +2102,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [32 x i8] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    An i8 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///    \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///    \a N with \a I.
 #define _mm256_insert_epi8(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
                                          (int)(I), (int)(N)))
@@ -2098,18 +2129,22 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
 ///   instruction.
 ///
-/// \param __a
+/// \param X
 ///    A vector of [4 x i64] to be used by the insert operation.
-/// \param __b
+/// \param I
 ///    A 64-bit integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
 ///    An immediate integer specifying the index of the vector element to be
 ///    replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-///     \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+///     \a N with \a I.
 #define _mm256_insert_epi64(X, I, N) \
   ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
                                         (long long)(I), (int)(N)))
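
The `\code` prototypes added to these extract/insert descriptions, together with the parameter renames from `__a`/`__imm` to `X`/`N`, make explicit that these are macros (the doc names now match the macro spelling), so `N` must be an integer constant expression. A usage sketch, illustrative only; the epi64 forms assume an x86-64 target:

#include <immintrin.h>

long long third_lane(__m256i v) { return _mm256_extract_epi64(v, 2); }

__m256i replace_third_lane(__m256i v, long long x) {
  return _mm256_insert_epi64(v, x, 2); /* N = 2 is a constant expression */
}
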
@@ -3177,7 +3212,7 @@
 ///    A pointer to a 256-bit integer vector containing integer values.
 /// \returns A 256-bit integer vector containing the moved values.
 static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_lddqu_si256(__m256i const *__p)
+_mm256_lddqu_si256(__m256i_u const *__p)
 {
   return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
 }
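
Here the parameter type changes from `__m256i const *` to `__m256i_u const *`, so this unaligned-load intrinsic no longer claims 32-byte alignment it never required. A usage sketch, illustrative only:

#include <immintrin.h>
#include <stdint.h>

/* lddqu may read from any address; the __m256i_u pointer type encodes
 * that 1-byte alignment is sufficient. */
__m256i load_unaligned(const uint8_t *p) {
  return _mm256_lddqu_si256((const __m256i_u *)p);
}
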
diff --git a/linux-x86/lib64/clang/14.0.2/include/avxvnniintrin.h b/linux-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
similarity index 97%
rename from linux-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
index ad45cb7..b7de562 100644
--- a/linux-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/avxvnniintrin.h
@@ -50,7 +50,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -59,7 +59,7 @@
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -73,7 +73,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -82,7 +82,7 @@
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -96,14 +96,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -117,14 +117,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 7
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
 {
@@ -138,7 +138,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -147,7 +147,7 @@
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -161,7 +161,7 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
 ///      tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -170,7 +170,7 @@
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -184,14 +184,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := __S.dword[j] + tmp1 + tmp2
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
@@ -205,14 +205,14 @@
 ///
 /// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 ///    FOR j := 0 to 3
 ///      tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
 ///      tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
 ///      DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
 ///    ENDFOR
 ///    DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
 {
diff --git a/linux-x86/lib64/clang/14.0.2/include/bits/stdatomic.h b/linux-x86/lib64/clang/16.0.2/include/bits/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/bits/stdatomic.h
rename to linux-x86/lib64/clang/16.0.2/include/bits/stdatomic.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/bmi2intrin.h b/linux-x86/lib64/clang/16.0.2/include/bmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/bmi2intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h b/linux-x86/lib64/clang/16.0.2/include/bmiintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/bmiintrin.h
index f583c21..ffb94be 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/bmiintrin.h
@@ -47,6 +47,7 @@
 ///    An unsigned 32-bit integer whose trailing zeros are to be counted.
 /// \returns An unsigned 32-bit integer containing the number of trailing zero
 ///    bits in the operand.
+/// \see _mm_tzcnt_32
 static __inline__ unsigned int __RELAXED_FN_ATTRS
 __tzcnt_u32(unsigned int __X)
 {
@@ -63,10 +64,11 @@
 ///    An unsigned 32-bit integer whose trailing zeros are to be counted.
 /// \returns A 32-bit integer containing the number of trailing zero bits in
 ///    the operand.
+/// \see __tzcnt_u32
 static __inline__ int __RELAXED_FN_ATTRS
 _mm_tzcnt_32(unsigned int __X)
 {
-  return __builtin_ia32_tzcnt_u32(__X);
+  return (int)__builtin_ia32_tzcnt_u32(__X);
 }
 
 #define _tzcnt_u32(a)     (__tzcnt_u32((a)))
@@ -83,6 +85,7 @@
 ///    An unsigned 64-bit integer whose trailing zeros are to be counted.
 /// \returns An unsigned 64-bit integer containing the number of trailing zero
 ///    bits in the operand.
+/// \see _mm_tzcnt_64
 static __inline__ unsigned long long __RELAXED_FN_ATTRS
 __tzcnt_u64(unsigned long long __X)
 {
@@ -99,10 +102,11 @@
 ///    An unsigned 64-bit integer whose trailing zeros are to be counted.
 /// \returns A 64-bit integer containing the number of trailing zero bits in
 ///    the operand.
+/// \see __tzcnt_u64
 static __inline__ long long __RELAXED_FN_ATTRS
 _mm_tzcnt_64(unsigned long long __X)
 {
-  return __builtin_ia32_tzcnt_u64(__X);
+  return (long long)__builtin_ia32_tzcnt_u64(__X);
 }
 
 #define _tzcnt_u64(a)     (__tzcnt_u64((a)))
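
The new `\see` tags cross-reference the two spellings of the same operation: `__tzcnt_u32`/`__tzcnt_u64` return unsigned results while `_mm_tzcnt_32`/`_mm_tzcnt_64` return signed ones, which is also why the explicit casts appear in their bodies. One property worth remembering with either spelling, shown in an illustrative sketch:

#include <x86intrin.h>

/* On BMI hardware, TZCNT of zero yields the operand width, so this
 * returns 32 for x == 0 rather than an undefined value. */
unsigned trailing_zeros(unsigned x) { return __tzcnt_u32(x); }
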
diff --git a/linux-x86/lib64/clang/14.0.2/include/builtins.h b/linux-x86/lib64/clang/16.0.2/include/builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/builtins.h
rename to linux-x86/lib64/clang/16.0.2/include/builtins.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/cet.h b/linux-x86/lib64/clang/16.0.2/include/cet.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cet.h
rename to linux-x86/lib64/clang/16.0.2/include/cet.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h b/linux-x86/lib64/clang/16.0.2/include/cetintrin.h
similarity index 80%
copy from darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/cetintrin.h
index 4290e9d..a68df5b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/cetintrin.h
@@ -19,7 +19,7 @@
   __attribute__((__always_inline__, __nodebug__, __target__("shstk")))
 
 static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
-  __builtin_ia32_incsspd(__a);
+  __builtin_ia32_incsspd((unsigned int)__a);
 }
 
 #ifdef __x86_64__
@@ -34,7 +34,7 @@
 }
 #else /* __x86_64__ */
 static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspd((int)__a);
+  __builtin_ia32_incsspd(__a);
 }
 #endif /* __x86_64__ */
 
@@ -42,10 +42,26 @@
   return __builtin_ia32_rdsspd(__a);
 }
 
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+  unsigned int t;
+  return __builtin_ia32_rdsspd(t);
+#pragma clang diagnostic pop
+}
+
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
   return __builtin_ia32_rdsspq(__a);
 }
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+  unsigned long long t;
+  return __builtin_ia32_rdsspq(t);
+#pragma clang diagnostic pop
+}
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
@@ -58,7 +74,7 @@
 }
 #endif /* __x86_64__ */
 
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
+static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
   __builtin_ia32_saveprevssp();
 }
 
@@ -86,7 +102,7 @@
 }
 #endif /* __x86_64__ */
 
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy() {
+static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
   __builtin_ia32_setssbsy();
 }
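
The new `_rdsspd_i32`/`_rdsspq_i64` helpers read the shadow-stack pointer without a caller-supplied seed. `RDSSP` leaves its destination unchanged when shadow stacks are inactive, so the deliberately uninitialized `t` only matters in the unsupported case, and the pragmas silence the expected `-Wuninitialized`. With the older seeded form the disabled case stays well defined, as in this illustrative sketch (requires `-mshstk`):

#include <immintrin.h>

/* Returns the current shadow-stack pointer, or 0 when shadow stacks
 * are not active (RDSSP is then a NOP and the seed passes through). */
unsigned int current_ssp(void) { return _rdsspd(0); }
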
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/cldemoteintrin.h b/linux-x86/lib64/clang/16.0.2/include/cldemoteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cldemoteintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/cldemoteintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clflushoptintrin.h b/linux-x86/lib64/clang/16.0.2/include/clflushoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clflushoptintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/clflushoptintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clwbintrin.h b/linux-x86/lib64/clang/16.0.2/include/clwbintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clwbintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/clwbintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clzerointrin.h b/linux-x86/lib64/clang/16.0.2/include/clzerointrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clzerointrin.h
rename to linux-x86/lib64/clang/16.0.2/include/clzerointrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/cpuid.h b/linux-x86/lib64/clang/16.0.2/include/cpuid.h
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/include/cpuid.h
rename to linux-x86/lib64/clang/16.0.2/include/cpuid.h
index 6df1b4a..caa0069 100644
--- a/linux-x86/lib64/clang/14.0.2/include/cpuid.h
+++ b/linux-x86/lib64/clang/16.0.2/include/cpuid.h
@@ -200,7 +200,7 @@
 #define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI       0x00000008
+#define bit_AVXVNNI       0x00000010
 #define bit_AVX512BF16    0x00000020
 #define bit_HRESET        0x00400000
 
@@ -232,6 +232,7 @@
 
 /* Features in %ebx for leaf 0x80000008 */
 #define bit_CLZERO      0x00000001
+#define bit_RDPRU       0x00000010
 #define bit_WBNOINVD    0x00000200
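
Two substantive fixes in cpuid.h: `bit_AVXVNNI` moves from bit 3 to bit 4, matching the architectural definition (AVX-VNNI is CPUID.(EAX=7,ECX=1):EAX[4]), and `bit_RDPRU` is newly defined in EBX of leaf 0x80000008. A detection sketch using the corrected constant, illustrative only:

#include <cpuid.h>

int have_avxvnni(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (eax & bit_AVXVNNI) != 0;
}
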
 
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/crc32intrin.h b/linux-x86/lib64/clang/16.0.2/include/crc32intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/crc32intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/crc32intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm b/linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/algorithm
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm
rename to linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/algorithm
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex b/linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex
rename to linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/complex
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/new b/linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/new
rename to linux-x86/lib64/clang/16.0.2/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h b/linux-x86/lib64/clang/16.0.2/include/emmintrin.h
similarity index 81%
copy from darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/emmintrin.h
index 6e9c303..a3f56e8 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/emmintrin.h
@@ -20,16 +20,17 @@
 typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
 
 typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u
+    __attribute__((__vector_size__(16), __aligned__(1)));
 
 /* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef double __v2df __attribute__((__vector_size__(16)));
+typedef long long __v2di __attribute__((__vector_size__(16)));
 typedef short __v8hi __attribute__((__vector_size__(16)));
 typedef char __v16qi __attribute__((__vector_size__(16)));
 
 /* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
 typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
 typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
 
@@ -38,8 +39,12 @@
 typedef signed char __v16qs __attribute__((__vector_size__(16)));
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse2"),           \
+                 __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX                                                 \
+  __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"),       \
+                 __min_vector_width__(64)))
 
 /// Adds lower double-precision values in both operands and returns the
 ///    sum in the lower 64 bits of the result. The upper 64 bits of the result
@@ -56,9 +61,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
 ///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] += __b[0];
   return __a;
 }
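
The emmintrin.h hunks shown from here on are clang-format reflow only; no declaration changes its type or behavior. As a reminder of the `_sd` semantics these comments describe, an illustrative sketch:

#include <emmintrin.h>

/* _mm_add_sd adds only the low lanes; the high lane of the first
 * operand passes through: result == {lo = 4.0, hi = 2.0}. */
__m128d demo_add_sd(void) {
  __m128d a = _mm_set_pd(2.0, 1.0); /* hi = 2.0, lo = 1.0 */
  __m128d b = _mm_set_pd(4.0, 3.0); /* hi = 4.0, lo = 3.0 */
  return _mm_add_sd(a, b);
}
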
@@ -75,9 +79,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the sums of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a + (__v2df)__b);
 }
 
@@ -98,9 +101,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    difference of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] -= __b[0];
   return __a;
 }
@@ -117,9 +119,8 @@
 ///    A 128-bit vector of [2 x double] containing the subtrahend.
 /// \returns A 128-bit vector of [2 x double] containing the differences between
 ///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a - (__v2df)__b);
 }
 
@@ -139,9 +140,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    product of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] *= __b[0];
   return __a;
 }
@@ -158,9 +158,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the products of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a * (__v2df)__b);
 }
 
@@ -181,9 +180,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    quotient of the lower 64 bits of both operands. The upper 64 bits are
 ///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a,
+                                                        __m128d __b) {
   __a[0] /= __b[0];
   return __a;
 }
@@ -201,9 +199,8 @@
 ///    A 128-bit vector of [2 x double] containing the divisor.
 /// \returns A 128-bit vector of [2 x double] containing the quotients of both
 ///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2df)__a / (__v2df)__b);
 }
 
@@ -226,11 +223,10 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    square root of the lower 64 bits of operand \a __b, and whose upper 64
 ///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a,
+                                                         __m128d __b) {
   __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Calculates the square root of each of the two values stored in a
@@ -244,9 +240,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [2 x double] containing the square roots of the
 ///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
   return __builtin_ia32_sqrtpd((__v2df)__a);
 }
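// A short sketch (not from the upstream header; assumes SSE2): _mm_sqrt_sd
// takes the square root of the low lane of its second operand and keeps the
// high lane of its first.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(7.0, 1.0);   /* {1.0, 7.0} */
  __m128d b = _mm_set_pd(25.0, 9.0);  /* {9.0, 25.0} */
  double out[2];
  _mm_storeu_pd(out, _mm_sqrt_sd(a, b));  /* {sqrt(9.0), 7.0} */
  printf("%g %g\n", out[0], out[1]);      /* 3 7 */
  return 0;
}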
 
@@ -268,9 +262,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    minimum value between both operands. The upper 64 bits are copied from
 ///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -288,9 +281,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the minimum values
 ///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -312,9 +304,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    maximum value between both operands. The upper 64 bits are copied from
 ///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -332,9 +323,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the operands.
 /// \returns A 128-bit vector of [2 x double] containing the maximum values
 ///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
 }
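// A short sketch (not from the upstream header; assumes SSE2): per-lane
// min/max. These lower to MINPD/MAXPD, which return the second operand when
// either input is NaN, so argument order matters for NaN propagation.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(4.0, -1.0);    /* {-1.0, 4.0} */
  __m128d b = _mm_set_pd(3.0, 2.0);     /* { 2.0, 3.0} */
  double lo[2], hi[2];
  _mm_storeu_pd(lo, _mm_min_pd(a, b));  /* {-1.0, 3.0} */
  _mm_storeu_pd(hi, _mm_max_pd(a, b));  /* { 2.0, 4.0} */
  printf("min: %g %g  max: %g %g\n", lo[0], lo[1], hi[0], hi[1]);
  return 0;
}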
 
@@ -350,9 +340,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2du)__a & (__v2du)__b);
 }
 
@@ -371,9 +360,8 @@
 /// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
 ///    values in the second operand and the one's complement of the first
 ///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)(~(__v2du)__a & (__v2du)__b);
 }
 
@@ -389,9 +377,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a,
+                                                       __m128d __b) {
   return (__m128d)((__v2du)__a | (__v2du)__b);
 }
 
@@ -407,9 +394,8 @@
 ///    A 128-bit vector of [2 x double] containing one of the source operands.
 /// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
 ///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
+                                                        __m128d __b) {
   return (__m128d)((__v2du)__a ^ (__v2du)__b);
 }
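// A short sketch (not from the upstream header; assumes SSE2): a classic use
// of _mm_andnot_pd is a branch-free absolute value, clearing the sign bit of
// each lane.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d sign = _mm_set1_pd(-0.0);   /* only the sign bit set */
  __m128d x = _mm_set_pd(2.5, -3.5);  /* {-3.5, 2.5} */
  double out[2];
  _mm_storeu_pd(out, _mm_andnot_pd(sign, x));  /* ~sign & x = {3.5, 2.5} */
  printf("%g %g\n", out[0], out[1]);
  return 0;
}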
 
@@ -426,9 +412,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -446,9 +431,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -467,9 +451,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
 }
 
@@ -488,9 +471,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
 }
 
@@ -509,9 +491,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
 }
 
@@ -532,9 +513,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -556,9 +536,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
+                                                             __m128d __b) {
   return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -577,9 +556,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -598,9 +576,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
 }
 
@@ -619,9 +596,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
 }
 
@@ -640,9 +616,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
 }
 
@@ -661,9 +636,8 @@
 /// \param __b
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
 }
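// A short sketch (not from the upstream header; assumes SSE2): each packed
// comparison lane yields all-ones or all-zeros, which composes with the
// bitwise ops above into a branch-free select: (mask & t) | (~mask & f).
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(1.0, 5.0);   /* {5.0, 1.0} */
  __m128d b = _mm_set_pd(2.0, 3.0);   /* {3.0, 2.0} */
  __m128d m = _mm_cmpgt_pd(a, b);     /* {all-ones, all-zeros} */
  /* Select a where a > b, otherwise b. */
  __m128d r = _mm_or_pd(_mm_and_pd(m, a), _mm_andnot_pd(m, b));
  double out[2];
  _mm_storeu_pd(out, r);
  printf("%g %g\n", out[0], out[1]);  /* 5 2 */
  return 0;
}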
 
@@ -684,9 +658,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -709,9 +682,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -734,9 +706,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
+                                                          __m128d __b) {
   return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
 }
 
@@ -759,11 +730,10 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///     results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
+                                                          __m128d __b) {
   __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -785,11 +755,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
+                                                          __m128d __b) {
   __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -813,9 +782,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -841,9 +809,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
+                                                             __m128d __b) {
   return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -866,9 +833,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -891,9 +857,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
 }
 
@@ -916,9 +881,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
+                                                           __m128d __b) {
   return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
 }
 
@@ -941,11 +905,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
+                                                           __m128d __b) {
   __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -967,11 +930,10 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns A 128-bit vector. The lower 64 bits contain the comparison
 ///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
+                                                           __m128d __b) {
   __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
+  return __extension__(__m128d){__c[0], __a[1]};
 }
 
 /// Compares the lower double-precision floating-point values in each of
@@ -992,9 +954,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1018,9 +979,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1044,9 +1004,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
 }
 
@@ -1070,9 +1029,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1096,9 +1054,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
+                                                       __m128d __b) {
   return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
 }
 
@@ -1122,9 +1079,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1146,9 +1102,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
 }
 
@@ -1172,9 +1127,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1198,9 +1152,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
 }
 
@@ -1224,9 +1177,8 @@
 ///     compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
 }
 
@@ -1250,9 +1202,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison results. If either of the two
 ///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
+                                                        __m128d __b) {
   return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
 }
 
@@ -1276,9 +1227,8 @@
 ///    compared to the lower double-precision value of \a __a.
 /// \returns An integer containing the comparison result. If either of the two
 ///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,
+                                                         __m128d __b) {
   return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
 }
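// A short sketch (not from the upstream header; assumes SSE2): unlike the
// mask-returning _mm_cmp* forms, the comi/ucomi intrinsics compare only the
// low lanes and return a plain int, so they drive ordinary control flow.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_sd(1.0);
  __m128d b = _mm_set_sd(2.0);
  if (_mm_comilt_sd(a, b))                  /* 1.0 < 2.0, so taken */
    printf("a < b\n");
  printf("eq: %d\n", _mm_comieq_sd(a, b));  /* 0 */
  return 0;
}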
 
@@ -1295,9 +1245,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) {
   return __builtin_ia32_cvtpd2ps((__v2df)__a);
 }
 
@@ -1315,9 +1263,7 @@
 ///    floating-point elements are converted to double-precision values. The
 ///    upper two elements are unused.
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) {
   return (__m128d) __builtin_convertvector(
       __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
 }
@@ -1338,9 +1284,7 @@
 ///
 ///    The upper two elements are unused.
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {
   return (__m128d) __builtin_convertvector(
       __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
 }
@@ -1358,9 +1302,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {
   return __builtin_ia32_cvtpd2dq((__v2df)__a);
 }
 
@@ -1375,9 +1317,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) {
   return __builtin_ia32_cvtsd2si((__v2df)__a);
 }
 
@@ -1400,9 +1340,8 @@
 /// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
 ///    converted value from the second parameter. The upper 96 bits are copied
 ///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a,
+                                                         __m128d __b) {
   return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
 }
 
@@ -1423,9 +1362,8 @@
 /// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
 ///    converted value from the second parameter. The upper 64 bits are copied
 ///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a,
+                                                            int __b) {
   __a[0] = __b;
   return __a;
 }
@@ -1449,9 +1387,8 @@
 /// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
 ///    converted value from the second parameter. The upper 64 bits are copied
 ///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,
+                                                          __m128 __b) {
   __a[0] = __b[0];
   return __a;
 }
@@ -1473,9 +1410,7 @@
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
 ///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {
   return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
 }
 
@@ -1491,9 +1426,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {
   return __builtin_ia32_cvttsd2si((__v2df)__a);
 }
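// A short sketch (not from the upstream header; assumes SSE2):
// _mm_cvtsd_si32 rounds using the current rounding mode (round-to-nearest-
// even by default), while _mm_cvttsd_si32 truncates toward zero.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d x = _mm_set_sd(2.7);
  printf("cvt:  %d\n", _mm_cvtsd_si32(x));   /* 3 (rounded) */
  printf("cvtt: %d\n", _mm_cvttsd_si32(x));  /* 2 (truncated) */
  return 0;
}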
 
@@ -1508,9 +1441,7 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double].
 /// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {
   return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
 }
 
@@ -1528,9 +1459,7 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double].
 /// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) {
   return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
 }
 
@@ -1545,9 +1474,7 @@
 /// \param __a
 ///    A 64-bit vector of [2 x i32].
 /// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) {
   return __builtin_ia32_cvtpi2pd((__v2si)__a);
 }
 
@@ -1562,9 +1489,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
 /// \returns A double-precision floating-point value copied from the lower 64
 ///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
+static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) {
   return __a[0];
 }
 
@@ -1579,10 +1504,8 @@
 ///    A pointer to a 128-bit memory location. The address of the memory
 ///    location has to be 16-byte aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) {
+  return *(const __m128d *)__dp;
 }
 
 /// Loads a double-precision floating-point value from a specified memory
@@ -1597,17 +1520,15 @@
 ///    A pointer to a memory location containing a double-precision value.
 /// \returns A 128-bit vector of [2 x double] containing the loaded and
 ///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) {
   struct __mm_load1_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
+  double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, __u};
 }
 
-#define        _mm_load_pd1(dp)        _mm_load1_pd(dp)
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
 
 /// Loads two double-precision values, in reverse order, from an aligned
 ///    memory location into a 128-bit vector of [2 x double].
@@ -1623,10 +1544,8 @@
 ///    loaded in reverse order.
 /// \returns A 128-bit vector of [2 x double] containing the reversed loaded
 ///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) {
+  __m128d __u = *(const __m128d *)__dp;
   return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
 }
 
@@ -1641,13 +1560,11 @@
 ///    A pointer to a 128-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) {
   struct __loadu_pd {
     __m128d_u __v;
   } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__dp)->__v;
+  return ((const struct __loadu_pd *)__dp)->__v;
 }
 
 /// Loads a 64-bit integer value to the low element of a 128-bit integer
@@ -1661,14 +1578,12 @@
 ///    A pointer to a 64-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) {
   struct __loadu_si64 {
     long long __v;
   } __attribute__((__packed__, __may_alias__));
-  long long __u = ((const struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
+  long long __u = ((const struct __loadu_si64 *)__a)->__v;
+  return __extension__(__m128i)(__v2di){__u, 0LL};
 }
 
 /// Loads a 32-bit integer value to the low element of a 128-bit integer
@@ -1682,14 +1597,12 @@
 ///    A pointer to a 32-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) {
   struct __loadu_si32 {
     int __v;
   } __attribute__((__packed__, __may_alias__));
-  int __u = ((const struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+  int __u = ((const struct __loadu_si32 *)__a)->__v;
+  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
 }
 
 /// Loads a 16-bit integer value to the low element of a 128-bit integer
@@ -1703,14 +1616,12 @@
 ///    A pointer to a 16-bit memory location. The address of the memory
 ///    location does not have to be aligned.
 /// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) {
   struct __loadu_si16 {
     short __v;
   } __attribute__((__packed__, __may_alias__));
-  short __u = ((const struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
+  short __u = ((const struct __loadu_si16 *)__a)->__v;
+  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
 }
 
 /// Loads a 64-bit double-precision value to the low element of a
@@ -1724,14 +1635,12 @@
 ///    A pointer to a memory location containing a double-precision value.
 ///    The address of the memory location does not have to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) {
   struct __mm_load_sd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
+  double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, 0};
 }
 
 /// Loads a double-precision value into the high-order bits of a 128-bit
@@ -1751,14 +1660,13 @@
 ///    [127:64] of the result. The address of the memory location does not have
 ///    to be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,
+                                                          double const *__dp) {
   struct __mm_loadh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
+  double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__a[0], __u};
 }
 
 /// Loads a double-precision value into the low-order bits of a 128-bit
@@ -1778,14 +1686,13 @@
 ///    [63:0] of the result. The address of the memory location does not have to
 ///    be aligned.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,
+                                                          double const *__dp) {
   struct __mm_loadl_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
+  double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
+  return __extension__(__m128d){__u, __a[1]};
 }
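// A short sketch (not from the upstream header; assumes SSE2 and C11's
// _Alignas): _mm_load_pd requires a 16-byte-aligned address, _mm_loadu_pd
// does not, and loadh/loadl patch one half of an existing vector.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  _Alignas(16) double aligned[2] = {1.0, 2.0};
  double plain = 3.0;
  __m128d v = _mm_load_pd(aligned);   /* {1.0, 2.0} */
  v = _mm_loadh_pd(v, &plain);        /* {1.0, 3.0} */
  double out[2];
  _mm_storeu_pd(out, v);
  printf("%g %g\n", out[0], out[1]);  /* 1 3 */
  return 0;
}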
 
 /// Constructs a 128-bit floating-point vector of [2 x double] with
@@ -1799,9 +1706,7 @@
 ///
 /// \returns A 128-bit floating-point vector of [2 x double] with unspecified
 ///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {
   return (__m128d)__builtin_ia32_undef128();
 }
 
@@ -1819,10 +1724,8 @@
 /// \returns An initialized 128-bit floating-point vector of [2 x double]. The
 ///    lower 64 bits contain the value of the parameter. The upper 64 bits are
 ///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {
+  return __extension__(__m128d){__w, 0};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1837,10 +1740,8 @@
 ///    A double-precision floating-point value used to initialize each vector
 ///    element of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) {
+  return __extension__(__m128d){__w, __w};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1855,9 +1756,7 @@
 ///    A double-precision floating-point value used to initialize each vector
 ///    element of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) {
   return _mm_set1_pd(__w);
 }
 
@@ -1875,10 +1774,9 @@
 ///    A double-precision floating-point value used to initialize the lower 64
 ///    bits of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w,
+                                                        double __x) {
+  return __extension__(__m128d){__x, __w};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double],
@@ -1896,10 +1794,9 @@
 ///    A double-precision floating-point value used to initialize the upper 64
 ///    bits of the result.
 /// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w,
+                                                         double __x) {
+  return __extension__(__m128d){__w, __x};
 }
 
 /// Constructs a 128-bit floating-point vector of [2 x double]
@@ -1911,10 +1808,8 @@
 ///
 /// \returns An initialized 128-bit floating-point vector of [2 x double] with
 ///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) {
+  return __extension__(__m128d){0, 0};
 }
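// A short sketch (not from the upstream header; assumes SSE2): note the
// argument order: _mm_set_pd takes (high, low) while _mm_setr_pd takes
// (low, high).
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  double out[2];
  _mm_storeu_pd(out, _mm_set_pd(9.0, 1.0));   /* {1.0, 9.0} */
  printf("set:  %g %g\n", out[0], out[1]);
  _mm_storeu_pd(out, _mm_setr_pd(9.0, 1.0));  /* {9.0, 1.0} */
  printf("setr: %g %g\n", out[0], out[1]);
  return 0;
}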
 
 /// Constructs a 128-bit floating-point vector of [2 x double]. The lower
@@ -1932,9 +1827,8 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
 ///    lower 64 bits of the result.
 /// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a,
+                                                         __m128d __b) {
   __a[0] = __b[0];
   return __a;
 }
@@ -1950,13 +1844,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp,
+                                                       __m128d __a) {
   struct __mm_store_sd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+  ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];
 }
 
 /// Moves packed double-precision values from a 128-bit vector of
@@ -1972,10 +1865,9 @@
 /// \param __a
 ///    A packed 128-bit vector of [2 x double] containing the values to be
 ///    moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp,
+                                                       __m128d __a) {
+  *(__m128d *)__dp = __a;
 }
 
 /// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
@@ -1992,9 +1884,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
 ///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp,
+                                                        __m128d __a) {
   __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
   _mm_store_pd(__dp, __a);
 }
@@ -2013,9 +1904,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
 ///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp,
+                                                        __m128d __a) {
   _mm_store1_pd(__dp, __a);
 }
 
@@ -2031,13 +1921,12 @@
 ///    location does not have to be aligned.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp,
+                                                        __m128d __a) {
   struct __storeu_pd {
     __m128d_u __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
+  ((struct __storeu_pd *)__dp)->__v = __a;
 }
 
 /// Stores two double-precision values, in reverse order, from a 128-bit
@@ -2054,9 +1943,8 @@
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the values to be reversed and
 ///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp,
+                                                        __m128d __a) {
   __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
   *(__m128d *)__dp = __a;
 }
@@ -2072,13 +1960,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp,
+                                                        __m128d __a) {
   struct __mm_storeh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];
 }
 
 /// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
@@ -2092,13 +1979,12 @@
 ///    A pointer to a 64-bit memory location.
 /// \param __a
 ///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp,
+                                                        __m128d __a) {
   struct __mm_storeh_pd_struct {
     double __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
 }
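// A short sketch (not from the upstream header; assumes SSE2): _mm_storel_pd
// and _mm_storeh_pd extract the two halves of a vector to scalar memory
// without a shuffle.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d v = _mm_set_pd(20.0, 10.0);  /* {10.0, 20.0} */
  double lo, hi;
  _mm_storel_pd(&lo, v);  /* lo = 10.0 */
  _mm_storeh_pd(&hi, v);  /* hi = 20.0 */
  printf("%g %g\n", lo, hi);
  return 0;
}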
 
 /// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
@@ -2117,9 +2003,8 @@
 ///    A 128-bit vector of [16 x i8].
 /// \returns A 128-bit vector of [16 x i8] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v16qu)__a + (__v16qu)__b);
 }
 
@@ -2139,9 +2024,8 @@
 ///    A 128-bit vector of [8 x i16].
 /// \returns A 128-bit vector of [8 x i16] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v8hu)__a + (__v8hu)__b);
 }
 
@@ -2161,9 +2045,8 @@
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v4su)__a + (__v4su)__b);
 }
 
@@ -2179,9 +2062,8 @@
 /// \param __b
 ///    A 64-bit integer.
 /// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a,
+                                                            __m64 __b) {
   return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
 }
 
@@ -2201,9 +2083,8 @@
 ///    A 128-bit vector of [2 x i64].
 /// \returns A 128-bit vector of [2 x i64] containing the sums of both
 ///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a + (__v2du)__b);
 }
 
@@ -2222,10 +2103,9 @@
 ///    A 128-bit signed [16 x i8] vector.
 /// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
 ///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2244,10 +2124,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
 ///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2265,10 +2144,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2286,10 +2164,9 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
 }
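// A short sketch (not from the upstream header; assumes SSE2): the "adds"
// forms clamp instead of wrapping, e.g. 127 + 10 saturates to 127 for signed
// bytes, where plain _mm_add_epi8 wraps to -119.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(127);
  __m128i b = _mm_set1_epi8(10);
  signed char out[16];
  _mm_storeu_si128((__m128i *)out, _mm_adds_epi8(a, b));
  printf("saturated: %d\n", out[0]);  /* 127 */
  _mm_storeu_si128((__m128i *)out, _mm_add_epi8(a, b));
  printf("wrapped:   %d\n", out[0]);  /* -119 */
  return 0;
}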
 
 /// Computes the rounded averages of corresponding elements of two
@@ -2306,9 +2183,8 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
 ///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
 }
 
@@ -2326,9 +2202,8 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
 ///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2352,9 +2227,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [4 x i32] vector containing the sums of products
 ///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a,
+                                                            __m128i __b) {
   return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
 }
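// A short sketch (not from the upstream header; assumes SSE2):
// _mm_madd_epi16 is the building block of fixed-point dot products; each
// 32-bit result lane holds a[2i]*b[2i] + a[2i+1]*b[2i+1].
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
  __m128i b = _mm_set1_epi16(2);
  int out[4];
  _mm_storeu_si128((__m128i *)out, _mm_madd_epi16(a, b));
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 6 14 22 30 */
  return 0;
}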
 
@@ -2372,10 +2246,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the greater value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2392,10 +2265,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a,
+                                                          __m128i __b) {
+  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2412,10 +2284,9 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2432,10 +2303,9 @@
 ///    A 128-bit unsigned [16 x i8] vector.
 /// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
 ///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
+                                                          __m128i __b) {
+  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Multiplies the corresponding elements of two signed [8 x i16]
@@ -2452,9 +2322,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
 ///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2472,9 +2341,8 @@
 ///    A 128-bit unsigned [8 x i16] vector.
 /// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
 ///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -2492,9 +2360,8 @@
 ///    A 128-bit signed [8 x i16] vector.
 /// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
 ///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hu)__a * (__v8hu)__b);
 }
 
@@ -2511,9 +2378,8 @@
 /// \param __b
 ///    A 64-bit integer containing one of the source operands.
 /// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a,
+                                                            __m64 __b) {
   return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
 }
 
@@ -2530,9 +2396,8 @@
 /// \param __b
 ///    A [2 x i64] vector containing one of the source operands.
 /// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,
+                                                           __m128i __b) {
   return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
 }
 
@@ -2552,9 +2417,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A [2 x i64] vector containing the sums of the sets of absolute
 ///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a,
+                                                          __m128i __b) {
   return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
 }
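// A short sketch (not from the upstream header; assumes SSE2): _mm_sad_epu8
// sums |a[i] - b[i]| over each group of eight bytes, leaving one 16-bit
// total per 64-bit half; a common kernel for block-matching metrics.
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(9);
  __m128i b = _mm_set1_epi8(5);
  __m128i s = _mm_sad_epu8(a, b);             /* each half: 8 * |9-5| = 32 */
  printf("%d\n", (int)_mm_cvtsi128_si32(s));  /* 32 */
  return 0;
}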
 
@@ -2570,9 +2434,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v16qu)__a - (__v16qu)__b);
 }
 
@@ -2588,9 +2451,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v8hu)__a - (__v8hu)__b);
 }
 
@@ -2606,9 +2468,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v4su)__a - (__v4su)__b);
 }
 
@@ -2625,9 +2486,8 @@
 ///    A 64-bit integer vector containing the subtrahend.
 /// \returns A 64-bit integer vector containing the difference of the values in
 ///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a,
+                                                            __m64 __b) {
   return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
 }
 
@@ -2643,9 +2503,8 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a - (__v2du)__b);
 }
 
@@ -2664,10 +2523,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
 }
 
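 /* Illustrative sketch (not part of this change): signed saturation
    clamps at the type bounds instead of wrapping.
 
      __m128i a = _mm_set1_epi8(-128);
      __m128i b = _mm_set1_epi8(1);
      __m128i r = _mm_subs_epi8(a, b); // every lane stays at -128
 */
 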
 /// Subtracts corresponding 16-bit signed integer values in the input and
@@ -2685,10 +2543,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the differences of the values
 ///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Subtracts corresponding 8-bit unsigned integer values in the input
@@ -2705,10 +2562,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the unsigned integer
 ///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
+                                                           __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Subtracts corresponding 16-bit unsigned integer values in the input
@@ -2725,10 +2581,9 @@
 ///    A 128-bit integer vector containing the subtrahends.
 /// \returns A 128-bit integer vector containing the unsigned integer
 ///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
+                                                            __m128i __b) {
+  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
 }
 
 /// Performs a bitwise AND of two 128-bit integer vectors.
@@ -2743,9 +2598,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise AND of the values
 ///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a & (__v2du)__b);
 }
 
@@ -2763,9 +2617,8 @@
 ///    A 128-bit vector containing the right source operand.
 /// \returns A 128-bit integer vector containing the bitwise AND of the one's
 ///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
+                                                              __m128i __b) {
   return (__m128i)(~(__v2du)__a & (__v2du)__b);
 }
 /// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2780,9 +2633,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise OR of the values
 ///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
+                                                          __m128i __b) {
   return (__m128i)((__v2du)__a | (__v2du)__b);
 }
 
@@ -2798,9 +2650,8 @@
 ///    A 128-bit integer vector containing one of the source operands.
 /// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
 ///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
+                                                           __m128i __b) {
   return (__m128i)((__v2du)__a ^ (__v2du)__b);
 }
 
@@ -2821,11 +2672,13 @@
 ///    An immediate value specifying the number of bytes to left-shift operand
 ///    \a a.
 /// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_slli_si128(a, imm)                                                 \
+  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
-#define _mm_bslli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bslli_si128(a, imm)                                                \
+  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
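 /* Illustrative sketch (not part of this change): the count is in
    bytes, not bits, and must be an immediate.
 
      __m128i v = _mm_set_epi32(0, 0, 0, 0xFF);
      __m128i r = _mm_slli_si128(v, 4); // 0xFF moves up to bits [39:32]
 */
 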
 /// Left-shifts each 16-bit value in the 128-bit integer vector operand
 ///    by the specified number of bits. Low-order bits are cleared.
@@ -2840,9 +2693,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
 }
 
@@ -2859,9 +2711,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -2878,9 +2729,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
 }
 
@@ -2897,9 +2747,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
 }
 
@@ -2916,9 +2765,8 @@
 ///    An integer value specifying the number of bits to left-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,
+                                                            int __count) {
   return __builtin_ia32_psllqi128((__v2di)__a, __count);
 }
 
@@ -2935,9 +2783,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to left-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,
+                                                           __m128i __count) {
   return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
 }
 
@@ -2955,9 +2802,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
 }
 
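 /* Illustrative sketch (not part of this change): the shift is
    arithmetic, so the sign bit is replicated into the vacated bits.
 
      __m128i v = _mm_set1_epi16(-32);
      __m128i r = _mm_srai_epi16(v, 4); // -32 >> 4 == -2 in each lane
 */
 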
@@ -2975,9 +2821,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -2995,9 +2840,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
 }
 
@@ -3015,9 +2859,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
 }
 
@@ -3038,11 +2881,13 @@
 ///    An immediate value specifying the number of bytes to right-shift operand
 ///    \a a.
 /// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_srli_si128(a, imm)                                                 \
+  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
-#define _mm_bsrli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bsrli_si128(a, imm)                                                \
+  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
+                                                (int)(imm)))
 
 /// Right-shifts each of the 16-bit values in the 128-bit integer vector
 ///    operand by the specified number of bits. High-order bits are cleared.
@@ -3057,9 +2902,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
 }
 
@@ -3076,9 +2920,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
 }
 
@@ -3095,9 +2938,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,
+                                                            int __count) {
   return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
 }
 
@@ -3114,9 +2956,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,
+                                                           __m128i __count) {
   return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
 }
 
@@ -3133,9 +2974,8 @@
 ///    An integer value specifying the number of bits to right-shift each value
 ///    in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,
+                                                            int __count) {
   return __builtin_ia32_psrlqi128((__v2di)__a, __count);
 }
 
@@ -3152,9 +2992,8 @@
 ///    A 128-bit integer vector in which bits [63:0] specify the number of bits
 ///    to right-shift each value in operand \a __a.
 /// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
+                                                           __m128i __count) {
   return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
 }
 
@@ -3171,9 +3010,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
+                                                            __m128i __b) {
   return (__m128i)((__v16qi)__a == (__v16qi)__b);
 }
 
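 /* Illustrative sketch (not part of this change): each result lane is
    all-ones (0xFF) where the bytes match and all-zeros where they do
    not, which makes the result directly usable as a select mask.
 
      __m128i a = _mm_set1_epi8('x');
      __m128i b = _mm_set1_epi8('y');
      __m128i r = _mm_cmpeq_epi8(a, b); // all lanes are 0x00 here
 */
 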
@@ -3190,9 +3028,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hi)__a == (__v8hi)__b);
 }
 
@@ -3209,9 +3046,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v4si)__a == (__v4si)__b);
 }
 
@@ -3229,9 +3065,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
+                                                            __m128i __b) {
   /* This function always performs a signed comparison, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
   return (__m128i)((__v16qs)__a > (__v16qs)__b);
@@ -3252,9 +3087,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v8hi)__a > (__v8hi)__b);
 }
 
@@ -3273,9 +3107,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)((__v4si)__a > (__v4si)__b);
 }
 
@@ -3294,9 +3127,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
+                                                            __m128i __b) {
   return _mm_cmpgt_epi8(__b, __a);
 }
 
@@ -3315,9 +3147,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
+                                                             __m128i __b) {
   return _mm_cmpgt_epi16(__b, __a);
 }
 
@@ -3336,9 +3167,8 @@
 /// \param __b
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,
+                                                             __m128i __b) {
   return _mm_cmpgt_epi32(__b, __a);
 }
 
@@ -3360,9 +3190,8 @@
 /// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
 ///    converted value of the second operand. The upper 64 bits are copied from
 ///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,
+                                                            long long __b) {
   __a[0] = __b;
   return __a;
 }
@@ -3378,9 +3207,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {
   return __builtin_ia32_cvtsd2si64((__v2df)__a);
 }
 
@@ -3396,9 +3223,7 @@
 ///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
 ///    conversion.
 /// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) {
   return __builtin_ia32_cvttsd2si64((__v2df)__a);
 }
 #endif
@@ -3412,10 +3237,8 @@
 /// \param __a
 ///    A 128-bit integer vector.
 /// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {
+  return (__m128) __builtin_convertvector((__v4si)__a, __v4sf);
 }
 
 /// Converts a vector of [4 x float] into a vector of [4 x i32].
@@ -3428,9 +3251,7 @@
 ///    A 128-bit vector of [4 x float].
 /// \returns A 128-bit integer vector of [4 x i32] containing the converted
 ///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {
   return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
 }
 
@@ -3445,9 +3266,7 @@
 /// \param __a
 ///    A 128-bit vector of [4 x float].
 /// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {
   return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
 }
 
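 /* Illustrative sketch (not part of this change): _mm_cvtps_epi32
    rounds under the current rounding mode (round-to-nearest-even by
    default), while _mm_cvttps_epi32 truncates toward zero.
 
      __m128 v = _mm_set1_ps(2.7f);
      __m128i rounded   = _mm_cvtps_epi32(v);  // 3 in each lane
      __m128i truncated = _mm_cvttps_epi32(v); // 2 in each lane
 */
 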
@@ -3461,29 +3280,24 @@
 /// \param __a
 ///    A 32-bit signed integer operand.
 /// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
+  return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
 }
 
-#ifdef __x86_64__
 /// Returns a vector of [2 x i64] where the lower element is the input
 ///    operand and the upper element is zero.
 ///
 /// \headerfile <x86intrin.h>
 ///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction
+/// in 64-bit mode.
 ///
 /// \param __a
 ///    A 64-bit signed integer operand containing the value to be converted.
 /// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
+  return __extension__(__m128i)(__v2di){__a, 0};
 }
-#endif
 
 /// Moves the least significant 32 bits of a vector of [4 x i32] to a
 ///    32-bit signed integer value.
@@ -3496,14 +3310,11 @@
 ///    A vector of [4 x i32]. The least significant 32 bits are moved to the
 ///    destination.
 /// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
   __v4si __b = (__v4si)__a;
   return __b[0];
 }
 
-#ifdef __x86_64__
 /// Moves the least significant 64 bits of a vector of [2 x i64] to a
 ///    64-bit signed integer value.
 ///
@@ -3515,12 +3326,9 @@
 ///    A vector of [2 x i64]. The least significant 64 bits are moved to the
 ///    destination.
 /// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {
   return __a[0];
 }
-#endif
 
 /// Moves packed integer values from an aligned 128-bit memory location
 ///    to elements in a 128-bit integer vector.
@@ -3533,8 +3341,7 @@
 ///    An aligned pointer to a memory location containing integer values.
 /// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
+_mm_load_si128(__m128i const *__p) {
   return *__p;
 }
 
@@ -3549,12 +3356,11 @@
 ///    A pointer to a memory location containing integer values.
 /// \returns A 128-bit integer vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
+_mm_loadu_si128(__m128i_u const *__p) {
   struct __loadu_si128 {
     __m128i_u __v;
   } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si128*)__p)->__v;
+  return ((const struct __loadu_si128 *)__p)->__v;
 }
 
 /// Returns a vector of [2 x i64] where the lower element is taken from
@@ -3570,12 +3376,12 @@
 /// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
 ///    moved value. The higher order bits are cleared.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
+_mm_loadl_epi64(__m128i_u const *__p) {
   struct __mm_loadl_epi64_struct {
     long long __u;
   } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+  return __extension__(__m128i){
+      ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};
 }
 
 /// Generates a 128-bit vector of [4 x i32] with unspecified content.
@@ -3587,9 +3393,7 @@
 /// This intrinsic has no corresponding instruction.
 ///
 /// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) {
   return (__m128i)__builtin_ia32_undef128();
 }
 
@@ -3609,10 +3413,9 @@
 ///    destination vector of [2 x i64].
 /// \returns An initialized 128-bit vector of [2 x i64] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1,
+                                                            long long __q0) {
+  return __extension__(__m128i)(__v2di){__q0, __q1};
 }
 
 /// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
@@ -3631,9 +3434,8 @@
 ///    destination vector of [2 x i64].
 /// \returns An initialized 128-bit vector of [2 x i64] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1,
+                                                           __m64 __q0) {
   return _mm_set_epi64x((long long)__q1, (long long)__q0);
 }
 
@@ -3659,10 +3461,9 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [4 x i32] containing the values
 ///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,
+                                                           int __i1, int __i0) {
+  return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
 }
 
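 /* Illustrative sketch (not part of this change): arguments are given
    highest element first, so the last argument lands in element 0.
 
      __m128i v = _mm_set_epi32(3, 2, 1, 0);
      // bits [31:0] hold 0; bits [127:96] hold 3
 */
 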
 /// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
@@ -3700,9 +3501,10 @@
 /// \returns An initialized 128-bit vector of [8 x i16] containing the values
 ///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,
+              short __w2, short __w1, short __w0) {
+  return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
+                                        __w4, __w5, __w6, __w7};
 }
 
 /// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
@@ -3748,9 +3550,12 @@
 /// \returns An initialized 128-bit vector of [16 x i8] containing the values
 ///    provided in the operands.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,
+             char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
+             char __b4, char __b3, char __b2, char __b1, char __b0) {
+  return __extension__(__m128i)(__v16qi){
+      __b0, __b1, __b2,  __b3,  __b4,  __b5,  __b6,  __b7,
+      __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
 }
 
 /// Initializes both values in a 128-bit integer vector with the
@@ -3766,9 +3571,7 @@
 ///    vector.
 /// \returns An initialized 128-bit integer vector of [2 x i64] with both
 ///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) {
   return _mm_set_epi64x(__q, __q);
 }
 
@@ -3785,9 +3588,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [2 x i64] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) {
   return _mm_set_epi64(__q, __q);
 }
 
@@ -3804,9 +3605,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [4 x i32] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) {
   return _mm_set_epi32(__i, __i, __i, __i);
 }
 
@@ -3823,9 +3622,7 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [8 x i16] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) {
   return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
 }
 
@@ -3842,10 +3639,9 @@
 ///    vector.
 /// \returns An initialized 128-bit vector of [16 x i8] with all elements
 ///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) {
+  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+                      __b, __b, __b, __b, __b);
 }
 
 /// Constructs a 128-bit integer vector, initialized in reverse order
@@ -3862,9 +3658,8 @@
 ///    A 64-bit integral value used to initialize the upper 64 bits of the
 ///    result.
 /// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0,
+                                                            __m64 __q1) {
   return _mm_set_epi64(__q1, __q0);
 }
 
@@ -3885,9 +3680,9 @@
 /// \param __i3
 ///    A 32-bit integral value used to initialize bits [127:96] of the result.
 /// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1,
+                                                            int __i2,
+                                                            int __i3) {
   return _mm_set_epi32(__i3, __i2, __i1, __i0);
 }
 
@@ -3917,8 +3712,8 @@
 ///    A 16-bit integral value used to initialize bits [127:112] of the result.
 /// \returns An initialized 128-bit integer vector.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,
+               short __w5, short __w6, short __w7) {
   return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
 }
 
@@ -3964,9 +3759,11 @@
 ///    An 8-bit integral value used to initialize bits [127:120] of the result.
 /// \returns An initialized 128-bit integer vector.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+              char __b6, char __b7, char __b8, char __b9, char __b10,
+              char __b11, char __b12, char __b13, char __b14, char __b15) {
+  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
+                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
 }
 
 /// Creates a 128-bit integer vector initialized to zero.
@@ -3977,10 +3774,8 @@
 ///
 /// \returns An initialized 128-bit integer vector with all elements set to
 ///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) {
+  return __extension__(__m128i)(__v2di){0LL, 0LL};
 }
 
 /// Stores a 128-bit integer vector to a memory location aligned on a
@@ -3995,9 +3790,8 @@
 ///    values.
 /// \param __b
 ///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p,
+                                                          __m128i __b) {
   *__p = __b;
 }
 
@@ -4011,13 +3805,12 @@
 ///    A pointer to a memory location that will receive the integer values.
 /// \param __b
 ///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p,
+                                                           __m128i __b) {
   struct __storeu_si128 {
     __m128i_u __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
+  ((struct __storeu_si128 *)__p)->__v = __b;
 }
 
 /// Stores a 64-bit integer value from the low element of a 128-bit integer
@@ -4032,13 +3825,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si64 {
     long long __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+  ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];
 }
 
 /// Stores a 32-bit integer value from the low element of a 128-bit integer
@@ -4053,13 +3845,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si32 {
     int __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+  ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];
 }
 
 /// Stores a 16-bit integer value from the low element of a 128-bit integer
@@ -4074,13 +3865,12 @@
 ///    location does not have to be aligned.
 /// \param __b
 ///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p,
+                                                          __m128i __b) {
   struct __storeu_si16 {
     short __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+  ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];
 }
 
 /// Moves bytes selected by the mask from the first operand to the
@@ -4104,9 +3894,9 @@
 /// \param __p
 ///    A pointer to an unaligned 128-bit memory location where the specified
 ///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d,
+                                                              __m128i __n,
+                                                              char *__p) {
   __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
 }
 
@@ -4123,13 +3913,12 @@
 /// \param __a
 ///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
 ///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p,
+                                                           __m128i __a) {
   struct __mm_storel_epi64_struct {
     long long __u;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+  ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];
 }
 
 /// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
@@ -4146,10 +3935,9 @@
 ///    A pointer to the 128-bit aligned memory location used to store the value.
 /// \param __a
 ///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(double *__p,
+                                                        __m128d __a) {
+  __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);
 }
 
 /// Stores a 128-bit integer vector to a 128-bit aligned memory location.
@@ -4165,10 +3953,9 @@
 ///    A pointer to the 128-bit aligned memory location used to store the value.
 /// \param __a
 ///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(__m128i *__p,
+                                                           __m128i __a) {
+  __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);
 }
 
 /// Stores a 32-bit integer value in the specified memory location.
@@ -4184,9 +3971,9 @@
 ///    A pointer to the 32-bit memory location used to store the value.
 /// \param __a
 ///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
+static __inline__ void
+    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+    _mm_stream_si32(int *__p, int __a) {
   __builtin_ia32_movnti(__p, __a);
 }
 
@@ -4204,9 +3991,9 @@
 ///    A pointer to the 64-bit memory location used to store the value.
 /// \param __a
 ///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
+static __inline__ void
+    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+    _mm_stream_si64(long long *__p, long long __a) {
   __builtin_ia32_movnti64(__p, __a);
 }
 #endif
@@ -4225,7 +4012,7 @@
 /// \param __p
 ///    A pointer to the memory location used to identify the cache line to be
 ///    flushed.
-void _mm_clflush(void const * __p);
+void _mm_clflush(void const *__p);
 
 /// Forces strong memory ordering (serialization) between load
 ///    instructions preceding this instruction and load instructions following
@@ -4275,9 +4062,8 @@
 ///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
 ///   written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
 }
 
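 /* Illustrative sketch (not part of this change): out-of-range values
    saturate rather than being truncated.
 
      __m128i a = _mm_set1_epi16(300);   // above the i8 maximum
      __m128i b = _mm_set1_epi16(-300);  // below the i8 minimum
      __m128i r = _mm_packs_epi16(a, b); // low 8 bytes 127, high 8 bytes -128
 */
 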
@@ -4303,9 +4089,8 @@
 ///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
 ///    are written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
+                                                             __m128i __b) {
   return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
 }
 
@@ -4331,9 +4116,8 @@
 ///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
 ///    written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
+                                                              __m128i __b) {
   return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
 }
 
@@ -4342,25 +4126,29 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// int _mm_extract_epi16(__m128i a, const int imm);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
 ///
-/// \param __a
+/// \param a
 ///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] selects values from \a __a to be assigned
+/// \param imm
+///    An immediate value. Bits [2:0] select values from \a a to be assigned
+///    to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
+///    000: assign values from bits [15:0] of \a a. \n
+///    001: assign values from bits [31:16] of \a a. \n
+///    010: assign values from bits [47:32] of \a a. \n
+///    011: assign values from bits [63:48] of \a a. \n
+///    100: assign values from bits [79:64] of \a a. \n
+///    101: assign values from bits [95:80] of \a a. \n
+///    110: assign values from bits [111:96] of \a a. \n
+///    111: assign values from bits [127:112] of \a a.
 /// \returns An integer, whose lower 16 bits are selected from the 128-bit
 ///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
+#define _mm_extract_epi16(a, imm)                                              \
+  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a),      \
                                                     (int)(imm)))
 
 /// Constructs a 128-bit integer vector by first making a copy of the
@@ -4370,21 +4158,25 @@
 ///
 /// \headerfile <x86intrin.h>
 ///
+/// \code
+/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm);
+/// \endcode
+///
 /// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
 ///
-/// \param __a
+/// \param a
 ///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
 ///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
+///    the lower 16 bits of \a b.
+/// \param b
 ///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
+///    result beginning at an offset specified by \a imm.
+/// \param imm
 ///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
+///    lower 16 bits of \a b are written.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
+#define _mm_insert_epi16(a, b, imm)                                            \
+  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b),        \
                                         (int)(imm)))
 
 /// Copies the values of the most significant bits from each 8-bit
@@ -4399,9 +4191,7 @@
 ///    A 128-bit integer vector containing the values with bits to be extracted.
 /// \returns The most significant bits from each 8-bit element in \a __a,
 ///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
   return __builtin_ia32_pmovmskb128((__v16qi)__a);
 }
 
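 /* Illustrative sketch (not part of this change): a common pattern
    pairs a byte compare with the movemask to locate a match; "buf" is
    an assumed 16-byte-readable buffer, not part of this header.
 
      __m128i chunk = _mm_loadu_si128((const __m128i_u *)buf);
      __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8('\n'));
      int mask = _mm_movemask_epi8(eq); // bit i set => buf[i] == '\n'
 */
 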
@@ -4431,9 +4221,12 @@
 ///    00: assign values from bits [31:0] of \a a. \n
 ///    01: assign values from bits [63:32] of \a a. \n
 ///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
+///    11: assign values from bits [127:96] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
+#define _mm_shuffle_epi32(a, imm)                                              \
   ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
 
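 /* Illustrative sketch (not part of this change): _MM_SHUFFLE packs
    four 2-bit source indices, highest destination element first.
 
      __m128i v = _mm_set_epi32(3, 2, 1, 0);
      __m128i r = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
      // element 0 of r is 3 and element 3 is 0: the lane order reverses
 */
 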
 /// Constructs a 128-bit integer vector by shuffling four lower 16-bit
@@ -4462,8 +4255,11 @@
 ///    01: assign values from bits [31:16] of \a a. \n
 ///    10: assign values from bits [47:32] of \a a. \n
 ///    11: assign values from bits [63:48] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
+#define _mm_shufflelo_epi16(a, imm)                                            \
   ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
 
 /// Constructs a 128-bit integer vector by shuffling four upper 16-bit
@@ -4492,8 +4288,11 @@
 ///    01: assign values from bits [95:80] of \a a. \n
 ///    10: assign values from bits [111:96] of \a a. \n
 ///    11: assign values from bits [127:112] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
+#define _mm_shufflehi_epi16(a, imm)                                            \
   ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
 
 /// Unpacks the high-order (index 8-15) values from two 128-bit vectors
@@ -4525,10 +4324,11 @@
 ///    Bits [119:112] are written to bits [111:104] of the result. \n
 ///    Bits [127:120] are written to bits [127:120] of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
+                                                               __m128i __b) {
+  return (__m128i)__builtin_shufflevector(
+      (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
+      16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
 }
 
 /// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
@@ -4552,10 +4352,10 @@
 ///    Bits [111:96] are written to bits [95:80] of the result. \n
 ///    Bits [127:112] are written to bits [127:112] of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
+                                          8 + 5, 6, 8 + 6, 7, 8 + 7);
 }
 
 /// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
@@ -4575,10 +4375,10 @@
 ///    Bits [95:64] are written to bits [63:32] of the destination. \n
 ///    Bits [127:96] are written to bits [127:96] of the destination.
 /// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
+                                          4 + 3);
 }
 
 /// Unpacks the high-order 64-bit elements from two 128-bit vectors of
@@ -4596,10 +4396,9 @@
 ///    A 128-bit vector of [2 x i64]. \n
 ///    Bits [127:64] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
 }
 
 /// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
@@ -4631,10 +4430,11 @@
 ///    Bits [55:48] are written to bits [111:104] of the result. \n
 ///    Bits [63:56] are written to bits [127:120] of the result.
 /// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
+                                                               __m128i __b) {
+  return (__m128i)__builtin_shufflevector(
+      (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
+      16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
 }
 
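 /* Illustrative sketch (not part of this change): the low eight bytes
    of the two operands are interleaved a0, b0, a1, b1, and so on.
 
      __m128i a = _mm_set1_epi8('a');
      __m128i b = _mm_set1_epi8('b');
      __m128i r = _mm_unpacklo_epi8(a, b); // bytes: a,b,a,b,... (16 total)
 */
 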
 /// Unpacks the low-order (index 0-3) values from each of the two 128-bit
@@ -4659,10 +4459,10 @@
 ///    Bits [47:32] are written to bits [95:80] of the result. \n
 ///    Bits [63:48] are written to bits [127:112] of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
+                                          8 + 1, 2, 8 + 2, 3, 8 + 3);
 }
 
 /// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
@@ -4682,10 +4482,10 @@
 ///    Bits [31:0] are written to bits [63:32] of the destination. \n
 ///    Bits [63:32] are written to bits [127:96] of the destination.
 /// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
+                                          4 + 1);
 }
 
 /// Unpacks the low-order 64-bit elements from two 128-bit vectors of
@@ -4703,10 +4503,9 @@
 ///    A 128-bit vector of [2 x i64]. \n
 ///    Bits [63:0] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,
+                                                                __m128i __b) {
+  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
 }
 
 /// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
@@ -4720,9 +4519,7 @@
 ///    A 128-bit integer vector operand. The lower 64 bits are moved to the
 ///    destination.
 /// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) {
   return (__m64)__a[0];
 }
 
@@ -4737,10 +4534,8 @@
 ///    A 64-bit value.
 /// \returns A 128-bit integer vector. The lower 64 bits contain the value from
 ///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) {
+  return __extension__(__m128i)(__v2di){(long long)__a, 0};
 }
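A sketch of round-tripping a 64-bit value through these two intrinsics; _mm_cvtsi64_m64 and _mm_cvtm64_si64 are the standard MMX scalar conversions, and the whole thing assumes an x86-64 target (real MMX code must also manage FP state with _mm_empty()):

    #include <emmintrin.h>

    long long round_trip(long long x) {
      __m64 m = _mm_cvtsi64_m64(x);       /* scalar -> MMX register type */
      __m128i v = _mm_movpi64_epi64(m);   /* low 64 bits = x, high 64 = 0 */
      return _mm_cvtm64_si64(_mm_movepi64_pi64(v));
    }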
 
 /// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
@@ -4755,9 +4550,7 @@
 ///    destination.
 /// \returns A 128-bit integer vector. The lower 64 bits contain the value from
 ///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) {
   return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
 }
 
@@ -4776,10 +4569,9 @@
 ///    A 128-bit vector of [2 x double]. \n
 ///    Bits [127:64] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a,
+                                                             __m128d __b) {
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);
 }
 
 /// Unpacks the low-order 64-bit elements from two 128-bit vectors
@@ -4797,10 +4589,9 @@
 ///    A 128-bit vector of [2 x double]. \n
 ///    Bits [63:0] are written to bits [127:64] of the destination.
 /// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a,
+                                                             __m128d __b) {
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);
 }
 
 /// Extracts the sign bits of the double-precision values in the 128-bit
@@ -4816,13 +4607,10 @@
 ///    be extracted.
 /// \returns The sign bits from each of the double-precision elements in \a __a,
 ///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
   return __builtin_ia32_movmskpd((__v2df)__a);
 }
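A small sketch of reading the mask, assuming SSE2; bit 0 holds the sign of the low element and bit 1 the sign of the high element:

    #include <emmintrin.h>

    int sign_bits(__m128d v) {
      /* For _mm_setr_pd(-1.0, 2.0) this returns 0b01 == 1. */
      return _mm_movemask_pd(v);
    }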
 
-
 /// Constructs a 128-bit floating-point vector of [2 x double] from two
 ///    128-bit vector parameters of [2 x double], using the immediate-value
 ///     parameter as a specifier.
@@ -4846,9 +4634,12 @@
 ///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
 ///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
 ///    Bit[1] = 1: upper element of \a b copied to upper element of result. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE2 macro.
+///    <c>_MM_SHUFFLE2(b1, b0)</c> can create a 2-bit mask of the form
+///    <c>[b1, b0]</c>.
 /// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+#define _mm_shuffle_pd(a, b, i)                                                \
+  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),  \
                                   (int)(i)))
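Tying the newly added note to a concrete use, a sketch assuming SSE2; _MM_SHUFFLE2(1, 0) sets bit 0 to 0 (take the lower element of a) and bit 1 to 1 (take the upper element of b):

    #include <emmintrin.h>

    __m128d pick(__m128d a, __m128d b) {
      /* Result: { a[0], b[1] }. */
      return _mm_shuffle_pd(a, b, _MM_SHUFFLE2(1, 0));
    }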
 
 /// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
@@ -4862,9 +4653,7 @@
 ///    A 128-bit floating-point vector of [2 x double].
 /// \returns A 128-bit floating-point vector of [4 x float] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) {
   return (__m128)__a;
 }
 
@@ -4879,9 +4668,7 @@
 ///    A 128-bit floating-point vector of [2 x double].
 /// \returns A 128-bit integer vector containing the same bitwise pattern as the
 ///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) {
   return (__m128i)__a;
 }
 
@@ -4896,9 +4683,7 @@
 ///    A 128-bit floating-point vector of [4 x float].
 /// \returns A 128-bit floating-point vector of [2 x double] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) {
   return (__m128d)__a;
 }
 
@@ -4913,9 +4698,7 @@
 ///    A 128-bit floating-point vector of [4 x float].
 /// \returns A 128-bit integer vector containing the same bitwise pattern as the
 ///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) {
   return (__m128i)__a;
 }
 
@@ -4930,9 +4713,7 @@
 ///    A 128-bit integer vector.
 /// \returns A 128-bit floating-point vector of [4 x float] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) {
   return (__m128)__a;
 }
 
@@ -4947,9 +4728,7 @@
 ///    A 128-bit integer vector.
 /// \returns A 128-bit floating-point vector of [2 x double] containing the same
 ///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
   return (__m128d)__a;
 }
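The cast intrinsics reinterpret bits without any value conversion, so chaining a cast with its inverse is a no-op; a sketch assuming SSE2:

    #include <emmintrin.h>

    __m128d bit_round_trip(__m128d x) {
      /* Same bit pattern in, same bit pattern out. */
      return _mm_castsi128_pd(_mm_castpd_si128(x));
    }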
 
@@ -4974,12 +4753,13 @@
 
 #define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
 
-#define _MM_DENORMALS_ZERO_ON   (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
 
 #define _MM_DENORMALS_ZERO_MASK (0x0040U)
 
 #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+#define _MM_SET_DENORMALS_ZERO_MODE(x)                                         \
+  (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
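A sketch of the intended save/set/restore usage of these macros, assuming the caller tolerates denormals being flushed to zero while work() runs:

    #include <emmintrin.h>

    void with_daz(void (*work)(void)) {
      unsigned int old = _MM_GET_DENORMALS_ZERO_MODE();
      _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
      work();
      _MM_SET_DENORMALS_ZERO_MODE(old);  /* restore previous MXCSR setting */
    }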
 
 #endif /* __EMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/enqcmdintrin.h b/linux-x86/lib64/clang/16.0.2/include/enqcmdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/enqcmdintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h b/linux-x86/lib64/clang/16.0.2/include/f16cintrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/f16cintrin.h
index 13905e6..94a662c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/f16cintrin.h
@@ -65,9 +65,9 @@
 ///    011: Truncate \n
 ///    1XX: Use MXCSR.RC for rounding
 /// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) \
-  ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
-                                                     (imm)))[0]))
+#define _cvtss_sh(a, imm) __extension__ ({ \
+  (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+                                                     (imm)))[0]); })
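A sketch of a float-to-half round trip using this macro and its _cvtsh_ss counterpart from the same header, assuming an F16C-enabled build (e.g. -mf16c); _MM_FROUND_CUR_DIRECTION defers rounding to MXCSR:

    #include <immintrin.h>

    float through_half(float x) {
      unsigned short h = _cvtss_sh(x, _MM_FROUND_CUR_DIRECTION);
      return _cvtsh_ss(h);  /* loses precision beyond half's 11-bit significand */
    }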
 
 /// Converts a 128-bit vector containing 32-bit float values into a
 ///    128-bit vector containing 16-bit half-precision float values.
diff --git a/linux-x86/lib64/clang/14.0.2/include/float.h b/linux-x86/lib64/clang/16.0.2/include/float.h
similarity index 74%
rename from linux-x86/lib64/clang/14.0.2/include/float.h
rename to linux-x86/lib64/clang/16.0.2/include/float.h
index ed610b2..5dace7a 100644
--- a/linux-x86/lib64/clang/14.0.2/include/float.h
+++ b/linux-x86/lib64/clang/16.0.2/include/float.h
@@ -14,10 +14,11 @@
  * additional definitions provided for Windows.
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  *
- * Also fall back on Darwin to allow additional definitions and
+ * Also fall back on Darwin and AIX to allow additional definitions and
  * implementation-defined values.
  */
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) ||        \
+     defined(_AIX)) &&                                                         \
     __STDC_HOSTED__ && __has_include_next(<float.h>)
 
 /* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@@ -37,7 +38,10 @@
 #  undef FLT_MANT_DIG
 #  undef DBL_MANT_DIG
 #  undef LDBL_MANT_DIG
-#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef DECIMAL_DIG
 #  endif
 #  undef FLT_DIG
@@ -64,7 +68,10 @@
 #  undef FLT_MIN
 #  undef DBL_MIN
 #  undef LDBL_MIN
-#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201703L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef FLT_TRUE_MIN
 #    undef DBL_TRUE_MIN
 #    undef LDBL_TRUE_MIN
@@ -87,7 +94,10 @@
 #define DBL_MANT_DIG __DBL_MANT_DIG__
 #define LDBL_MANT_DIG __LDBL_MANT_DIG__
 
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define DECIMAL_DIG __DECIMAL_DIG__
 #endif
 
@@ -123,7 +133,10 @@
 #define DBL_MIN __DBL_MIN__
 #define LDBL_MIN __LDBL_MIN__
 
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    !defined(__STRICT_ANSI__) ||                                               \
+    (defined(__cplusplus) && __cplusplus >= 201703L) ||                        \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define FLT_TRUE_MIN __FLT_DENORM_MIN__
 #  define DBL_TRUE_MIN __DBL_DENORM_MIN__
 #  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
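A sketch showing when the *_TRUE_MIN names above become visible, assuming a hosted C11 (or later) compile; under strict C89 without the new AIX _ALL_SOURCE carve-out they stay undefined:

    #include <float.h>
    #include <stdio.h>

    int main(void) {
    #ifdef FLT_TRUE_MIN
      printf("smallest subnormal float: %g\n", FLT_TRUE_MIN);
    #endif
      return 0;
    }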
diff --git a/linux-x86/lib64/clang/14.0.2/include/fma4intrin.h b/linux-x86/lib64/clang/16.0.2/include/fma4intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fma4intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/fma4intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fmaintrin.h b/linux-x86/lib64/clang/16.0.2/include/fmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fmaintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/fmaintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/16.0.2/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h
rename to linux-x86/lib64/clang/16.0.2/include/fuzzer/FuzzedDataProvider.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fxsrintrin.h b/linux-x86/lib64/clang/16.0.2/include/fxsrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fxsrintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/fxsrintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/gfniintrin.h b/linux-x86/lib64/clang/16.0.2/include/gfniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/gfniintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/gfniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h b/linux-x86/lib64/clang/16.0.2/include/hexagon_circ_brev_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h
rename to linux-x86/lib64/clang/16.0.2/include/hexagon_circ_brev_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/linux-x86/lib64/clang/16.0.2/include/hexagon_protos.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
copy to linux-x86/lib64/clang/16.0.2/include/hexagon_protos.h
index cdffd93..2642f3c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ b/linux-x86/lib64/clang/16.0.2/include/hexagon_protos.h
@@ -8003,17 +8003,6 @@
 #define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
 #if __HEXAGON_ARCH__ >= 65
 /* ==========================================================================
    Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h b/linux-x86/lib64/clang/16.0.2/include/hexagon_types.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
copy to linux-x86/lib64/clang/16.0.2/include/hexagon_types.h
index 6958809..029727c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ b/linux-x86/lib64/clang/16.0.2/include/hexagon_types.h
@@ -1177,37 +1177,6 @@
 
 #endif /* __cplusplus */
 
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
 // V65 Vector types
 #if __HVX_ARCH__ >= 65
 #if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@
 #endif /* defined __HVX__ &&  (__HVX_LENGTH__ == 64) */
 #endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
 #endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
 
 /* Predicates */
 
diff --git a/linux-x86/lib64/clang/16.0.2/include/hlsl.h b/linux-x86/lib64/clang/16.0.2/include/hlsl.h
new file mode 100644
index 0000000..a9dce45
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/hlsl.h
@@ -0,0 +1,15 @@
+//===----- hlsl.h - HLSL definitions --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_H_
+#define _HLSL_H_
+
+#include "hlsl/hlsl_basic_types.h"
+#include "hlsl/hlsl_intrinsics.h"
+
+#endif //_HLSL_H_
diff --git a/linux-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h b/linux-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h
new file mode 100644
index 0000000..e68715f
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/hlsl_basic_types.h
@@ -0,0 +1,64 @@
+//===----- hlsl_basic_types.h - HLSL definitions for basic types ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_BASIC_TYPES_H_
+#define _HLSL_HLSL_BASIC_TYPES_H_
+
+// built-in scalar data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+// 16-bit integer.
+typedef unsigned short uint16_t;
+typedef short int16_t;
+#endif
+
+// unsigned 32-bit integer.
+typedef unsigned int uint;
+
+// 64-bit integer.
+typedef unsigned long uint64_t;
+typedef long int64_t;
+
+// built-in vector data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<int16_t, 2> int16_t2;
+typedef vector<int16_t, 3> int16_t3;
+typedef vector<int16_t, 4> int16_t4;
+typedef vector<uint16_t, 2> uint16_t2;
+typedef vector<uint16_t, 3> uint16_t3;
+typedef vector<uint16_t, 4> uint16_t4;
+#endif
+
+typedef vector<int, 2> int2;
+typedef vector<int, 3> int3;
+typedef vector<int, 4> int4;
+typedef vector<uint, 2> uint2;
+typedef vector<uint, 3> uint3;
+typedef vector<uint, 4> uint4;
+typedef vector<int64_t, 2> int64_t2;
+typedef vector<int64_t, 3> int64_t3;
+typedef vector<int64_t, 4> int64_t4;
+typedef vector<uint64_t, 2> uint64_t2;
+typedef vector<uint64_t, 3> uint64_t3;
+typedef vector<uint64_t, 4> uint64_t4;
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<half, 2> half2;
+typedef vector<half, 3> half3;
+typedef vector<half, 4> half4;
+#endif
+
+typedef vector<float, 2> float2;
+typedef vector<float, 3> float3;
+typedef vector<float, 4> float4;
+typedef vector<double, 2> double2;
+typedef vector<double, 3> double3;
+typedef vector<double, 4> double4;
+
+#endif //_HLSL_HLSL_BASIC_TYPES_H_
diff --git a/linux-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h b/linux-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h
new file mode 100644
index 0000000..43f8dfe
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/hlsl_intrinsics.h
@@ -0,0 +1,102 @@
+//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_INTRINSICS_H_
+#define _HLSL_HLSL_INTRINSICS_H_
+
+__attribute__((availability(shadermodel, introduced = 6.0)))
+__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint
+WaveActiveCountBits(bool bBit);
+
+// abs builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t abs(int16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t2 abs(int16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t3 abs(int16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int16_t4 abs(int16_t4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) half abs(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half2 abs(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half3 abs(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+half4 abs(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int abs(int);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int2 abs(int2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int3 abs(int3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int4 abs(int4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) float
+abs(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float2 abs(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float3 abs(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+float4 abs(float4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t abs(int64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t2 abs(int64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t3 abs(int64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+int64_t4 abs(int64_t4);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) double
+abs(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double2 abs(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double3 abs(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
+double4 abs(double4);
+
+// sqrt builtins
+__attribute__((clang_builtin_alias(__builtin_sqrt))) double sqrt(double In);
+__attribute__((clang_builtin_alias(__builtin_sqrtf))) float sqrt(float In);
+
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_sqrtf16))) half sqrt(half In);
+#endif
+
+// ceil builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half ceil(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half2 ceil(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half3 ceil(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+half4 ceil(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) float
+ceil(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float2 ceil(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float3 ceil(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+float4 ceil(float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) double
+ceil(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double2 ceil(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double3 ceil(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
+double4 ceil(double4);
+
+#endif //_HLSL_HLSL_INTRINSICS_H_
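For the two new HLSL headers, a hypothetical snippet (HLSL, not C) exercising the vector typedefs and the element-wise intrinsics declared above, assuming clang's -x hlsl mode:

    // abs() and ceil() resolve to the clang_builtin_alias declarations above.
    float4 snap_up(float4 v) {
      return ceil(abs(v));
    }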
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h b/linux-x86/lib64/clang/16.0.2/include/hresetintrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/hresetintrin.h
index 13e31a2..646f6c1 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/hresetintrin.h
@@ -25,7 +25,7 @@
 ///
 /// This intrinsic corresponds to the <c> HRESET </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///    IF __eax == 0
 ///      // nop
 ///    ELSE
@@ -35,7 +35,7 @@
 ///        FI
 ///      ENDFOR
 ///    FI
-/// \endoperation
+/// \endcode
 static __inline void __DEFAULT_FN_ATTRS
 _hreset(int __eax)
 {
diff --git a/linux-x86/lib64/clang/14.0.2/include/htmintrin.h b/linux-x86/lib64/clang/16.0.2/include/htmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/htmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/htmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/htmxlintrin.h b/linux-x86/lib64/clang/16.0.2/include/htmxlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/htmxlintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/linux-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
similarity index 67%
copy from darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
copy to linux-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
index 41ce7a6..7e3679a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ b/linux-x86/lib64/clang/16.0.2/include/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
 //===----------------------------------------------------------------------===//
 
 
-
 #ifndef _HVX_HEXAGON_PROTOS_H_
 #define _HVX_HEXAGON_PROTOS_H_ 1
 
@@ -28,7 +27,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
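The pattern throughout this header: the old object-like macros merely aliased a builtin, while the new function-like forms forward their operands explicitly (and, for predicate operands, insert the vandvrt/vandqrt conversions visible below). Call sites are unchanged; a sketch assuming an HVX-enabled Hexagon build (e.g. -mhvx) with hexagon_types.h providing HVX_Vector:

    #include <hexagon_types.h>
    #include <hvx_hexagon_protos.h>

    int extract_word(HVX_Vector v, int rs) {
      /* Rd = vextract(Vu, Rs): pull one 32-bit word out of the vector. */
      return Q6_R_vextract_VR(v, rs);
    }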
 
 #if __HVX_ARCH__ >= 60
@@ -39,7 +38,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
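Note the shape of this rewrite for the predicate operations: the user-visible Q operands are full-width vectors (HVX_VectorPred in hexagon_types.h), while the pred_* builtins operate on hardware predicates, so each argument is converted in with vandvrt and the result converted back out with vandqrt. A usage sketch under the same HVX assumptions as above:

    HVX_VectorPred and_preds(HVX_VectorPred p, HVX_VectorPred q) {
      /* The vandvrt/vandqrt conversions happen inside the macro. */
      return Q6_Q_and_QQ(p, q);
    }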
 
 #if __HVX_ARCH__ >= 60
@@ -83,7 +82,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -193,7 +192,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
 #endif /* __HEXAGON_ARCH___ >= 60 */
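 /* Usage sketch (illustrative): Q6_V_vzero() materializes an all-zero HVX
    vector, e.g. to seed an accumulator pair via the vcombine form above. */
 static inline HVX_VectorPair example_zero_pair(void) {
   HVX_Vector z = Q6_V_vzero();
   return Q6_W_vcombine_VV(z, z);   /* both halves of the pair zeroed */
 }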
 
 #if __HVX_ARCH__ >= 60
@@ -952,7 +951,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
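 /* Usage sketch (illustrative): vdeal deinterleaves byte lanes, a common
    first step for splitting interleaved data (e.g. paired samples) into
    separate even/odd streams within one vector. */
 static inline HVX_Vector example_deinterleave_b(HVX_Vector interleaved) {
   return Q6_Vb_vdeal_Vb(interleaved);
 }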
 
 #if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
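 /* Usage sketch (illustrative): a saturating dual multiply-accumulate, as
    in a two-tap halfword filter. Rt carries two 16-bit coefficients; each
    32-bit output lane adds a pair of adjacent 16x16 products to Vx. */
 static inline HVX_Vector example_dmpy_acc(HVX_Vector acc, HVX_Vector v,
                                           Word32 coeff_pair) {
   return Q6_Vw_vdmpyacc_VwVhRh_sat(acc, v, coeff_pair);
 }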
 
 #if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1194,7 +1193,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
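 /* Note on the expansion above: in this __BUILTIN_VECTOR_WRAP form,
    predicate (Q) values are carried as ordinary vectors. vandqrt(...,-1)
    widens a predicate into an all-ones/all-zeros lane mask, and the
    accumulating QV... forms below first convert an incoming mask back
    with vandvrt(...,-1) before the compare updates it.
    Illustrative sketch: */
 static inline HVX_VectorPred example_eq_mask(HVX_Vector a, HVX_Vector b) {
   return Q6_Q_vcmp_eq_VbVb(a, b);   /* equal byte lanes -> all-ones */
 }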
 
 #if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
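 /* Usage sketch (illustrative): valign/vlalign extract a vector's worth
    of bytes from the concatenation of two aligned vectors, the usual way
    to realize unaligned access; vlalign counts the byte offset from the
    opposite end of the pair. */
 static inline HVX_Vector example_lalign(HVX_Vector hi, HVX_Vector lo,
                                         Word32 offset_bytes) {
   return Q6_V_vlalign_VVR(hi, lo, offset_bytes);
 }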
 
 #if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
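 /* Usage sketch (illustrative): vlut32 looks up byte entries of the table
    vector Vv using byte indices from Vu; Rt selects which 32-byte table
    slice participates, and the or-accumulate forms merge the partial
    results of successive slices. */
 static inline HVX_Vector example_lut32(HVX_Vector indices,
                                        HVX_Vector table) {
   return Q6_Vb_vlut32_VbVbR(indices, table, 0);   /* slice/group 0 */
 }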
 
 #if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
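 /* Usage sketch (illustrative): a per-lane clamp built from the max/min
    forms above, here for signed halfwords. */
 static inline HVX_Vector example_clamp_h(HVX_Vector v, HVX_Vector lo,
                                          HVX_Vector hi) {
   return Q6_Vh_vmin_VhVh(Q6_Vh_vmax_VhVh(v, lo), hi);
 }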
 
 #if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
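 /* Usage sketch (illustrative): the vmpy forms widen, so products of byte
    inputs accumulate into a halfword vector pair without overflow. */
 static inline HVX_VectorPair example_mac_ub(HVX_VectorPair acc,
                                             HVX_Vector pixels,
                                             Word32 coeff_bytes) {
   return Q6_Wh_vmpyacc_WhVubRb(acc, pixels, coeff_bytes);
 }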
 
 #if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
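 /* Usage sketch (illustrative): the <<1:rnd:sat variant is the Q15
    fixed-point multiply: each halfword lane of Vu is multiplied by a
    halfword taken from Rt, shifted left one (dropping the duplicated
    sign bit), rounded, and saturated back to Q15. */
 static inline HVX_Vector example_q15_mul(HVX_Vector a, Word32 coeff) {
   return Q6_Vh_vmpy_VhRh_s1_rnd_sat(a, coeff);
 }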
 
 #if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
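
A usage sketch (not part of the patch). The new bodies do more than add a
parameter list: predicate-typed operands such as Qt are routed through
__builtin_HEXAGON_V6_vandvrt((Qt),-1), since in this wrapped variant of the
header Q values live in ordinary vector registers at the source level and
must be converted to the builtin's predicate type. A minimal example,
assuming an HVX-enabled compile (e.g. -mhvx) and illustrative names;
Q6_Q_vcmp_gt_VwVw comes from elsewhere in this same header:

    #include <hexagon_types.h>   /* HVX_Vector, HVX_VectorPred */
    #include <hexagon_protos.h>  /* pulls in hvx_hexagon_protos.h when __HVX__ is defined */

    /* Lane-wise max of two word vectors: compare, then select with vmux. */
    static HVX_Vector max_w(HVX_Vector a, HVX_Vector b)
    {
        HVX_VectorPred gt = Q6_Q_vcmp_gt_VwVw(a, b); /* per-lane a > b */
        return Q6_V_vmux_QVV(gt, a, b);              /* gt ? a : b */
    }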
 
 #if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
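
A usage sketch (not part of the patch). Besides the parameter list, this
hunk corrects the documented Instruction Type of the accumulating vrmpy form
from CVI_VX_DV to CVI_VX, as do several neighboring accumulate forms. The
operation itself is a reduction multiply, the usual building block for
integer dot products. Assuming the includes from the earlier sketch and
illustrative names:

    /* One dot-product step: acc.w += vrmpy(pix.ub, coef.b); each word lane
       adds four unsigned-byte by signed-byte products. */
    static HVX_Vector dot_step(HVX_Vector acc, HVX_Vector pix, HVX_Vector coef)
    {
        return Q6_Vw_vrmpyacc_VwVubVb(acc, pix, coef);
    }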
 
 #if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
    C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
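
A usage sketch (not part of the patch). The vround macros narrow with
rounding and saturation, taking two source vectors and producing one vector
of half-width lanes; the exact lane interleaving follows the vround
instruction as documented in the HVX manual. With illustrative names:

    /* Narrow two word vectors into one halfword vector, rounding and
       saturating each lane. */
    static HVX_Vector narrow_w_to_h(HVX_Vector u, HVX_Vector v)
    {
        return Q6_Vh_vround_VwVw_sat(u, v);
    }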
 
 #if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
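
A usage sketch (not part of the patch). The condnac macros are conditional
subtracts: the Qn forms apply in lanes where the predicate is false, and the
predicate operand is converted through vandvrt exactly as in the vmux hunk
earlier. With illustrative names:

    /* Lanes where q is FALSE compute x.h -= u.h; other lanes pass through. */
    static HVX_Vector cond_sub_h(HVX_VectorPred q, HVX_Vector x, HVX_Vector u)
    {
        return Q6_Vh_condnac_QnVhVh(q, x, u);
    }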
 
 #if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
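
A usage sketch (not part of the patch). The W-prefixed macros return a
vector pair (HVX_VectorPair) and now also take explicit arguments; the
halves of a pair can be extracted with Q6_V_lo_W and Q6_V_hi_W from this
same header. With illustrative names:

    /* Zero-extend unsigned byte lanes to unsigned halfwords; the widened
       result occupies a vector pair. */
    static void widen_ub(HVX_Vector v, HVX_Vector *lo, HVX_Vector *hi)
    {
        HVX_VectorPair w = Q6_Wuh_vzxt_Vub(v);
        *lo = Q6_V_lo_W(w);
        *hi = Q6_V_hi_W(w);
    }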
 
 #if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
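
A usage sketch (not part of the patch). This hunk shows the inverse
conversion: when a builtin produces a predicate, the wrapped macro converts
the result back to the source-level vector representation with
__builtin_HEXAGON_V6_vandqrt((...),-1); the shuffeqh/shuffeqw hunks below
combine both directions. Combining vsetq2 with vmux (illustrative names,
assuming 0 < n <= the vector length in bytes):

    /* Take the first n bytes from a and the remainder from b. */
    static HVX_Vector merge_prefix(HVX_Vector a, HVX_Vector b, int n)
    {
        HVX_VectorPred first_n = Q6_Q_vsetq2_R(n); /* bytes [0, n) set */
        return Q6_V_vmux_QVV(first_n, a, b);
    }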
 
 #if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
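
A usage sketch (not part of the patch). The vlut32 macros perform a byte
table lookup; per the HVX manual's description of vlut32, the immediate
selects which 32-entry segment of the index space the table vector serves,
and the oracci forms OR the looked-up bytes into an accumulator. With
illustrative names, segment 0:

    /* Each byte of idx indexes a 32-entry byte table held in tbl. */
    static HVX_Vector lut32_lookup(HVX_Vector idx, HVX_Vector tbl)
    {
        return Q6_Vb_vlut32_VbVbI(idx, tbl, 0);
    }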
 
 #if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
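+
+/* The Q-form gathers take an HVX_VectorPred mask, which the macro routes
+   through V6_vandvrt so the underlying builtin receives a plain vector.
+   Usage sketch (operand roles per the HVX manual: Rs is the destination
+   VMEM address, Rt the VTCM region base, Mu its bound, Vv the halfword
+   offsets; all names here are illustrative):
+
+       Q6_vgather_AQRMVh(dst_addr, mask, src_base, region_bound, offsets);
+*/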
 
 #if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
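+
+/* Sketch: each byte lane of the result holds a running count derived from
+   the predicate lanes below it (see the HVX manual for the exact
+   definition; mask is assumed initialized):
+
+       HVX_Vector counts = Q6_Vb_prefixsum_Q(mask);
+*/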
 
 #if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
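+
+/* Scatter counterpart of the gathers above (a sketch; per the HVX manual,
+   Rt is the VTCM region base, Mu its bound, Vv the word offsets, Vw the
+   values to store; the Q form additionally masks lanes):
+
+       Q6_vscatter_RMVwV(vtcm_base, region_bound, offsets, values);
+       Q6_vscatter_QRMVwV(mask, vtcm_base, region_bound, offsets, values);
+*/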
 
 #if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vabs(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
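+
+/* Minimal half-float example (x assumed initialized with .hf lanes):
+
+       HVX_Vector ax = Q6_Vhf_vabs_Vhf(x);   // lane-wise |x|
+*/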
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vabs(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vfmv(Vu32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vu32.qf16
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
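+
+/* Typical qf16 flow: accumulate in qf16, Qualcomm's intermediate float
+   format, then convert back to IEEE half at the end. A sketch with a, b, c
+   assumed initialized as .hf vectors:
+
+       HVX_Vector acc = Q6_Vqf16_vadd_VhfVhf(a, b);  // hf + hf   -> qf16
+       acc = Q6_Vqf16_vadd_Vqf16Vhf(acc, c);         // qf16 + hf -> qf16
+       HVX_Vector out = Q6_Vhf_equals_Vqf16(acc);    // qf16 -> hf
+*/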
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vuu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=Vu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
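+
+/* Round-trip sketch: widen one .hf vector to a .sf pair, then narrow back
+   with the two-operand vcvt (Q6_V_hi_W/Q6_V_lo_W are the pair accessors
+   defined earlier in this header; operand order of the narrowing form per
+   the HVX manual):
+
+       HVX_VectorPair wide = Q6_Wsf_vcvt_Vhf(h);
+       HVX_Vector back = Q6_Vhf_vcvt_VsfVsf(Q6_V_hi_W(wide), Q6_V_lo_W(wide));
+*/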
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfneg(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfneg(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
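+
+/* Compare-and-select sketch (Q6_V_vmux_QVV is the v60 mux defined earlier
+   in this header; a and b assumed initialized as .hf vectors):
+
+       HVX_VectorPred gt = Q6_Q_vcmp_gt_VhfVhf(a, b);
+       HVX_Vector mx = Q6_V_vmux_QVV(gt, a, b);   // lane-wise max(a, b)
+*/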
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
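+
+/* Widening multiply-accumulate sketch: .hf products accumulate into a .sf
+   vector pair (x0..x2 and w0..w2 assumed initialized):
+
+       HVX_VectorPair acc = Q6_Wsf_vmpy_VhfVhf(x0, w0);
+       acc = Q6_Wsf_vmpyacc_WsfVhfVhf(acc, x1, w1);
+       acc = Q6_Wsf_vmpyacc_WsfVhfVhf(acc, x2, w2);
+*/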
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
 #endif /* __HVX__ */
 
 #endif
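
The Q6_* macros above wrap each HVX instruction in a compiler builtin. A minimal usage sketch, assuming a Hexagon v68+ toolchain (-mhvx), the vector typedefs from hexagon_types.h, and the Q6_Vsf_equals_Vqf32 conversion wrapper defined elsewhere in this header (the helper name below is illustrative):

    #include <hexagon_types.h>
    #include <hvx_hexagon_protos.h>

    /* Subtract two IEEE float32 vectors through the qf32 intermediate
       format, then convert back to standard float32. */
    static HVX_Vector vsub_f32(HVX_Vector a, HVX_Vector b) {
      HVX_Vector qf = Q6_Vqf32_vsub_VsfVsf(a, b); /* sf - sf -> qf32 */
      return Q6_Vsf_equals_Vqf32(qf);             /* qf32 -> sf */
    }

The mixed-operand variants (Vqf32Vsf, Vqf16Vhf) exist so that a chain of adds and subtracts can stay in the wider qf format and convert to IEEE form only once at the end.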
diff --git a/linux-x86/lib64/clang/14.0.2/include/ia32intrin.h b/linux-x86/lib64/clang/16.0.2/include/ia32intrin.h
similarity index 95%
rename from linux-x86/lib64/clang/14.0.2/include/ia32intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/ia32intrin.h
index ec8142b..f1904ef 100644
--- a/linux-x86/lib64/clang/14.0.2/include/ia32intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/ia32intrin.h
@@ -40,7 +40,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfd(int __A) {
-  return __builtin_ctz(__A);
+  return __builtin_ctz((unsigned int)__A);
 }
 
 /** Find the first set bit starting from the msb. Result is undefined if
@@ -57,7 +57,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrd(int __A) {
-  return 31 - __builtin_clz(__A);
+  return 31 - __builtin_clz((unsigned int)__A);
 }
 
 /** Swaps the bytes in the input. Converting little endian to big endian or
@@ -73,12 +73,12 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapd(int __A) {
-  return __builtin_bswap32(__A);
+  return (int)__builtin_bswap32((unsigned int)__A);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _bswap(int __A) {
-  return __builtin_bswap32(__A);
+  return (int)__builtin_bswap32((unsigned int)__A);
 }
 
 #define _bit_scan_forward(A) __bsfd((A))
@@ -99,7 +99,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfq(long long __A) {
-  return __builtin_ctzll(__A);
+  return (int)__builtin_ctzll((unsigned long long)__A);
 }
 
 /** Find the first set bit starting from the msb. Result is undefined if
@@ -116,7 +116,7 @@
  */
 static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrq(long long __A) {
-  return 63 - __builtin_clzll(__A);
+  return 63 - __builtin_clzll((unsigned long long)__A);
 }
 
 /** Swaps the bytes in the input. Converting little endian to big endian or
@@ -132,7 +132,7 @@
  */
 static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapq(long long __A) {
-  return __builtin_bswap64(__A);
+  return (long long)__builtin_bswap64((unsigned long long)__A);
 }
 
 #define _bswap64(A) __bswapq((A))
@@ -395,23 +395,23 @@
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rold(unsigned int __X, int __C) {
-  return __builtin_rotateleft32(__X, __C);
+  return __builtin_rotateleft32(__X, (unsigned int)__C);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rord(unsigned int __X, int __C) {
-  return __builtin_rotateright32(__X, __C);
+  return __builtin_rotateright32(__X, (unsigned int)__C);
 }
 
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolq(unsigned long long __X, int __C) {
-  return __builtin_rotateleft64(__X, __C);
+  return __builtin_rotateleft64(__X, (unsigned long long)__C);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorq(unsigned long long __X, int __C) {
-  return __builtin_rotateright64(__X, __C);
+  return __builtin_rotateright64(__X, (unsigned long long)__C);
 }
 #endif /* __x86_64__ */
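
All of these hunks follow one pattern: the signed-to-unsigned conversions are made explicit so the header compiles cleanly under -Wsign-conversion and friends, with no change in behavior (the conversion is value-preserving modulo 2^N either way). A sketch of the equivalence for __bsrd, with an illustrative helper name:

    /* Mirrors __bsrd above. For x = INT_MIN the cast yields the bit
       pattern 0x80000000u, so 31 - __builtin_clz(...) == 31.
       The result is undefined for x == 0, as documented. */
    static int msb_index(int x) {
      return 31 - __builtin_clz((unsigned int)x);
    }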
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/immintrin.h b/linux-x86/lib64/clang/16.0.2/include/immintrin.h
similarity index 92%
copy from darwin-x86/lib64/clang/14.0.2/include/immintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/immintrin.h
index e5174f8..f4e4cea 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/immintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/immintrin.h
@@ -214,17 +214,13 @@
 #include <avx512pfintrin.h>
 #endif
 
-/*
- * FIXME: _Float16 type is legal only when HW support float16 operation.
- * We use __AVX512FP16__ to identify if float16 is supported or not, so
- * when float16 is not supported, the related header is not included.
- *
- */
-#if defined(__AVX512FP16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512FP16__)
 #include <avx512fp16intrin.h>
 #endif
 
-#if defined(__AVX512FP16__) && defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    (defined(__AVX512VL__) && defined(__AVX512FP16__))
 #include <avx512vlfp16intrin.h>
 #endif
 
@@ -276,20 +272,37 @@
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand16_step(unsigned short *__p)
 {
-  return __builtin_ia32_rdrand16_step(__p);
+  return (int)__builtin_ia32_rdrand16_step(__p);
 }
 
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand32_step(unsigned int *__p)
 {
-  return __builtin_ia32_rdrand32_step(__p);
+  return (int)__builtin_ia32_rdrand32_step(__p);
 }
 
 #ifdef __x86_64__
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand64_step(unsigned long long *__p)
 {
-  return __builtin_ia32_rdrand64_step(__p);
+  return (int)__builtin_ia32_rdrand64_step(__p);
+}
+#else
+// We need to emulate the functionality of 64-bit rdrand with two 32-bit
+// rdrand instructions.
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand64_step(unsigned long long *__p)
+{
+  unsigned int __lo, __hi;
+  unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);
+  unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);
+  if (__res_lo && __res_hi) {
+    *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo;
+    return 1;
+  } else {
+    *__p = 0;
+    return 0;
+  }
 }
 #endif
 #endif /* __RDRND__ */
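
RDRAND can transiently fail (the step functions return 0), so callers are expected to retry. A minimal retry loop around the wrapper above; the count of 10 follows Intel's DRNG software guide, and the helper name is illustrative (compile with -mrdrnd):

    #include <immintrin.h>

    /* Returns 1 and stores 64 random bits on success, 0 if RDRAND
       kept failing after 10 attempts. */
    static int rdrand64_retry(unsigned long long *out) {
      for (int i = 0; i < 10; ++i)
        if (_rdrand64_step(out))
          return 1;
      return 0;
    }

On 32-bit targets the new emulation path above makes the same loop work unchanged, at the cost of two hardware draws per attempt.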
@@ -360,50 +373,50 @@
 static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i16(void const * __P) {
   struct __loadu_i16 {
-    short __v;
+    unsigned short __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
+  return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i16(void * __P, short __D) {
   struct __storeu_i16 {
-    short __v;
+    unsigned short __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D);
 }
 
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i32(void const * __P) {
   struct __loadu_i32 {
-    int __v;
+    unsigned int __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
+  return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i32(void * __P, int __D) {
   struct __storeu_i32 {
-    int __v;
+    unsigned int __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D);
 }
 
 #ifdef __x86_64__
 static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _loadbe_i64(void const * __P) {
   struct __loadu_i64 {
-    long long __v;
+    unsigned long long __v;
   } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
+  return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
 _storebe_i64(void * __P, long long __D) {
   struct __storeu_i64 {
-    long long __v;
+    unsigned long long __v;
   } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
 }
 #endif
 #endif /* __MOVBE */
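
The _loadbe_*/_storebe_* helpers byte-swap through MOVBE, so they read and write big-endian (network byte order) values regardless of host endianness. A small parsing sketch; the packet layout is made up for illustration (compile with -mmovbe):

    #include <immintrin.h>

    /* Read a big-endian 32-bit length field at byte offset 4. */
    static unsigned parse_be32_length(const unsigned char *pkt) {
      return (unsigned)_loadbe_i32(pkt + 4);
    }

The switch to unsigned struct members matters for the same reason as the ia32intrin.h casts: the __builtin_bswap* builtins traffic in unsigned values, while the packed, may_alias struct keeps the unaligned access well-defined.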
diff --git a/darwin-x86/lib64/clang/14.0.2/include/intrin.h b/linux-x86/lib64/clang/16.0.2/include/intrin.h
similarity index 95%
copy from darwin-x86/lib64/clang/14.0.2/include/intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/intrin.h
index 02e66d0..de68b07 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/intrin.h
@@ -534,27 +534,6 @@
 |* Misc
 \*----------------------------------------------------------------------------*/
 #if defined(__i386__) || defined(__x86_64__)
-#if defined(__i386__)
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("cpuid"                                                                \
-        : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx)             \
-  __asm("xchg{q} {%%rbx, %q1|%q1, rbx}\n"                                      \
-        "cpuid\n"                                                              \
-        "xchg{q} {%%rbx, %q1|%q1, rbx}"                                        \
-        : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx)                   \
-        : "0"(__leaf), "2"(__count))
-#endif
-static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
-  __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
-                                                    int __ecx) {
-  __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
-}
 static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
   __asm__ volatile("hlt");
 }
@@ -581,6 +560,18 @@
 
 __int64 __mulh(__int64 __a, __int64 __b);
 unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);
+
+void __break(int);
+
+void __writex18byte(unsigned long offset, unsigned char data);
+void __writex18word(unsigned long offset, unsigned short data);
+void __writex18dword(unsigned long offset, unsigned long data);
+void __writex18qword(unsigned long offset, unsigned __int64 data);
+
+unsigned char __readx18byte(unsigned long offset);
+unsigned short __readx18word(unsigned long offset);
+unsigned long __readx18dword(unsigned long offset);
+unsigned __int64 __readx18qword(unsigned long offset);
 #endif
 
 /*----------------------------------------------------------------------------*\
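
The new declarations are the MSVC ARM64 intrinsics for the x18 platform register, which Windows reserves for the TEB pointer. A hedged sketch of the usual idiom, assuming 64-bit Windows where NT_TIB.Self sits at offset 0x30:

    #include <intrin.h>

    /* Read the TEB self pointer through x18 (Windows-on-ARM64 only). */
    static unsigned __int64 teb_self(void) {
      return __readx18qword(0x30);
    }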
diff --git a/linux-x86/lib64/clang/14.0.2/include/inttypes.h b/linux-x86/lib64/clang/16.0.2/include/inttypes.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/inttypes.h
rename to linux-x86/lib64/clang/16.0.2/include/inttypes.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/invpcidintrin.h b/linux-x86/lib64/clang/16.0.2/include/invpcidintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/invpcidintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/invpcidintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/iso646.h b/linux-x86/lib64/clang/16.0.2/include/iso646.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/iso646.h
rename to linux-x86/lib64/clang/16.0.2/include/iso646.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h b/linux-x86/lib64/clang/16.0.2/include/keylockerintrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/keylockerintrin.h
index ad9428e..1994ac4 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/keylockerintrin.h
@@ -46,7 +46,7 @@
 ///
 /// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
 ///   GP (0)
 /// FI
@@ -91,7 +91,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
                __m128i __enkey_lo, __m128i __enkey_hi) {
@@ -106,7 +106,7 @@
 ///
 /// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// InputKey[127:0] := __key[127:0]
 /// KeyMetadata[2:0] := __htype[2:0]
 /// KeyMetadata[23:3] := 0 // Reserved for future usage
@@ -126,7 +126,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
   return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
@@ -141,7 +141,7 @@
 ///
 /// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// InputKey[127:0] := __key_lo[127:0]
 /// InputKey[255:128] := __key_hi[255:128]
 /// KeyMetadata[2:0] := __htype[2:0]
@@ -163,7 +163,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
                      void *__h) {
@@ -179,7 +179,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -202,7 +202,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -216,7 +216,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -241,7 +241,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -255,7 +255,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
 /// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
 ///                  (Handle[127:0] AND (CPL > 0)) ||
@@ -280,7 +280,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -294,7 +294,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
 ///                   (Handle[127:0] AND (CPL > 0)) ||
@@ -319,7 +319,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
   return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -346,7 +346,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle := MEM[__h+383:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -377,7 +377,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
@@ -392,7 +392,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -423,7 +423,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
@@ -438,7 +438,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[383:0] := MEM[__h+383:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
 ///                    (Handle[127:0] AND (CPL > 0)) ||
@@ -469,7 +469,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
@@ -484,7 +484,7 @@
 ///
 /// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
 ///
-/// \operation
+/// \code{.operation}
 /// Handle[511:0] := MEM[__h+511:__h]
 /// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
 ///                   (Handle[127:0] AND (CPL > 0)) ||
@@ -515,7 +515,7 @@
 /// AF := 0
 /// PF := 0
 /// CF := 0
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
   return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
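
Besides the markup change (swapping the custom \operation alias for standard \code blocks), usage of these intrinsics starts by wrapping a raw AES key into an opaque handle. A minimal sketch; the 48-byte buffer size comes from the 384-bit handle in the pseudocode above, __htype 0 means no usage restrictions, and the helper name is illustrative (compile with -mkl):

    #include <immintrin.h>

    /* Wrap an AES-128 key into a Key Locker handle; the raw key can then
       be discarded. Returns the keysource information from EAX. */
    static unsigned make_handle(__m128i key, void *handle48 /* 48 bytes */) {
      return _mm_encodekey128_u32(0, key, handle48);
    }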
diff --git a/darwin-x86/lib64/clang/14.0.2/include/limits.h b/linux-x86/lib64/clang/16.0.2/include/limits.h
similarity index 76%
copy from darwin-x86/lib64/clang/14.0.2/include/limits.h
copy to linux-x86/lib64/clang/16.0.2/include/limits.h
index c653580..32cc901 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/limits.h
+++ b/linux-x86/lib64/clang/16.0.2/include/limits.h
@@ -62,6 +62,26 @@
 
 #define CHAR_BIT  __CHAR_BIT__
 
+/* C2x 5.2.4.2.1 */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+#define BOOL_WIDTH   __BOOL_WIDTH__
+#define CHAR_WIDTH   CHAR_BIT
+#define SCHAR_WIDTH  CHAR_BIT
+#define UCHAR_WIDTH  CHAR_BIT
+#define USHRT_WIDTH  __SHRT_WIDTH__
+#define SHRT_WIDTH   __SHRT_WIDTH__
+#define UINT_WIDTH   __INT_WIDTH__
+#define INT_WIDTH    __INT_WIDTH__
+#define ULONG_WIDTH  __LONG_WIDTH__
+#define LONG_WIDTH   __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH  __LLONG_WIDTH__
+
+#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__
+#endif
+
 #ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
 #define CHAR_MIN 0
 #define CHAR_MAX UCHAR_MAX
@@ -73,7 +93,8 @@
 /* C99 5.2.4.2.1: Added long long.
    C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
  */
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L)
 
 #undef  LLONG_MIN
 #undef  LLONG_MAX
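
The *_WIDTH macros are C2x additions that give each integer type's bit width directly, so code no longer has to derive it from the *_MAX values. A trivial sketch:

    #include <limits.h>
    #include <stdio.h>

    int main(void) {
    #if defined(INT_WIDTH) /* only defined in C2x mode, per the guard above */
      printf("int: %d bits, long long: %d bits\n", INT_WIDTH, LLONG_WIDTH);
    #endif
      return 0;
    }

The defined() guards added to the version check are their own fix: testing an undefined macro with >= is valid in a #if (it evaluates as 0) but warns under -Wundef, which a system header must survive.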
diff --git a/linux-x86/lib64/clang/14.0.2/include/lwpintrin.h b/linux-x86/lib64/clang/16.0.2/include/lwpintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/lwpintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/lwpintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/lzcntintrin.h b/linux-x86/lib64/clang/16.0.2/include/lzcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/lzcntintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/lzcntintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mm3dnow.h b/linux-x86/lib64/clang/16.0.2/include/mm3dnow.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mm3dnow.h
rename to linux-x86/lib64/clang/16.0.2/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h b/linux-x86/lib64/clang/16.0.2/include/mm_malloc.h
similarity index 90%
copy from darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h
copy to linux-x86/lib64/clang/16.0.2/include/mm_malloc.h
index 933dbaa..d32fe59 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h
+++ b/linux-x86/lib64/clang/16.0.2/include/mm_malloc.h
@@ -28,9 +28,9 @@
 
 #if !(defined(_WIN32) && defined(_mm_malloc))
 static __inline__ void *__attribute__((__always_inline__, __nodebug__,
-                                       __malloc__))
-_mm_malloc(size_t __size, size_t __align)
-{
+                                       __malloc__, __alloc_size__(1),
+                                       __alloc_align__(2)))
+_mm_malloc(size_t __size, size_t __align) {
   if (__align == 1) {
     return malloc(__size);
   }
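
The added __alloc_size__/__alloc_align__ attributes tell the compiler that _mm_malloc returns __size bytes aligned to __align, which improves optimization and lets sanitizers and static analyzers bounds-check uses. The calling convention is unchanged; a sketch:

    #include <mm_malloc.h>

    int main(void) {
      /* 64-byte (cache-line) aligned allocation; pair with _mm_free. */
      float *buf = _mm_malloc(1024 * sizeof(float), 64);
      if (!buf)
        return 1;
      buf[0] = 1.0f;
      _mm_free(buf);
      return 0;
    }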
diff --git a/linux-x86/lib64/clang/14.0.2/include/mmintrin.h b/linux-x86/lib64/clang/16.0.2/include/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/mmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/module.modulemap b/linux-x86/lib64/clang/16.0.2/include/module.modulemap
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/module.modulemap
rename to linux-x86/lib64/clang/16.0.2/include/module.modulemap
diff --git a/linux-x86/lib64/clang/14.0.2/include/movdirintrin.h b/linux-x86/lib64/clang/16.0.2/include/movdirintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/movdirintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/movdirintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/msa.h b/linux-x86/lib64/clang/16.0.2/include/msa.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/msa.h
rename to linux-x86/lib64/clang/16.0.2/include/msa.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mwaitxintrin.h b/linux-x86/lib64/clang/16.0.2/include/mwaitxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mwaitxintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/mwaitxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/nmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/nmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/nmmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/nmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/omp-tools.h b/linux-x86/lib64/clang/16.0.2/include/omp-tools.h
similarity index 98%
rename from linux-x86/lib64/clang/14.0.2/include/omp-tools.h
rename to linux-x86/lib64/clang/16.0.2/include/omp-tools.h
index 5092174..6bae305 100644
--- a/linux-x86/lib64/clang/14.0.2/include/omp-tools.h
+++ b/linux-x86/lib64/clang/16.0.2/include/omp-tools.h
@@ -266,7 +266,10 @@
 
 typedef enum ompt_dispatch_t {
   ompt_dispatch_iteration             = 1,
-  ompt_dispatch_section               = 2
+  ompt_dispatch_section               = 2,
+  ompt_dispatch_ws_loop_chunk         = 3,
+  ompt_dispatch_taskloop_chunk        = 4,
+  ompt_dispatch_distribute_chunk      = 5
 } ompt_dispatch_t;
 
 typedef enum ompt_sync_region_t {
@@ -303,7 +306,11 @@
   ompt_work_workshare          = 5,
   ompt_work_distribute         = 6,
   ompt_work_taskloop           = 7,
-  ompt_work_scope              = 8
+  ompt_work_scope              = 8,
+  ompt_work_loop_static        = 10,
+  ompt_work_loop_dynamic       = 11,
+  ompt_work_loop_guided        = 12,
+  ompt_work_loop_other         = 13
 } ompt_work_t;
 
 typedef enum ompt_mutex_t {
@@ -554,6 +561,11 @@
   ompt_dependence_type_t dependence_type;
 } ompt_dependence_t;
 
+typedef struct ompt_dispatch_chunk_t {
+  uint64_t start;
+  uint64_t iterations;
+} ompt_dispatch_chunk_t;
+
 typedef int (*ompt_enumerate_states_t) (
   int current_state,
   int *next_state,
@@ -745,7 +757,7 @@
 } ompt_record_parallel_end_t;
 
 typedef void (*ompt_callback_work_t) (
-  ompt_work_t wstype,
+  ompt_work_t work_type,
   ompt_scope_endpoint_t endpoint,
   ompt_data_t *parallel_data,
   ompt_data_t *task_data,
@@ -754,7 +766,7 @@
 );
 
 typedef struct ompt_record_work_t {
-  ompt_work_t wstype;
+  ompt_work_t work_type;
   ompt_scope_endpoint_t endpoint;
   ompt_id_t parallel_id;
   ompt_id_t task_id;
diff --git a/linux-x86/lib64/clang/14.0.2/include/omp.h b/linux-x86/lib64/clang/16.0.2/include/omp.h
similarity index 98%
rename from linux-x86/lib64/clang/14.0.2/include/omp.h
rename to linux-x86/lib64/clang/16.0.2/include/omp.h
index 64bfae3..618e09d 100644
--- a/linux-x86/lib64/clang/14.0.2/include/omp.h
+++ b/linux-x86/lib64/clang/16.0.2/include/omp.h
@@ -368,7 +368,6 @@
     extern __KMP_IMP omp_allocator_handle_t const omp_cgroup_mem_alloc;
     extern __KMP_IMP omp_allocator_handle_t const omp_pteam_mem_alloc;
     extern __KMP_IMP omp_allocator_handle_t const omp_thread_mem_alloc;
-    /* Preview of target memory support */
     extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
     extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
     extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
@@ -379,7 +378,6 @@
     extern __KMP_IMP omp_memspace_handle_t const omp_const_mem_space;
     extern __KMP_IMP omp_memspace_handle_t const omp_high_bw_mem_space;
     extern __KMP_IMP omp_memspace_handle_t const omp_low_lat_mem_space;
-    /* Preview of target memory support */
     extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_host_mem_space;
     extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
     extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_device_mem_space;
@@ -399,7 +397,6 @@
       omp_cgroup_mem_alloc = 6,
       omp_pteam_mem_alloc = 7,
       omp_thread_mem_alloc = 8,
-      /* Preview of target memory support */
       llvm_omp_target_host_mem_alloc = 100,
       llvm_omp_target_shared_mem_alloc = 101,
       llvm_omp_target_device_mem_alloc = 102,
@@ -416,7 +413,6 @@
       omp_const_mem_space = 2,
       omp_high_bw_mem_space = 3,
       omp_low_lat_mem_space = 4,
-      /* Preview of target memory support */
       llvm_omp_target_host_mem_space = 100,
       llvm_omp_target_shared_mem_space = 101,
       llvm_omp_target_device_mem_space = 102,
@@ -497,6 +493,12 @@
     #pragma omp end declare variant
 #   endif
 
+    /* OpenMP 5.2 */
+    extern int __KAI_KMPC_CONVENTION omp_in_explicit_task(void);
+
+    /* LLVM Extensions */
+    extern void *llvm_omp_target_dynamic_shared_alloc(void);
+
 #   undef __KAI_KMPC_CONVENTION
 #   undef __KMP_IMP
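
omp_in_explicit_task is the OpenMP 5.2 query for whether the caller is inside an explicit task (one created by a task construct) rather than an implicit one. A minimal sketch:

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
    #pragma omp parallel
    #pragma omp single
      {
        printf("in single: %d\n", omp_in_explicit_task()); /* 0: implicit */
    #pragma omp task
        printf("in task:   %d\n", omp_in_explicit_task()); /* 1: explicit */
      }
      return 0;
    }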
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/linux-x86/lib64/clang/16.0.2/include/opencl-c-base.h
similarity index 96%
copy from darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
copy to linux-x86/lib64/clang/16.0.2/include/opencl-c-base.h
index 9c81ddb..c433b4f 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ b/linux-x86/lib64/clang/16.0.2/include/opencl-c-base.h
@@ -21,6 +21,7 @@
 #define cl_khr_subgroup_shuffle 1
 #define cl_khr_subgroup_shuffle_relative 1
 #define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_subgroup_rotate 1
 #define cl_khr_extended_bit_ops 1
 #define cl_khr_integer_dot_product 1
 #define __opencl_c_integer_dot_product_input_4x8bit 1
@@ -67,10 +68,25 @@
 #if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 // For the SPIR and SPIR-V targets, all features are supported.
 #if defined(__SPIR__) || defined(__SPIRV__)
+#define __opencl_c_work_group_collective_functions 1
+#define __opencl_c_atomic_order_seq_cst 1
+#define __opencl_c_atomic_scope_device 1
 #define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
 #endif // defined(__SPIR__) || defined(__SPIRV__)
 #endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 
+#if !defined(__opencl_c_generic_address_space)
+// Internal feature macro to provide named (global, local, private) address
+// space overloads for builtin functions that take a pointer argument.
+#define __opencl_c_named_address_space_builtins 1
+#endif // !defined(__opencl_c_generic_address_space)
+
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
+// Internal feature macro to provide subgroup builtins.
+#define __opencl_subgroup_builtins 1
+#endif
+
 // built-in scalar data types:
 
 /**
@@ -188,6 +204,9 @@
 typedef double double16 __attribute__((ext_vector_type(16)));
 #endif
 
+// An internal alias for half, for use by OpenCLBuiltins.td.
+#define __half half
+
 #if defined(__OPENCL_CPP_VERSION__)
 #define NULL nullptr
 #elif defined(__OPENCL_C_VERSION__)
@@ -498,12 +517,14 @@
 
 #define MAX_WORK_DIM 3
 
+#ifdef __opencl_c_device_enqueue
 typedef struct {
   unsigned int workDimension;
   size_t globalWorkOffset[MAX_WORK_DIM];
   size_t globalWorkSize[MAX_WORK_DIM];
   size_t localWorkSize[MAX_WORK_DIM];
 } ndrange_t;
+#endif // __opencl_c_device_enqueue
 
 #endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
@@ -600,9 +621,11 @@
 // C++ for OpenCL - __remove_address_space
 #if defined(__OPENCL_CPP_VERSION__)
 template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
 template <typename _Tp> struct __remove_address_space<__generic _Tp> {
   using type = _Tp;
 };
+#endif
 template <typename _Tp> struct __remove_address_space<__global _Tp> {
   using type = _Tp;
 };
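
The new guard around the __generic specialization mirrors the language rules: __generic exists only when the generic address space feature does. In C++ for OpenCL the trait is used like std::remove_cv; a small sketch with illustrative names:

    // C++ for OpenCL: strip the address-space qualifier from a type.
    template <typename T>
    using plain_t = typename __remove_address_space<T>::type;

    kernel void copy_first(__global int *p, __global int *q) {
      plain_t<__global int> tmp = *p; // tmp is plain 'int'
      *q = tmp;
    }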
diff --git a/linux-x86/lib64/clang/16.0.2/include/opencl-c.h b/linux-x86/lib64/clang/16.0.2/include/opencl-c.h
new file mode 100644
index 0000000..2afe023
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/opencl-c.h
@@ -0,0 +1,18338 @@
+//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_H_
+#define _OPENCL_H_
+
+#include "opencl-c-base.h"
+
+#if defined(__opencl_c_images)
+#ifndef cl_khr_depth_images
+#define cl_khr_depth_images
+#endif //cl_khr_depth_images
+#endif //defined(__opencl_c_images)
+
+#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
+#ifdef cl_khr_3d_image_writes
+#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
+#endif //cl_khr_3d_image_writes
+#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
+
+#if (defined(__OPENCL_CPP_VERSION__) ||                                        \
+     (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&                              \
+    (defined(__SPIR__) || defined(__SPIRV__))
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
+#endif // (defined(__OPENCL_CPP_VERSION__) ||
+       //  (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
+       // (defined(__SPIR__) || defined(__SPIRV__))
+
+#define __ovld __attribute__((overloadable))
+#define __conv __attribute__((convergent))
+
+// Optimizations
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+
+// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
+
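+// Illustrative note (not in the upstream text): the suffixes below encode
+// the rounding mode (_rte = to nearest even, _rtz = toward zero, _rtp =
+// toward +infinity, _rtn = toward -infinity), and _sat clamps out-of-range
+// values to the destination type's limits, e.g.:
+//   char c = convert_char_sat_rte(300.0f); // saturates to 127
+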
+char __ovld __cnfn convert_char_rte(char);
+char __ovld __cnfn convert_char_sat_rte(char);
+char __ovld __cnfn convert_char_rtz(char);
+char __ovld __cnfn convert_char_sat_rtz(char);
+char __ovld __cnfn convert_char_rtp(char);
+char __ovld __cnfn convert_char_sat_rtp(char);
+char __ovld __cnfn convert_char_rtn(char);
+char __ovld __cnfn convert_char_sat_rtn(char);
+char __ovld __cnfn convert_char(char);
+char __ovld __cnfn convert_char_sat(char);
+char __ovld __cnfn convert_char_rte(uchar);
+char __ovld __cnfn convert_char_sat_rte(uchar);
+char __ovld __cnfn convert_char_rtz(uchar);
+char __ovld __cnfn convert_char_sat_rtz(uchar);
+char __ovld __cnfn convert_char_rtp(uchar);
+char __ovld __cnfn convert_char_sat_rtp(uchar);
+char __ovld __cnfn convert_char_rtn(uchar);
+char __ovld __cnfn convert_char_sat_rtn(uchar);
+char __ovld __cnfn convert_char(uchar);
+char __ovld __cnfn convert_char_sat(uchar);
+char __ovld __cnfn convert_char_rte(short);
+char __ovld __cnfn convert_char_sat_rte(short);
+char __ovld __cnfn convert_char_rtz(short);
+char __ovld __cnfn convert_char_sat_rtz(short);
+char __ovld __cnfn convert_char_rtp(short);
+char __ovld __cnfn convert_char_sat_rtp(short);
+char __ovld __cnfn convert_char_rtn(short);
+char __ovld __cnfn convert_char_sat_rtn(short);
+char __ovld __cnfn convert_char(short);
+char __ovld __cnfn convert_char_sat(short);
+char __ovld __cnfn convert_char_rte(ushort);
+char __ovld __cnfn convert_char_sat_rte(ushort);
+char __ovld __cnfn convert_char_rtz(ushort);
+char __ovld __cnfn convert_char_sat_rtz(ushort);
+char __ovld __cnfn convert_char_rtp(ushort);
+char __ovld __cnfn convert_char_sat_rtp(ushort);
+char __ovld __cnfn convert_char_rtn(ushort);
+char __ovld __cnfn convert_char_sat_rtn(ushort);
+char __ovld __cnfn convert_char(ushort);
+char __ovld __cnfn convert_char_sat(ushort);
+char __ovld __cnfn convert_char_rte(int);
+char __ovld __cnfn convert_char_sat_rte(int);
+char __ovld __cnfn convert_char_rtz(int);
+char __ovld __cnfn convert_char_sat_rtz(int);
+char __ovld __cnfn convert_char_rtp(int);
+char __ovld __cnfn convert_char_sat_rtp(int);
+char __ovld __cnfn convert_char_rtn(int);
+char __ovld __cnfn convert_char_sat_rtn(int);
+char __ovld __cnfn convert_char(int);
+char __ovld __cnfn convert_char_sat(int);
+char __ovld __cnfn convert_char_rte(uint);
+char __ovld __cnfn convert_char_sat_rte(uint);
+char __ovld __cnfn convert_char_rtz(uint);
+char __ovld __cnfn convert_char_sat_rtz(uint);
+char __ovld __cnfn convert_char_rtp(uint);
+char __ovld __cnfn convert_char_sat_rtp(uint);
+char __ovld __cnfn convert_char_rtn(uint);
+char __ovld __cnfn convert_char_sat_rtn(uint);
+char __ovld __cnfn convert_char(uint);
+char __ovld __cnfn convert_char_sat(uint);
+char __ovld __cnfn convert_char_rte(long);
+char __ovld __cnfn convert_char_sat_rte(long);
+char __ovld __cnfn convert_char_rtz(long);
+char __ovld __cnfn convert_char_sat_rtz(long);
+char __ovld __cnfn convert_char_rtp(long);
+char __ovld __cnfn convert_char_sat_rtp(long);
+char __ovld __cnfn convert_char_rtn(long);
+char __ovld __cnfn convert_char_sat_rtn(long);
+char __ovld __cnfn convert_char(long);
+char __ovld __cnfn convert_char_sat(long);
+char __ovld __cnfn convert_char_rte(ulong);
+char __ovld __cnfn convert_char_sat_rte(ulong);
+char __ovld __cnfn convert_char_rtz(ulong);
+char __ovld __cnfn convert_char_sat_rtz(ulong);
+char __ovld __cnfn convert_char_rtp(ulong);
+char __ovld __cnfn convert_char_sat_rtp(ulong);
+char __ovld __cnfn convert_char_rtn(ulong);
+char __ovld __cnfn convert_char_sat_rtn(ulong);
+char __ovld __cnfn convert_char(ulong);
+char __ovld __cnfn convert_char_sat(ulong);
+char __ovld __cnfn convert_char_rte(float);
+char __ovld __cnfn convert_char_sat_rte(float);
+char __ovld __cnfn convert_char_rtz(float);
+char __ovld __cnfn convert_char_sat_rtz(float);
+char __ovld __cnfn convert_char_rtp(float);
+char __ovld __cnfn convert_char_sat_rtp(float);
+char __ovld __cnfn convert_char_rtn(float);
+char __ovld __cnfn convert_char_sat_rtn(float);
+char __ovld __cnfn convert_char(float);
+char __ovld __cnfn convert_char_sat(float);
+uchar __ovld __cnfn convert_uchar_rte(char);
+uchar __ovld __cnfn convert_uchar_sat_rte(char);
+uchar __ovld __cnfn convert_uchar_rtz(char);
+uchar __ovld __cnfn convert_uchar_sat_rtz(char);
+uchar __ovld __cnfn convert_uchar_rtp(char);
+uchar __ovld __cnfn convert_uchar_sat_rtp(char);
+uchar __ovld __cnfn convert_uchar_rtn(char);
+uchar __ovld __cnfn convert_uchar_sat_rtn(char);
+uchar __ovld __cnfn convert_uchar(char);
+uchar __ovld __cnfn convert_uchar_sat(char);
+uchar __ovld __cnfn convert_uchar_rte(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
+uchar __ovld __cnfn convert_uchar_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_rtn(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
+uchar __ovld __cnfn convert_uchar(uchar);
+uchar __ovld __cnfn convert_uchar_sat(uchar);
+uchar __ovld __cnfn convert_uchar_rte(short);
+uchar __ovld __cnfn convert_uchar_sat_rte(short);
+uchar __ovld __cnfn convert_uchar_rtz(short);
+uchar __ovld __cnfn convert_uchar_sat_rtz(short);
+uchar __ovld __cnfn convert_uchar_rtp(short);
+uchar __ovld __cnfn convert_uchar_sat_rtp(short);
+uchar __ovld __cnfn convert_uchar_rtn(short);
+uchar __ovld __cnfn convert_uchar_sat_rtn(short);
+uchar __ovld __cnfn convert_uchar(short);
+uchar __ovld __cnfn convert_uchar_sat(short);
+uchar __ovld __cnfn convert_uchar_rte(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
+uchar __ovld __cnfn convert_uchar_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_rtn(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
+uchar __ovld __cnfn convert_uchar(ushort);
+uchar __ovld __cnfn convert_uchar_sat(ushort);
+uchar __ovld __cnfn convert_uchar_rte(int);
+uchar __ovld __cnfn convert_uchar_sat_rte(int);
+uchar __ovld __cnfn convert_uchar_rtz(int);
+uchar __ovld __cnfn convert_uchar_sat_rtz(int);
+uchar __ovld __cnfn convert_uchar_rtp(int);
+uchar __ovld __cnfn convert_uchar_sat_rtp(int);
+uchar __ovld __cnfn convert_uchar_rtn(int);
+uchar __ovld __cnfn convert_uchar_sat_rtn(int);
+uchar __ovld __cnfn convert_uchar(int);
+uchar __ovld __cnfn convert_uchar_sat(int);
+uchar __ovld __cnfn convert_uchar_rte(uint);
+uchar __ovld __cnfn convert_uchar_sat_rte(uint);
+uchar __ovld __cnfn convert_uchar_rtz(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
+uchar __ovld __cnfn convert_uchar_rtp(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
+uchar __ovld __cnfn convert_uchar_rtn(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
+uchar __ovld __cnfn convert_uchar(uint);
+uchar __ovld __cnfn convert_uchar_sat(uint);
+uchar __ovld __cnfn convert_uchar_rte(long);
+uchar __ovld __cnfn convert_uchar_sat_rte(long);
+uchar __ovld __cnfn convert_uchar_rtz(long);
+uchar __ovld __cnfn convert_uchar_sat_rtz(long);
+uchar __ovld __cnfn convert_uchar_rtp(long);
+uchar __ovld __cnfn convert_uchar_sat_rtp(long);
+uchar __ovld __cnfn convert_uchar_rtn(long);
+uchar __ovld __cnfn convert_uchar_sat_rtn(long);
+uchar __ovld __cnfn convert_uchar(long);
+uchar __ovld __cnfn convert_uchar_sat(long);
+uchar __ovld __cnfn convert_uchar_rte(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
+uchar __ovld __cnfn convert_uchar_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_rtn(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
+uchar __ovld __cnfn convert_uchar(ulong);
+uchar __ovld __cnfn convert_uchar_sat(ulong);
+uchar __ovld __cnfn convert_uchar_rte(float);
+uchar __ovld __cnfn convert_uchar_sat_rte(float);
+uchar __ovld __cnfn convert_uchar_rtz(float);
+uchar __ovld __cnfn convert_uchar_sat_rtz(float);
+uchar __ovld __cnfn convert_uchar_rtp(float);
+uchar __ovld __cnfn convert_uchar_sat_rtp(float);
+uchar __ovld __cnfn convert_uchar_rtn(float);
+uchar __ovld __cnfn convert_uchar_sat_rtn(float);
+uchar __ovld __cnfn convert_uchar(float);
+uchar __ovld __cnfn convert_uchar_sat(float);
+
+short __ovld __cnfn convert_short_rte(char);
+short __ovld __cnfn convert_short_sat_rte(char);
+short __ovld __cnfn convert_short_rtz(char);
+short __ovld __cnfn convert_short_sat_rtz(char);
+short __ovld __cnfn convert_short_rtp(char);
+short __ovld __cnfn convert_short_sat_rtp(char);
+short __ovld __cnfn convert_short_rtn(char);
+short __ovld __cnfn convert_short_sat_rtn(char);
+short __ovld __cnfn convert_short(char);
+short __ovld __cnfn convert_short_sat(char);
+short __ovld __cnfn convert_short_rte(uchar);
+short __ovld __cnfn convert_short_sat_rte(uchar);
+short __ovld __cnfn convert_short_rtz(uchar);
+short __ovld __cnfn convert_short_sat_rtz(uchar);
+short __ovld __cnfn convert_short_rtp(uchar);
+short __ovld __cnfn convert_short_sat_rtp(uchar);
+short __ovld __cnfn convert_short_rtn(uchar);
+short __ovld __cnfn convert_short_sat_rtn(uchar);
+short __ovld __cnfn convert_short(uchar);
+short __ovld __cnfn convert_short_sat(uchar);
+short __ovld __cnfn convert_short_rte(short);
+short __ovld __cnfn convert_short_sat_rte(short);
+short __ovld __cnfn convert_short_rtz(short);
+short __ovld __cnfn convert_short_sat_rtz(short);
+short __ovld __cnfn convert_short_rtp(short);
+short __ovld __cnfn convert_short_sat_rtp(short);
+short __ovld __cnfn convert_short_rtn(short);
+short __ovld __cnfn convert_short_sat_rtn(short);
+short __ovld __cnfn convert_short(short);
+short __ovld __cnfn convert_short_sat(short);
+short __ovld __cnfn convert_short_rte(ushort);
+short __ovld __cnfn convert_short_sat_rte(ushort);
+short __ovld __cnfn convert_short_rtz(ushort);
+short __ovld __cnfn convert_short_sat_rtz(ushort);
+short __ovld __cnfn convert_short_rtp(ushort);
+short __ovld __cnfn convert_short_sat_rtp(ushort);
+short __ovld __cnfn convert_short_rtn(ushort);
+short __ovld __cnfn convert_short_sat_rtn(ushort);
+short __ovld __cnfn convert_short(ushort);
+short __ovld __cnfn convert_short_sat(ushort);
+short __ovld __cnfn convert_short_rte(int);
+short __ovld __cnfn convert_short_sat_rte(int);
+short __ovld __cnfn convert_short_rtz(int);
+short __ovld __cnfn convert_short_sat_rtz(int);
+short __ovld __cnfn convert_short_rtp(int);
+short __ovld __cnfn convert_short_sat_rtp(int);
+short __ovld __cnfn convert_short_rtn(int);
+short __ovld __cnfn convert_short_sat_rtn(int);
+short __ovld __cnfn convert_short(int);
+short __ovld __cnfn convert_short_sat(int);
+short __ovld __cnfn convert_short_rte(uint);
+short __ovld __cnfn convert_short_sat_rte(uint);
+short __ovld __cnfn convert_short_rtz(uint);
+short __ovld __cnfn convert_short_sat_rtz(uint);
+short __ovld __cnfn convert_short_rtp(uint);
+short __ovld __cnfn convert_short_sat_rtp(uint);
+short __ovld __cnfn convert_short_rtn(uint);
+short __ovld __cnfn convert_short_sat_rtn(uint);
+short __ovld __cnfn convert_short(uint);
+short __ovld __cnfn convert_short_sat(uint);
+short __ovld __cnfn convert_short_rte(long);
+short __ovld __cnfn convert_short_sat_rte(long);
+short __ovld __cnfn convert_short_rtz(long);
+short __ovld __cnfn convert_short_sat_rtz(long);
+short __ovld __cnfn convert_short_rtp(long);
+short __ovld __cnfn convert_short_sat_rtp(long);
+short __ovld __cnfn convert_short_rtn(long);
+short __ovld __cnfn convert_short_sat_rtn(long);
+short __ovld __cnfn convert_short(long);
+short __ovld __cnfn convert_short_sat(long);
+short __ovld __cnfn convert_short_rte(ulong);
+short __ovld __cnfn convert_short_sat_rte(ulong);
+short __ovld __cnfn convert_short_rtz(ulong);
+short __ovld __cnfn convert_short_sat_rtz(ulong);
+short __ovld __cnfn convert_short_rtp(ulong);
+short __ovld __cnfn convert_short_sat_rtp(ulong);
+short __ovld __cnfn convert_short_rtn(ulong);
+short __ovld __cnfn convert_short_sat_rtn(ulong);
+short __ovld __cnfn convert_short(ulong);
+short __ovld __cnfn convert_short_sat(ulong);
+short __ovld __cnfn convert_short_rte(float);
+short __ovld __cnfn convert_short_sat_rte(float);
+short __ovld __cnfn convert_short_rtz(float);
+short __ovld __cnfn convert_short_sat_rtz(float);
+short __ovld __cnfn convert_short_rtp(float);
+short __ovld __cnfn convert_short_sat_rtp(float);
+short __ovld __cnfn convert_short_rtn(float);
+short __ovld __cnfn convert_short_sat_rtn(float);
+short __ovld __cnfn convert_short(float);
+short __ovld __cnfn convert_short_sat(float);
+ushort __ovld __cnfn convert_ushort_rte(char);
+ushort __ovld __cnfn convert_ushort_sat_rte(char);
+ushort __ovld __cnfn convert_ushort_rtz(char);
+ushort __ovld __cnfn convert_ushort_sat_rtz(char);
+ushort __ovld __cnfn convert_ushort_rtp(char);
+ushort __ovld __cnfn convert_ushort_sat_rtp(char);
+ushort __ovld __cnfn convert_ushort_rtn(char);
+ushort __ovld __cnfn convert_ushort_sat_rtn(char);
+ushort __ovld __cnfn convert_ushort(char);
+ushort __ovld __cnfn convert_ushort_sat(char);
+ushort __ovld __cnfn convert_ushort_rte(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
+ushort __ovld __cnfn convert_ushort_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_rtn(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
+ushort __ovld __cnfn convert_ushort(uchar);
+ushort __ovld __cnfn convert_ushort_sat(uchar);
+ushort __ovld __cnfn convert_ushort_rte(short);
+ushort __ovld __cnfn convert_ushort_sat_rte(short);
+ushort __ovld __cnfn convert_ushort_rtz(short);
+ushort __ovld __cnfn convert_ushort_sat_rtz(short);
+ushort __ovld __cnfn convert_ushort_rtp(short);
+ushort __ovld __cnfn convert_ushort_sat_rtp(short);
+ushort __ovld __cnfn convert_ushort_rtn(short);
+ushort __ovld __cnfn convert_ushort_sat_rtn(short);
+ushort __ovld __cnfn convert_ushort(short);
+ushort __ovld __cnfn convert_ushort_sat(short);
+ushort __ovld __cnfn convert_ushort_rte(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
+ushort __ovld __cnfn convert_ushort_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_rtn(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
+ushort __ovld __cnfn convert_ushort(ushort);
+ushort __ovld __cnfn convert_ushort_sat(ushort);
+ushort __ovld __cnfn convert_ushort_rte(int);
+ushort __ovld __cnfn convert_ushort_sat_rte(int);
+ushort __ovld __cnfn convert_ushort_rtz(int);
+ushort __ovld __cnfn convert_ushort_sat_rtz(int);
+ushort __ovld __cnfn convert_ushort_rtp(int);
+ushort __ovld __cnfn convert_ushort_sat_rtp(int);
+ushort __ovld __cnfn convert_ushort_rtn(int);
+ushort __ovld __cnfn convert_ushort_sat_rtn(int);
+ushort __ovld __cnfn convert_ushort(int);
+ushort __ovld __cnfn convert_ushort_sat(int);
+ushort __ovld __cnfn convert_ushort_rte(uint);
+ushort __ovld __cnfn convert_ushort_sat_rte(uint);
+ushort __ovld __cnfn convert_ushort_rtz(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
+ushort __ovld __cnfn convert_ushort_rtp(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
+ushort __ovld __cnfn convert_ushort_rtn(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
+ushort __ovld __cnfn convert_ushort(uint);
+ushort __ovld __cnfn convert_ushort_sat(uint);
+ushort __ovld __cnfn convert_ushort_rte(long);
+ushort __ovld __cnfn convert_ushort_sat_rte(long);
+ushort __ovld __cnfn convert_ushort_rtz(long);
+ushort __ovld __cnfn convert_ushort_sat_rtz(long);
+ushort __ovld __cnfn convert_ushort_rtp(long);
+ushort __ovld __cnfn convert_ushort_sat_rtp(long);
+ushort __ovld __cnfn convert_ushort_rtn(long);
+ushort __ovld __cnfn convert_ushort_sat_rtn(long);
+ushort __ovld __cnfn convert_ushort(long);
+ushort __ovld __cnfn convert_ushort_sat(long);
+ushort __ovld __cnfn convert_ushort_rte(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
+ushort __ovld __cnfn convert_ushort_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_rtn(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
+ushort __ovld __cnfn convert_ushort(ulong);
+ushort __ovld __cnfn convert_ushort_sat(ulong);
+ushort __ovld __cnfn convert_ushort_rte(float);
+ushort __ovld __cnfn convert_ushort_sat_rte(float);
+ushort __ovld __cnfn convert_ushort_rtz(float);
+ushort __ovld __cnfn convert_ushort_sat_rtz(float);
+ushort __ovld __cnfn convert_ushort_rtp(float);
+ushort __ovld __cnfn convert_ushort_sat_rtp(float);
+ushort __ovld __cnfn convert_ushort_rtn(float);
+ushort __ovld __cnfn convert_ushort_sat_rtn(float);
+ushort __ovld __cnfn convert_ushort(float);
+ushort __ovld __cnfn convert_ushort_sat(float);
+int __ovld __cnfn convert_int_rte(char);
+int __ovld __cnfn convert_int_sat_rte(char);
+int __ovld __cnfn convert_int_rtz(char);
+int __ovld __cnfn convert_int_sat_rtz(char);
+int __ovld __cnfn convert_int_rtp(char);
+int __ovld __cnfn convert_int_sat_rtp(char);
+int __ovld __cnfn convert_int_rtn(char);
+int __ovld __cnfn convert_int_sat_rtn(char);
+int __ovld __cnfn convert_int(char);
+int __ovld __cnfn convert_int_sat(char);
+int __ovld __cnfn convert_int_rte(uchar);
+int __ovld __cnfn convert_int_sat_rte(uchar);
+int __ovld __cnfn convert_int_rtz(uchar);
+int __ovld __cnfn convert_int_sat_rtz(uchar);
+int __ovld __cnfn convert_int_rtp(uchar);
+int __ovld __cnfn convert_int_sat_rtp(uchar);
+int __ovld __cnfn convert_int_rtn(uchar);
+int __ovld __cnfn convert_int_sat_rtn(uchar);
+int __ovld __cnfn convert_int(uchar);
+int __ovld __cnfn convert_int_sat(uchar);
+int __ovld __cnfn convert_int_rte(short);
+int __ovld __cnfn convert_int_sat_rte(short);
+int __ovld __cnfn convert_int_rtz(short);
+int __ovld __cnfn convert_int_sat_rtz(short);
+int __ovld __cnfn convert_int_rtp(short);
+int __ovld __cnfn convert_int_sat_rtp(short);
+int __ovld __cnfn convert_int_rtn(short);
+int __ovld __cnfn convert_int_sat_rtn(short);
+int __ovld __cnfn convert_int(short);
+int __ovld __cnfn convert_int_sat(short);
+int __ovld __cnfn convert_int_rte(ushort);
+int __ovld __cnfn convert_int_sat_rte(ushort);
+int __ovld __cnfn convert_int_rtz(ushort);
+int __ovld __cnfn convert_int_sat_rtz(ushort);
+int __ovld __cnfn convert_int_rtp(ushort);
+int __ovld __cnfn convert_int_sat_rtp(ushort);
+int __ovld __cnfn convert_int_rtn(ushort);
+int __ovld __cnfn convert_int_sat_rtn(ushort);
+int __ovld __cnfn convert_int(ushort);
+int __ovld __cnfn convert_int_sat(ushort);
+int __ovld __cnfn convert_int_rte(int);
+int __ovld __cnfn convert_int_sat_rte(int);
+int __ovld __cnfn convert_int_rtz(int);
+int __ovld __cnfn convert_int_sat_rtz(int);
+int __ovld __cnfn convert_int_rtp(int);
+int __ovld __cnfn convert_int_sat_rtp(int);
+int __ovld __cnfn convert_int_rtn(int);
+int __ovld __cnfn convert_int_sat_rtn(int);
+int __ovld __cnfn convert_int(int);
+int __ovld __cnfn convert_int_sat(int);
+int __ovld __cnfn convert_int_rte(uint);
+int __ovld __cnfn convert_int_sat_rte(uint);
+int __ovld __cnfn convert_int_rtz(uint);
+int __ovld __cnfn convert_int_sat_rtz(uint);
+int __ovld __cnfn convert_int_rtp(uint);
+int __ovld __cnfn convert_int_sat_rtp(uint);
+int __ovld __cnfn convert_int_rtn(uint);
+int __ovld __cnfn convert_int_sat_rtn(uint);
+int __ovld __cnfn convert_int(uint);
+int __ovld __cnfn convert_int_sat(uint);
+int __ovld __cnfn convert_int_rte(long);
+int __ovld __cnfn convert_int_sat_rte(long);
+int __ovld __cnfn convert_int_rtz(long);
+int __ovld __cnfn convert_int_sat_rtz(long);
+int __ovld __cnfn convert_int_rtp(long);
+int __ovld __cnfn convert_int_sat_rtp(long);
+int __ovld __cnfn convert_int_rtn(long);
+int __ovld __cnfn convert_int_sat_rtn(long);
+int __ovld __cnfn convert_int(long);
+int __ovld __cnfn convert_int_sat(long);
+int __ovld __cnfn convert_int_rte(ulong);
+int __ovld __cnfn convert_int_sat_rte(ulong);
+int __ovld __cnfn convert_int_rtz(ulong);
+int __ovld __cnfn convert_int_sat_rtz(ulong);
+int __ovld __cnfn convert_int_rtp(ulong);
+int __ovld __cnfn convert_int_sat_rtp(ulong);
+int __ovld __cnfn convert_int_rtn(ulong);
+int __ovld __cnfn convert_int_sat_rtn(ulong);
+int __ovld __cnfn convert_int(ulong);
+int __ovld __cnfn convert_int_sat(ulong);
+int __ovld __cnfn convert_int_rte(float);
+int __ovld __cnfn convert_int_sat_rte(float);
+int __ovld __cnfn convert_int_rtz(float);
+int __ovld __cnfn convert_int_sat_rtz(float);
+int __ovld __cnfn convert_int_rtp(float);
+int __ovld __cnfn convert_int_sat_rtp(float);
+int __ovld __cnfn convert_int_rtn(float);
+int __ovld __cnfn convert_int_sat_rtn(float);
+int __ovld __cnfn convert_int(float);
+int __ovld __cnfn convert_int_sat(float);
+uint __ovld __cnfn convert_uint_rte(char);
+uint __ovld __cnfn convert_uint_sat_rte(char);
+uint __ovld __cnfn convert_uint_rtz(char);
+uint __ovld __cnfn convert_uint_sat_rtz(char);
+uint __ovld __cnfn convert_uint_rtp(char);
+uint __ovld __cnfn convert_uint_sat_rtp(char);
+uint __ovld __cnfn convert_uint_rtn(char);
+uint __ovld __cnfn convert_uint_sat_rtn(char);
+uint __ovld __cnfn convert_uint(char);
+uint __ovld __cnfn convert_uint_sat(char);
+uint __ovld __cnfn convert_uint_rte(uchar);
+uint __ovld __cnfn convert_uint_sat_rte(uchar);
+uint __ovld __cnfn convert_uint_rtz(uchar);
+uint __ovld __cnfn convert_uint_sat_rtz(uchar);
+uint __ovld __cnfn convert_uint_rtp(uchar);
+uint __ovld __cnfn convert_uint_sat_rtp(uchar);
+uint __ovld __cnfn convert_uint_rtn(uchar);
+uint __ovld __cnfn convert_uint_sat_rtn(uchar);
+uint __ovld __cnfn convert_uint(uchar);
+uint __ovld __cnfn convert_uint_sat(uchar);
+uint __ovld __cnfn convert_uint_rte(short);
+uint __ovld __cnfn convert_uint_sat_rte(short);
+uint __ovld __cnfn convert_uint_rtz(short);
+uint __ovld __cnfn convert_uint_sat_rtz(short);
+uint __ovld __cnfn convert_uint_rtp(short);
+uint __ovld __cnfn convert_uint_sat_rtp(short);
+uint __ovld __cnfn convert_uint_rtn(short);
+uint __ovld __cnfn convert_uint_sat_rtn(short);
+uint __ovld __cnfn convert_uint(short);
+uint __ovld __cnfn convert_uint_sat(short);
+uint __ovld __cnfn convert_uint_rte(ushort);
+uint __ovld __cnfn convert_uint_sat_rte(ushort);
+uint __ovld __cnfn convert_uint_rtz(ushort);
+uint __ovld __cnfn convert_uint_sat_rtz(ushort);
+uint __ovld __cnfn convert_uint_rtp(ushort);
+uint __ovld __cnfn convert_uint_sat_rtp(ushort);
+uint __ovld __cnfn convert_uint_rtn(ushort);
+uint __ovld __cnfn convert_uint_sat_rtn(ushort);
+uint __ovld __cnfn convert_uint(ushort);
+uint __ovld __cnfn convert_uint_sat(ushort);
+uint __ovld __cnfn convert_uint_rte(int);
+uint __ovld __cnfn convert_uint_sat_rte(int);
+uint __ovld __cnfn convert_uint_rtz(int);
+uint __ovld __cnfn convert_uint_sat_rtz(int);
+uint __ovld __cnfn convert_uint_rtp(int);
+uint __ovld __cnfn convert_uint_sat_rtp(int);
+uint __ovld __cnfn convert_uint_rtn(int);
+uint __ovld __cnfn convert_uint_sat_rtn(int);
+uint __ovld __cnfn convert_uint(int);
+uint __ovld __cnfn convert_uint_sat(int);
+uint __ovld __cnfn convert_uint_rte(uint);
+uint __ovld __cnfn convert_uint_sat_rte(uint);
+uint __ovld __cnfn convert_uint_rtz(uint);
+uint __ovld __cnfn convert_uint_sat_rtz(uint);
+uint __ovld __cnfn convert_uint_rtp(uint);
+uint __ovld __cnfn convert_uint_sat_rtp(uint);
+uint __ovld __cnfn convert_uint_rtn(uint);
+uint __ovld __cnfn convert_uint_sat_rtn(uint);
+uint __ovld __cnfn convert_uint(uint);
+uint __ovld __cnfn convert_uint_sat(uint);
+uint __ovld __cnfn convert_uint_rte(long);
+uint __ovld __cnfn convert_uint_sat_rte(long);
+uint __ovld __cnfn convert_uint_rtz(long);
+uint __ovld __cnfn convert_uint_sat_rtz(long);
+uint __ovld __cnfn convert_uint_rtp(long);
+uint __ovld __cnfn convert_uint_sat_rtp(long);
+uint __ovld __cnfn convert_uint_rtn(long);
+uint __ovld __cnfn convert_uint_sat_rtn(long);
+uint __ovld __cnfn convert_uint(long);
+uint __ovld __cnfn convert_uint_sat(long);
+uint __ovld __cnfn convert_uint_rte(ulong);
+uint __ovld __cnfn convert_uint_sat_rte(ulong);
+uint __ovld __cnfn convert_uint_rtz(ulong);
+uint __ovld __cnfn convert_uint_sat_rtz(ulong);
+uint __ovld __cnfn convert_uint_rtp(ulong);
+uint __ovld __cnfn convert_uint_sat_rtp(ulong);
+uint __ovld __cnfn convert_uint_rtn(ulong);
+uint __ovld __cnfn convert_uint_sat_rtn(ulong);
+uint __ovld __cnfn convert_uint(ulong);
+uint __ovld __cnfn convert_uint_sat(ulong);
+uint __ovld __cnfn convert_uint_rte(float);
+uint __ovld __cnfn convert_uint_sat_rte(float);
+uint __ovld __cnfn convert_uint_rtz(float);
+uint __ovld __cnfn convert_uint_sat_rtz(float);
+uint __ovld __cnfn convert_uint_rtp(float);
+uint __ovld __cnfn convert_uint_sat_rtp(float);
+uint __ovld __cnfn convert_uint_rtn(float);
+uint __ovld __cnfn convert_uint_sat_rtn(float);
+uint __ovld __cnfn convert_uint(float);
+uint __ovld __cnfn convert_uint_sat(float);
+long __ovld __cnfn convert_long_rte(char);
+long __ovld __cnfn convert_long_sat_rte(char);
+long __ovld __cnfn convert_long_rtz(char);
+long __ovld __cnfn convert_long_sat_rtz(char);
+long __ovld __cnfn convert_long_rtp(char);
+long __ovld __cnfn convert_long_sat_rtp(char);
+long __ovld __cnfn convert_long_rtn(char);
+long __ovld __cnfn convert_long_sat_rtn(char);
+long __ovld __cnfn convert_long(char);
+long __ovld __cnfn convert_long_sat(char);
+long __ovld __cnfn convert_long_rte(uchar);
+long __ovld __cnfn convert_long_sat_rte(uchar);
+long __ovld __cnfn convert_long_rtz(uchar);
+long __ovld __cnfn convert_long_sat_rtz(uchar);
+long __ovld __cnfn convert_long_rtp(uchar);
+long __ovld __cnfn convert_long_sat_rtp(uchar);
+long __ovld __cnfn convert_long_rtn(uchar);
+long __ovld __cnfn convert_long_sat_rtn(uchar);
+long __ovld __cnfn convert_long(uchar);
+long __ovld __cnfn convert_long_sat(uchar);
+long __ovld __cnfn convert_long_rte(short);
+long __ovld __cnfn convert_long_sat_rte(short);
+long __ovld __cnfn convert_long_rtz(short);
+long __ovld __cnfn convert_long_sat_rtz(short);
+long __ovld __cnfn convert_long_rtp(short);
+long __ovld __cnfn convert_long_sat_rtp(short);
+long __ovld __cnfn convert_long_rtn(short);
+long __ovld __cnfn convert_long_sat_rtn(short);
+long __ovld __cnfn convert_long(short);
+long __ovld __cnfn convert_long_sat(short);
+long __ovld __cnfn convert_long_rte(ushort);
+long __ovld __cnfn convert_long_sat_rte(ushort);
+long __ovld __cnfn convert_long_rtz(ushort);
+long __ovld __cnfn convert_long_sat_rtz(ushort);
+long __ovld __cnfn convert_long_rtp(ushort);
+long __ovld __cnfn convert_long_sat_rtp(ushort);
+long __ovld __cnfn convert_long_rtn(ushort);
+long __ovld __cnfn convert_long_sat_rtn(ushort);
+long __ovld __cnfn convert_long(ushort);
+long __ovld __cnfn convert_long_sat(ushort);
+long __ovld __cnfn convert_long_rte(int);
+long __ovld __cnfn convert_long_sat_rte(int);
+long __ovld __cnfn convert_long_rtz(int);
+long __ovld __cnfn convert_long_sat_rtz(int);
+long __ovld __cnfn convert_long_rtp(int);
+long __ovld __cnfn convert_long_sat_rtp(int);
+long __ovld __cnfn convert_long_rtn(int);
+long __ovld __cnfn convert_long_sat_rtn(int);
+long __ovld __cnfn convert_long(int);
+long __ovld __cnfn convert_long_sat(int);
+long __ovld __cnfn convert_long_rte(uint);
+long __ovld __cnfn convert_long_sat_rte(uint);
+long __ovld __cnfn convert_long_rtz(uint);
+long __ovld __cnfn convert_long_sat_rtz(uint);
+long __ovld __cnfn convert_long_rtp(uint);
+long __ovld __cnfn convert_long_sat_rtp(uint);
+long __ovld __cnfn convert_long_rtn(uint);
+long __ovld __cnfn convert_long_sat_rtn(uint);
+long __ovld __cnfn convert_long(uint);
+long __ovld __cnfn convert_long_sat(uint);
+long __ovld __cnfn convert_long_rte(long);
+long __ovld __cnfn convert_long_sat_rte(long);
+long __ovld __cnfn convert_long_rtz(long);
+long __ovld __cnfn convert_long_sat_rtz(long);
+long __ovld __cnfn convert_long_rtp(long);
+long __ovld __cnfn convert_long_sat_rtp(long);
+long __ovld __cnfn convert_long_rtn(long);
+long __ovld __cnfn convert_long_sat_rtn(long);
+long __ovld __cnfn convert_long(long);
+long __ovld __cnfn convert_long_sat(long);
+long __ovld __cnfn convert_long_rte(ulong);
+long __ovld __cnfn convert_long_sat_rte(ulong);
+long __ovld __cnfn convert_long_rtz(ulong);
+long __ovld __cnfn convert_long_sat_rtz(ulong);
+long __ovld __cnfn convert_long_rtp(ulong);
+long __ovld __cnfn convert_long_sat_rtp(ulong);
+long __ovld __cnfn convert_long_rtn(ulong);
+long __ovld __cnfn convert_long_sat_rtn(ulong);
+long __ovld __cnfn convert_long(ulong);
+long __ovld __cnfn convert_long_sat(ulong);
+long __ovld __cnfn convert_long_rte(float);
+long __ovld __cnfn convert_long_sat_rte(float);
+long __ovld __cnfn convert_long_rtz(float);
+long __ovld __cnfn convert_long_sat_rtz(float);
+long __ovld __cnfn convert_long_rtp(float);
+long __ovld __cnfn convert_long_sat_rtp(float);
+long __ovld __cnfn convert_long_rtn(float);
+long __ovld __cnfn convert_long_sat_rtn(float);
+long __ovld __cnfn convert_long(float);
+long __ovld __cnfn convert_long_sat(float);
+ulong __ovld __cnfn convert_ulong_rte(char);
+ulong __ovld __cnfn convert_ulong_sat_rte(char);
+ulong __ovld __cnfn convert_ulong_rtz(char);
+ulong __ovld __cnfn convert_ulong_sat_rtz(char);
+ulong __ovld __cnfn convert_ulong_rtp(char);
+ulong __ovld __cnfn convert_ulong_sat_rtp(char);
+ulong __ovld __cnfn convert_ulong_rtn(char);
+ulong __ovld __cnfn convert_ulong_sat_rtn(char);
+ulong __ovld __cnfn convert_ulong(char);
+ulong __ovld __cnfn convert_ulong_sat(char);
+ulong __ovld __cnfn convert_ulong_rte(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
+ulong __ovld __cnfn convert_ulong_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_rtn(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
+ulong __ovld __cnfn convert_ulong(uchar);
+ulong __ovld __cnfn convert_ulong_sat(uchar);
+ulong __ovld __cnfn convert_ulong_rte(short);
+ulong __ovld __cnfn convert_ulong_sat_rte(short);
+ulong __ovld __cnfn convert_ulong_rtz(short);
+ulong __ovld __cnfn convert_ulong_sat_rtz(short);
+ulong __ovld __cnfn convert_ulong_rtp(short);
+ulong __ovld __cnfn convert_ulong_sat_rtp(short);
+ulong __ovld __cnfn convert_ulong_rtn(short);
+ulong __ovld __cnfn convert_ulong_sat_rtn(short);
+ulong __ovld __cnfn convert_ulong(short);
+ulong __ovld __cnfn convert_ulong_sat(short);
+ulong __ovld __cnfn convert_ulong_rte(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
+ulong __ovld __cnfn convert_ulong_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_rtn(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
+ulong __ovld __cnfn convert_ulong(ushort);
+ulong __ovld __cnfn convert_ulong_sat(ushort);
+ulong __ovld __cnfn convert_ulong_rte(int);
+ulong __ovld __cnfn convert_ulong_sat_rte(int);
+ulong __ovld __cnfn convert_ulong_rtz(int);
+ulong __ovld __cnfn convert_ulong_sat_rtz(int);
+ulong __ovld __cnfn convert_ulong_rtp(int);
+ulong __ovld __cnfn convert_ulong_sat_rtp(int);
+ulong __ovld __cnfn convert_ulong_rtn(int);
+ulong __ovld __cnfn convert_ulong_sat_rtn(int);
+ulong __ovld __cnfn convert_ulong(int);
+ulong __ovld __cnfn convert_ulong_sat(int);
+ulong __ovld __cnfn convert_ulong_rte(uint);
+ulong __ovld __cnfn convert_ulong_sat_rte(uint);
+ulong __ovld __cnfn convert_ulong_rtz(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
+ulong __ovld __cnfn convert_ulong_rtp(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
+ulong __ovld __cnfn convert_ulong_rtn(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
+ulong __ovld __cnfn convert_ulong(uint);
+ulong __ovld __cnfn convert_ulong_sat(uint);
+ulong __ovld __cnfn convert_ulong_rte(long);
+ulong __ovld __cnfn convert_ulong_sat_rte(long);
+ulong __ovld __cnfn convert_ulong_rtz(long);
+ulong __ovld __cnfn convert_ulong_sat_rtz(long);
+ulong __ovld __cnfn convert_ulong_rtp(long);
+ulong __ovld __cnfn convert_ulong_sat_rtp(long);
+ulong __ovld __cnfn convert_ulong_rtn(long);
+ulong __ovld __cnfn convert_ulong_sat_rtn(long);
+ulong __ovld __cnfn convert_ulong(long);
+ulong __ovld __cnfn convert_ulong_sat(long);
+ulong __ovld __cnfn convert_ulong_rte(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
+ulong __ovld __cnfn convert_ulong_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_rtn(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
+ulong __ovld __cnfn convert_ulong(ulong);
+ulong __ovld __cnfn convert_ulong_sat(ulong);
+ulong __ovld __cnfn convert_ulong_rte(float);
+ulong __ovld __cnfn convert_ulong_sat_rte(float);
+ulong __ovld __cnfn convert_ulong_rtz(float);
+ulong __ovld __cnfn convert_ulong_sat_rtz(float);
+ulong __ovld __cnfn convert_ulong_rtp(float);
+ulong __ovld __cnfn convert_ulong_sat_rtp(float);
+ulong __ovld __cnfn convert_ulong_rtn(float);
+ulong __ovld __cnfn convert_ulong_sat_rtn(float);
+ulong __ovld __cnfn convert_ulong(float);
+ulong __ovld __cnfn convert_ulong_sat(float);
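+// Conversions to floating-point destinations, such as the convert_float
+// overloads below, intentionally have no _sat forms: the saturation modifier
+// applies only to integer destination types.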
+float __ovld __cnfn convert_float_rte(char);
+float __ovld __cnfn convert_float_rtz(char);
+float __ovld __cnfn convert_float_rtp(char);
+float __ovld __cnfn convert_float_rtn(char);
+float __ovld __cnfn convert_float(char);
+float __ovld __cnfn convert_float_rte(uchar);
+float __ovld __cnfn convert_float_rtz(uchar);
+float __ovld __cnfn convert_float_rtp(uchar);
+float __ovld __cnfn convert_float_rtn(uchar);
+float __ovld __cnfn convert_float(uchar);
+float __ovld __cnfn convert_float_rte(short);
+float __ovld __cnfn convert_float_rtz(short);
+float __ovld __cnfn convert_float_rtp(short);
+float __ovld __cnfn convert_float_rtn(short);
+float __ovld __cnfn convert_float(short);
+float __ovld __cnfn convert_float_rte(ushort);
+float __ovld __cnfn convert_float_rtz(ushort);
+float __ovld __cnfn convert_float_rtp(ushort);
+float __ovld __cnfn convert_float_rtn(ushort);
+float __ovld __cnfn convert_float(ushort);
+float __ovld __cnfn convert_float_rte(int);
+float __ovld __cnfn convert_float_rtz(int);
+float __ovld __cnfn convert_float_rtp(int);
+float __ovld __cnfn convert_float_rtn(int);
+float __ovld __cnfn convert_float(int);
+float __ovld __cnfn convert_float_rte(uint);
+float __ovld __cnfn convert_float_rtz(uint);
+float __ovld __cnfn convert_float_rtp(uint);
+float __ovld __cnfn convert_float_rtn(uint);
+float __ovld __cnfn convert_float(uint);
+float __ovld __cnfn convert_float_rte(long);
+float __ovld __cnfn convert_float_rtz(long);
+float __ovld __cnfn convert_float_rtp(long);
+float __ovld __cnfn convert_float_rtn(long);
+float __ovld __cnfn convert_float(long);
+float __ovld __cnfn convert_float_rte(ulong);
+float __ovld __cnfn convert_float_rtz(ulong);
+float __ovld __cnfn convert_float_rtp(ulong);
+float __ovld __cnfn convert_float_rtn(ulong);
+float __ovld __cnfn convert_float(ulong);
+float __ovld __cnfn convert_float_rte(float);
+float __ovld __cnfn convert_float_rtz(float);
+float __ovld __cnfn convert_float_rtp(float);
+float __ovld __cnfn convert_float_rtn(float);
+float __ovld __cnfn convert_float(float);
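+// The scalar pattern above is now repeated element-wise for the vector
+// types, starting with the 2-element overloads. As an illustrative sketch
+// (not part of the original header), kernel code might use them like so:
+//
+//   float2 f = (float2)(1.6f, -3.2f);
+//   int2 i = convert_int2_sat_rte(f);  // element-wise (2, -3): saturating,
+//                                      // rounded to nearest even
+//   uchar2 u = convert_uchar2_sat(i);  // clamps each element to 0..255,
+//                                      // giving (2, 0)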
+char2 __ovld __cnfn convert_char2_rte(char2);
+char2 __ovld __cnfn convert_char2_sat_rte(char2);
+char2 __ovld __cnfn convert_char2_rtz(char2);
+char2 __ovld __cnfn convert_char2_sat_rtz(char2);
+char2 __ovld __cnfn convert_char2_rtp(char2);
+char2 __ovld __cnfn convert_char2_sat_rtp(char2);
+char2 __ovld __cnfn convert_char2_rtn(char2);
+char2 __ovld __cnfn convert_char2_sat_rtn(char2);
+char2 __ovld __cnfn convert_char2(char2);
+char2 __ovld __cnfn convert_char2_sat(char2);
+char2 __ovld __cnfn convert_char2_rte(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
+char2 __ovld __cnfn convert_char2_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_rtn(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
+char2 __ovld __cnfn convert_char2(uchar2);
+char2 __ovld __cnfn convert_char2_sat(uchar2);
+char2 __ovld __cnfn convert_char2_rte(short2);
+char2 __ovld __cnfn convert_char2_sat_rte(short2);
+char2 __ovld __cnfn convert_char2_rtz(short2);
+char2 __ovld __cnfn convert_char2_sat_rtz(short2);
+char2 __ovld __cnfn convert_char2_rtp(short2);
+char2 __ovld __cnfn convert_char2_sat_rtp(short2);
+char2 __ovld __cnfn convert_char2_rtn(short2);
+char2 __ovld __cnfn convert_char2_sat_rtn(short2);
+char2 __ovld __cnfn convert_char2(short2);
+char2 __ovld __cnfn convert_char2_sat(short2);
+char2 __ovld __cnfn convert_char2_rte(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
+char2 __ovld __cnfn convert_char2_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_rtn(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
+char2 __ovld __cnfn convert_char2(ushort2);
+char2 __ovld __cnfn convert_char2_sat(ushort2);
+char2 __ovld __cnfn convert_char2_rte(int2);
+char2 __ovld __cnfn convert_char2_sat_rte(int2);
+char2 __ovld __cnfn convert_char2_rtz(int2);
+char2 __ovld __cnfn convert_char2_sat_rtz(int2);
+char2 __ovld __cnfn convert_char2_rtp(int2);
+char2 __ovld __cnfn convert_char2_sat_rtp(int2);
+char2 __ovld __cnfn convert_char2_rtn(int2);
+char2 __ovld __cnfn convert_char2_sat_rtn(int2);
+char2 __ovld __cnfn convert_char2(int2);
+char2 __ovld __cnfn convert_char2_sat(int2);
+char2 __ovld __cnfn convert_char2_rte(uint2);
+char2 __ovld __cnfn convert_char2_sat_rte(uint2);
+char2 __ovld __cnfn convert_char2_rtz(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
+char2 __ovld __cnfn convert_char2_rtp(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
+char2 __ovld __cnfn convert_char2_rtn(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
+char2 __ovld __cnfn convert_char2(uint2);
+char2 __ovld __cnfn convert_char2_sat(uint2);
+char2 __ovld __cnfn convert_char2_rte(long2);
+char2 __ovld __cnfn convert_char2_sat_rte(long2);
+char2 __ovld __cnfn convert_char2_rtz(long2);
+char2 __ovld __cnfn convert_char2_sat_rtz(long2);
+char2 __ovld __cnfn convert_char2_rtp(long2);
+char2 __ovld __cnfn convert_char2_sat_rtp(long2);
+char2 __ovld __cnfn convert_char2_rtn(long2);
+char2 __ovld __cnfn convert_char2_sat_rtn(long2);
+char2 __ovld __cnfn convert_char2(long2);
+char2 __ovld __cnfn convert_char2_sat(long2);
+char2 __ovld __cnfn convert_char2_rte(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
+char2 __ovld __cnfn convert_char2_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_rtn(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
+char2 __ovld __cnfn convert_char2(ulong2);
+char2 __ovld __cnfn convert_char2_sat(ulong2);
+char2 __ovld __cnfn convert_char2_rte(float2);
+char2 __ovld __cnfn convert_char2_sat_rte(float2);
+char2 __ovld __cnfn convert_char2_rtz(float2);
+char2 __ovld __cnfn convert_char2_sat_rtz(float2);
+char2 __ovld __cnfn convert_char2_rtp(float2);
+char2 __ovld __cnfn convert_char2_sat_rtp(float2);
+char2 __ovld __cnfn convert_char2_rtn(float2);
+char2 __ovld __cnfn convert_char2_sat_rtn(float2);
+char2 __ovld __cnfn convert_char2(float2);
+char2 __ovld __cnfn convert_char2_sat(float2);
+uchar2 __ovld __cnfn convert_uchar2_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat(char2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat(short2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat(int2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat(long2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat(float2);
+short2 __ovld __cnfn convert_short2_rte(char2);
+short2 __ovld __cnfn convert_short2_sat_rte(char2);
+short2 __ovld __cnfn convert_short2_rtz(char2);
+short2 __ovld __cnfn convert_short2_sat_rtz(char2);
+short2 __ovld __cnfn convert_short2_rtp(char2);
+short2 __ovld __cnfn convert_short2_sat_rtp(char2);
+short2 __ovld __cnfn convert_short2_rtn(char2);
+short2 __ovld __cnfn convert_short2_sat_rtn(char2);
+short2 __ovld __cnfn convert_short2(char2);
+short2 __ovld __cnfn convert_short2_sat(char2);
+short2 __ovld __cnfn convert_short2_rte(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
+short2 __ovld __cnfn convert_short2_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_rtn(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
+short2 __ovld __cnfn convert_short2(uchar2);
+short2 __ovld __cnfn convert_short2_sat(uchar2);
+short2 __ovld __cnfn convert_short2_rte(short2);
+short2 __ovld __cnfn convert_short2_sat_rte(short2);
+short2 __ovld __cnfn convert_short2_rtz(short2);
+short2 __ovld __cnfn convert_short2_sat_rtz(short2);
+short2 __ovld __cnfn convert_short2_rtp(short2);
+short2 __ovld __cnfn convert_short2_sat_rtp(short2);
+short2 __ovld __cnfn convert_short2_rtn(short2);
+short2 __ovld __cnfn convert_short2_sat_rtn(short2);
+short2 __ovld __cnfn convert_short2(short2);
+short2 __ovld __cnfn convert_short2_sat(short2);
+short2 __ovld __cnfn convert_short2_rte(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
+short2 __ovld __cnfn convert_short2_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_rtn(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
+short2 __ovld __cnfn convert_short2(ushort2);
+short2 __ovld __cnfn convert_short2_sat(ushort2);
+short2 __ovld __cnfn convert_short2_rte(int2);
+short2 __ovld __cnfn convert_short2_sat_rte(int2);
+short2 __ovld __cnfn convert_short2_rtz(int2);
+short2 __ovld __cnfn convert_short2_sat_rtz(int2);
+short2 __ovld __cnfn convert_short2_rtp(int2);
+short2 __ovld __cnfn convert_short2_sat_rtp(int2);
+short2 __ovld __cnfn convert_short2_rtn(int2);
+short2 __ovld __cnfn convert_short2_sat_rtn(int2);
+short2 __ovld __cnfn convert_short2(int2);
+short2 __ovld __cnfn convert_short2_sat(int2);
+short2 __ovld __cnfn convert_short2_rte(uint2);
+short2 __ovld __cnfn convert_short2_sat_rte(uint2);
+short2 __ovld __cnfn convert_short2_rtz(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
+short2 __ovld __cnfn convert_short2_rtp(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
+short2 __ovld __cnfn convert_short2_rtn(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
+short2 __ovld __cnfn convert_short2(uint2);
+short2 __ovld __cnfn convert_short2_sat(uint2);
+short2 __ovld __cnfn convert_short2_rte(long2);
+short2 __ovld __cnfn convert_short2_sat_rte(long2);
+short2 __ovld __cnfn convert_short2_rtz(long2);
+short2 __ovld __cnfn convert_short2_sat_rtz(long2);
+short2 __ovld __cnfn convert_short2_rtp(long2);
+short2 __ovld __cnfn convert_short2_sat_rtp(long2);
+short2 __ovld __cnfn convert_short2_rtn(long2);
+short2 __ovld __cnfn convert_short2_sat_rtn(long2);
+short2 __ovld __cnfn convert_short2(long2);
+short2 __ovld __cnfn convert_short2_sat(long2);
+short2 __ovld __cnfn convert_short2_rte(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
+short2 __ovld __cnfn convert_short2_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_rtn(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
+short2 __ovld __cnfn convert_short2(ulong2);
+short2 __ovld __cnfn convert_short2_sat(ulong2);
+short2 __ovld __cnfn convert_short2_rte(float2);
+short2 __ovld __cnfn convert_short2_sat_rte(float2);
+short2 __ovld __cnfn convert_short2_rtz(float2);
+short2 __ovld __cnfn convert_short2_sat_rtz(float2);
+short2 __ovld __cnfn convert_short2_rtp(float2);
+short2 __ovld __cnfn convert_short2_sat_rtp(float2);
+short2 __ovld __cnfn convert_short2_rtn(float2);
+short2 __ovld __cnfn convert_short2_sat_rtn(float2);
+short2 __ovld __cnfn convert_short2(float2);
+short2 __ovld __cnfn convert_short2_sat(float2);
+ushort2 __ovld __cnfn convert_ushort2_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat(char2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat(short2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat(int2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat(long2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat(float2);
+int2 __ovld __cnfn convert_int2_rte(char2);
+int2 __ovld __cnfn convert_int2_sat_rte(char2);
+int2 __ovld __cnfn convert_int2_rtz(char2);
+int2 __ovld __cnfn convert_int2_sat_rtz(char2);
+int2 __ovld __cnfn convert_int2_rtp(char2);
+int2 __ovld __cnfn convert_int2_sat_rtp(char2);
+int2 __ovld __cnfn convert_int2_rtn(char2);
+int2 __ovld __cnfn convert_int2_sat_rtn(char2);
+int2 __ovld __cnfn convert_int2(char2);
+int2 __ovld __cnfn convert_int2_sat(char2);
+int2 __ovld __cnfn convert_int2_rte(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
+int2 __ovld __cnfn convert_int2_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_rtn(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
+int2 __ovld __cnfn convert_int2(uchar2);
+int2 __ovld __cnfn convert_int2_sat(uchar2);
+int2 __ovld __cnfn convert_int2_rte(short2);
+int2 __ovld __cnfn convert_int2_sat_rte(short2);
+int2 __ovld __cnfn convert_int2_rtz(short2);
+int2 __ovld __cnfn convert_int2_sat_rtz(short2);
+int2 __ovld __cnfn convert_int2_rtp(short2);
+int2 __ovld __cnfn convert_int2_sat_rtp(short2);
+int2 __ovld __cnfn convert_int2_rtn(short2);
+int2 __ovld __cnfn convert_int2_sat_rtn(short2);
+int2 __ovld __cnfn convert_int2(short2);
+int2 __ovld __cnfn convert_int2_sat(short2);
+int2 __ovld __cnfn convert_int2_rte(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
+int2 __ovld __cnfn convert_int2_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_rtn(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
+int2 __ovld __cnfn convert_int2(ushort2);
+int2 __ovld __cnfn convert_int2_sat(ushort2);
+int2 __ovld __cnfn convert_int2_rte(int2);
+int2 __ovld __cnfn convert_int2_sat_rte(int2);
+int2 __ovld __cnfn convert_int2_rtz(int2);
+int2 __ovld __cnfn convert_int2_sat_rtz(int2);
+int2 __ovld __cnfn convert_int2_rtp(int2);
+int2 __ovld __cnfn convert_int2_sat_rtp(int2);
+int2 __ovld __cnfn convert_int2_rtn(int2);
+int2 __ovld __cnfn convert_int2_sat_rtn(int2);
+int2 __ovld __cnfn convert_int2(int2);
+int2 __ovld __cnfn convert_int2_sat(int2);
+int2 __ovld __cnfn convert_int2_rte(uint2);
+int2 __ovld __cnfn convert_int2_sat_rte(uint2);
+int2 __ovld __cnfn convert_int2_rtz(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
+int2 __ovld __cnfn convert_int2_rtp(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
+int2 __ovld __cnfn convert_int2_rtn(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
+int2 __ovld __cnfn convert_int2(uint2);
+int2 __ovld __cnfn convert_int2_sat(uint2);
+int2 __ovld __cnfn convert_int2_rte(long2);
+int2 __ovld __cnfn convert_int2_sat_rte(long2);
+int2 __ovld __cnfn convert_int2_rtz(long2);
+int2 __ovld __cnfn convert_int2_sat_rtz(long2);
+int2 __ovld __cnfn convert_int2_rtp(long2);
+int2 __ovld __cnfn convert_int2_sat_rtp(long2);
+int2 __ovld __cnfn convert_int2_rtn(long2);
+int2 __ovld __cnfn convert_int2_sat_rtn(long2);
+int2 __ovld __cnfn convert_int2(long2);
+int2 __ovld __cnfn convert_int2_sat(long2);
+int2 __ovld __cnfn convert_int2_rte(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
+int2 __ovld __cnfn convert_int2_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_rtn(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
+int2 __ovld __cnfn convert_int2(ulong2);
+int2 __ovld __cnfn convert_int2_sat(ulong2);
+int2 __ovld __cnfn convert_int2_rte(float2);
+int2 __ovld __cnfn convert_int2_sat_rte(float2);
+int2 __ovld __cnfn convert_int2_rtz(float2);
+int2 __ovld __cnfn convert_int2_sat_rtz(float2);
+int2 __ovld __cnfn convert_int2_rtp(float2);
+int2 __ovld __cnfn convert_int2_sat_rtp(float2);
+int2 __ovld __cnfn convert_int2_rtn(float2);
+int2 __ovld __cnfn convert_int2_sat_rtn(float2);
+int2 __ovld __cnfn convert_int2(float2);
+int2 __ovld __cnfn convert_int2_sat(float2);
+uint2 __ovld __cnfn convert_uint2_rte(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
+uint2 __ovld __cnfn convert_uint2_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_rtn(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
+uint2 __ovld __cnfn convert_uint2(char2);
+uint2 __ovld __cnfn convert_uint2_sat(char2);
+uint2 __ovld __cnfn convert_uint2_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat(uchar2);
+uint2 __ovld __cnfn convert_uint2_rte(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
+uint2 __ovld __cnfn convert_uint2_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_rtn(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
+uint2 __ovld __cnfn convert_uint2(short2);
+uint2 __ovld __cnfn convert_uint2_sat(short2);
+uint2 __ovld __cnfn convert_uint2_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat(ushort2);
+uint2 __ovld __cnfn convert_uint2_rte(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
+uint2 __ovld __cnfn convert_uint2_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_rtn(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
+uint2 __ovld __cnfn convert_uint2(int2);
+uint2 __ovld __cnfn convert_uint2_sat(int2);
+uint2 __ovld __cnfn convert_uint2_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2(uint2);
+uint2 __ovld __cnfn convert_uint2_sat(uint2);
+uint2 __ovld __cnfn convert_uint2_rte(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
+uint2 __ovld __cnfn convert_uint2_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_rtn(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
+uint2 __ovld __cnfn convert_uint2(long2);
+uint2 __ovld __cnfn convert_uint2_sat(long2);
+uint2 __ovld __cnfn convert_uint2_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat(ulong2);
+uint2 __ovld __cnfn convert_uint2_rte(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
+uint2 __ovld __cnfn convert_uint2_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_rtn(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
+uint2 __ovld __cnfn convert_uint2(float2);
+uint2 __ovld __cnfn convert_uint2_sat(float2);
+long2 __ovld __cnfn convert_long2_rte(char2);
+long2 __ovld __cnfn convert_long2_sat_rte(char2);
+long2 __ovld __cnfn convert_long2_rtz(char2);
+long2 __ovld __cnfn convert_long2_sat_rtz(char2);
+long2 __ovld __cnfn convert_long2_rtp(char2);
+long2 __ovld __cnfn convert_long2_sat_rtp(char2);
+long2 __ovld __cnfn convert_long2_rtn(char2);
+long2 __ovld __cnfn convert_long2_sat_rtn(char2);
+long2 __ovld __cnfn convert_long2(char2);
+long2 __ovld __cnfn convert_long2_sat(char2);
+long2 __ovld __cnfn convert_long2_rte(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
+long2 __ovld __cnfn convert_long2_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_rtn(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
+long2 __ovld __cnfn convert_long2(uchar2);
+long2 __ovld __cnfn convert_long2_sat(uchar2);
+long2 __ovld __cnfn convert_long2_rte(short2);
+long2 __ovld __cnfn convert_long2_sat_rte(short2);
+long2 __ovld __cnfn convert_long2_rtz(short2);
+long2 __ovld __cnfn convert_long2_sat_rtz(short2);
+long2 __ovld __cnfn convert_long2_rtp(short2);
+long2 __ovld __cnfn convert_long2_sat_rtp(short2);
+long2 __ovld __cnfn convert_long2_rtn(short2);
+long2 __ovld __cnfn convert_long2_sat_rtn(short2);
+long2 __ovld __cnfn convert_long2(short2);
+long2 __ovld __cnfn convert_long2_sat(short2);
+long2 __ovld __cnfn convert_long2_rte(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
+long2 __ovld __cnfn convert_long2_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_rtn(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
+long2 __ovld __cnfn convert_long2(ushort2);
+long2 __ovld __cnfn convert_long2_sat(ushort2);
+long2 __ovld __cnfn convert_long2_rte(int2);
+long2 __ovld __cnfn convert_long2_sat_rte(int2);
+long2 __ovld __cnfn convert_long2_rtz(int2);
+long2 __ovld __cnfn convert_long2_sat_rtz(int2);
+long2 __ovld __cnfn convert_long2_rtp(int2);
+long2 __ovld __cnfn convert_long2_sat_rtp(int2);
+long2 __ovld __cnfn convert_long2_rtn(int2);
+long2 __ovld __cnfn convert_long2_sat_rtn(int2);
+long2 __ovld __cnfn convert_long2(int2);
+long2 __ovld __cnfn convert_long2_sat(int2);
+long2 __ovld __cnfn convert_long2_rte(uint2);
+long2 __ovld __cnfn convert_long2_sat_rte(uint2);
+long2 __ovld __cnfn convert_long2_rtz(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
+long2 __ovld __cnfn convert_long2_rtp(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
+long2 __ovld __cnfn convert_long2_rtn(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
+long2 __ovld __cnfn convert_long2(uint2);
+long2 __ovld __cnfn convert_long2_sat(uint2);
+long2 __ovld __cnfn convert_long2_rte(long2);
+long2 __ovld __cnfn convert_long2_sat_rte(long2);
+long2 __ovld __cnfn convert_long2_rtz(long2);
+long2 __ovld __cnfn convert_long2_sat_rtz(long2);
+long2 __ovld __cnfn convert_long2_rtp(long2);
+long2 __ovld __cnfn convert_long2_sat_rtp(long2);
+long2 __ovld __cnfn convert_long2_rtn(long2);
+long2 __ovld __cnfn convert_long2_sat_rtn(long2);
+long2 __ovld __cnfn convert_long2(long2);
+long2 __ovld __cnfn convert_long2_sat(long2);
+long2 __ovld __cnfn convert_long2_rte(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
+long2 __ovld __cnfn convert_long2_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_rtn(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
+long2 __ovld __cnfn convert_long2(ulong2);
+long2 __ovld __cnfn convert_long2_sat(ulong2);
+long2 __ovld __cnfn convert_long2_rte(float2);
+long2 __ovld __cnfn convert_long2_sat_rte(float2);
+long2 __ovld __cnfn convert_long2_rtz(float2);
+long2 __ovld __cnfn convert_long2_sat_rtz(float2);
+long2 __ovld __cnfn convert_long2_rtp(float2);
+long2 __ovld __cnfn convert_long2_sat_rtp(float2);
+long2 __ovld __cnfn convert_long2_rtn(float2);
+long2 __ovld __cnfn convert_long2_sat_rtn(float2);
+long2 __ovld __cnfn convert_long2(float2);
+long2 __ovld __cnfn convert_long2_sat(float2);
+ulong2 __ovld __cnfn convert_ulong2_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat(char2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat(short2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat(int2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat(long2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat(float2);
+float2 __ovld __cnfn convert_float2_rte(char2);
+float2 __ovld __cnfn convert_float2_rtz(char2);
+float2 __ovld __cnfn convert_float2_rtp(char2);
+float2 __ovld __cnfn convert_float2_rtn(char2);
+float2 __ovld __cnfn convert_float2(char2);
+float2 __ovld __cnfn convert_float2_rte(uchar2);
+float2 __ovld __cnfn convert_float2_rtz(uchar2);
+float2 __ovld __cnfn convert_float2_rtp(uchar2);
+float2 __ovld __cnfn convert_float2_rtn(uchar2);
+float2 __ovld __cnfn convert_float2(uchar2);
+float2 __ovld __cnfn convert_float2_rte(short2);
+float2 __ovld __cnfn convert_float2_rtz(short2);
+float2 __ovld __cnfn convert_float2_rtp(short2);
+float2 __ovld __cnfn convert_float2_rtn(short2);
+float2 __ovld __cnfn convert_float2(short2);
+float2 __ovld __cnfn convert_float2_rte(ushort2);
+float2 __ovld __cnfn convert_float2_rtz(ushort2);
+float2 __ovld __cnfn convert_float2_rtp(ushort2);
+float2 __ovld __cnfn convert_float2_rtn(ushort2);
+float2 __ovld __cnfn convert_float2(ushort2);
+float2 __ovld __cnfn convert_float2_rte(int2);
+float2 __ovld __cnfn convert_float2_rtz(int2);
+float2 __ovld __cnfn convert_float2_rtp(int2);
+float2 __ovld __cnfn convert_float2_rtn(int2);
+float2 __ovld __cnfn convert_float2(int2);
+float2 __ovld __cnfn convert_float2_rte(uint2);
+float2 __ovld __cnfn convert_float2_rtz(uint2);
+float2 __ovld __cnfn convert_float2_rtp(uint2);
+float2 __ovld __cnfn convert_float2_rtn(uint2);
+float2 __ovld __cnfn convert_float2(uint2);
+float2 __ovld __cnfn convert_float2_rte(long2);
+float2 __ovld __cnfn convert_float2_rtz(long2);
+float2 __ovld __cnfn convert_float2_rtp(long2);
+float2 __ovld __cnfn convert_float2_rtn(long2);
+float2 __ovld __cnfn convert_float2(long2);
+float2 __ovld __cnfn convert_float2_rte(ulong2);
+float2 __ovld __cnfn convert_float2_rtz(ulong2);
+float2 __ovld __cnfn convert_float2_rtp(ulong2);
+float2 __ovld __cnfn convert_float2_rtn(ulong2);
+float2 __ovld __cnfn convert_float2(ulong2);
+float2 __ovld __cnfn convert_float2_rte(float2);
+float2 __ovld __cnfn convert_float2_rtz(float2);
+float2 __ovld __cnfn convert_float2_rtp(float2);
+float2 __ovld __cnfn convert_float2_rtn(float2);
+float2 __ovld __cnfn convert_float2(float2);
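+// The 3-element vector overloads follow the same pattern. Note that in
+// OpenCL a 3-component vector has the size and alignment of a 4-component
+// vector of the same element type.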
+char3 __ovld __cnfn convert_char3_rte(char3);
+char3 __ovld __cnfn convert_char3_sat_rte(char3);
+char3 __ovld __cnfn convert_char3_rtz(char3);
+char3 __ovld __cnfn convert_char3_sat_rtz(char3);
+char3 __ovld __cnfn convert_char3_rtp(char3);
+char3 __ovld __cnfn convert_char3_sat_rtp(char3);
+char3 __ovld __cnfn convert_char3_rtn(char3);
+char3 __ovld __cnfn convert_char3_sat_rtn(char3);
+char3 __ovld __cnfn convert_char3(char3);
+char3 __ovld __cnfn convert_char3_sat(char3);
+char3 __ovld __cnfn convert_char3_rte(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
+char3 __ovld __cnfn convert_char3_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_rtn(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
+char3 __ovld __cnfn convert_char3(uchar3);
+char3 __ovld __cnfn convert_char3_sat(uchar3);
+char3 __ovld __cnfn convert_char3_rte(short3);
+char3 __ovld __cnfn convert_char3_sat_rte(short3);
+char3 __ovld __cnfn convert_char3_rtz(short3);
+char3 __ovld __cnfn convert_char3_sat_rtz(short3);
+char3 __ovld __cnfn convert_char3_rtp(short3);
+char3 __ovld __cnfn convert_char3_sat_rtp(short3);
+char3 __ovld __cnfn convert_char3_rtn(short3);
+char3 __ovld __cnfn convert_char3_sat_rtn(short3);
+char3 __ovld __cnfn convert_char3(short3);
+char3 __ovld __cnfn convert_char3_sat(short3);
+char3 __ovld __cnfn convert_char3_rte(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
+char3 __ovld __cnfn convert_char3_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_rtn(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
+char3 __ovld __cnfn convert_char3(ushort3);
+char3 __ovld __cnfn convert_char3_sat(ushort3);
+char3 __ovld __cnfn convert_char3_rte(int3);
+char3 __ovld __cnfn convert_char3_sat_rte(int3);
+char3 __ovld __cnfn convert_char3_rtz(int3);
+char3 __ovld __cnfn convert_char3_sat_rtz(int3);
+char3 __ovld __cnfn convert_char3_rtp(int3);
+char3 __ovld __cnfn convert_char3_sat_rtp(int3);
+char3 __ovld __cnfn convert_char3_rtn(int3);
+char3 __ovld __cnfn convert_char3_sat_rtn(int3);
+char3 __ovld __cnfn convert_char3(int3);
+char3 __ovld __cnfn convert_char3_sat(int3);
+char3 __ovld __cnfn convert_char3_rte(uint3);
+char3 __ovld __cnfn convert_char3_sat_rte(uint3);
+char3 __ovld __cnfn convert_char3_rtz(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
+char3 __ovld __cnfn convert_char3_rtp(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
+char3 __ovld __cnfn convert_char3_rtn(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
+char3 __ovld __cnfn convert_char3(uint3);
+char3 __ovld __cnfn convert_char3_sat(uint3);
+char3 __ovld __cnfn convert_char3_rte(long3);
+char3 __ovld __cnfn convert_char3_sat_rte(long3);
+char3 __ovld __cnfn convert_char3_rtz(long3);
+char3 __ovld __cnfn convert_char3_sat_rtz(long3);
+char3 __ovld __cnfn convert_char3_rtp(long3);
+char3 __ovld __cnfn convert_char3_sat_rtp(long3);
+char3 __ovld __cnfn convert_char3_rtn(long3);
+char3 __ovld __cnfn convert_char3_sat_rtn(long3);
+char3 __ovld __cnfn convert_char3(long3);
+char3 __ovld __cnfn convert_char3_sat(long3);
+char3 __ovld __cnfn convert_char3_rte(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
+char3 __ovld __cnfn convert_char3_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_rtn(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
+char3 __ovld __cnfn convert_char3(ulong3);
+char3 __ovld __cnfn convert_char3_sat(ulong3);
+char3 __ovld __cnfn convert_char3_rte(float3);
+char3 __ovld __cnfn convert_char3_sat_rte(float3);
+char3 __ovld __cnfn convert_char3_rtz(float3);
+char3 __ovld __cnfn convert_char3_sat_rtz(float3);
+char3 __ovld __cnfn convert_char3_rtp(float3);
+char3 __ovld __cnfn convert_char3_sat_rtp(float3);
+char3 __ovld __cnfn convert_char3_rtn(float3);
+char3 __ovld __cnfn convert_char3_sat_rtn(float3);
+char3 __ovld __cnfn convert_char3(float3);
+char3 __ovld __cnfn convert_char3_sat(float3);
+uchar3 __ovld __cnfn convert_uchar3_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat(char3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat(short3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat(int3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat(long3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat(float3);
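+// Conversions to short3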
+short3 __ovld __cnfn convert_short3_rte(char3);
+short3 __ovld __cnfn convert_short3_sat_rte(char3);
+short3 __ovld __cnfn convert_short3_rtz(char3);
+short3 __ovld __cnfn convert_short3_sat_rtz(char3);
+short3 __ovld __cnfn convert_short3_rtp(char3);
+short3 __ovld __cnfn convert_short3_sat_rtp(char3);
+short3 __ovld __cnfn convert_short3_rtn(char3);
+short3 __ovld __cnfn convert_short3_sat_rtn(char3);
+short3 __ovld __cnfn convert_short3(char3);
+short3 __ovld __cnfn convert_short3_sat(char3);
+short3 __ovld __cnfn convert_short3_rte(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
+short3 __ovld __cnfn convert_short3_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_rtn(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
+short3 __ovld __cnfn convert_short3(uchar3);
+short3 __ovld __cnfn convert_short3_sat(uchar3);
+short3 __ovld __cnfn convert_short3_rte(short3);
+short3 __ovld __cnfn convert_short3_sat_rte(short3);
+short3 __ovld __cnfn convert_short3_rtz(short3);
+short3 __ovld __cnfn convert_short3_sat_rtz(short3);
+short3 __ovld __cnfn convert_short3_rtp(short3);
+short3 __ovld __cnfn convert_short3_sat_rtp(short3);
+short3 __ovld __cnfn convert_short3_rtn(short3);
+short3 __ovld __cnfn convert_short3_sat_rtn(short3);
+short3 __ovld __cnfn convert_short3(short3);
+short3 __ovld __cnfn convert_short3_sat(short3);
+short3 __ovld __cnfn convert_short3_rte(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
+short3 __ovld __cnfn convert_short3_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_rtn(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
+short3 __ovld __cnfn convert_short3(ushort3);
+short3 __ovld __cnfn convert_short3_sat(ushort3);
+short3 __ovld __cnfn convert_short3_rte(int3);
+short3 __ovld __cnfn convert_short3_sat_rte(int3);
+short3 __ovld __cnfn convert_short3_rtz(int3);
+short3 __ovld __cnfn convert_short3_sat_rtz(int3);
+short3 __ovld __cnfn convert_short3_rtp(int3);
+short3 __ovld __cnfn convert_short3_sat_rtp(int3);
+short3 __ovld __cnfn convert_short3_rtn(int3);
+short3 __ovld __cnfn convert_short3_sat_rtn(int3);
+short3 __ovld __cnfn convert_short3(int3);
+short3 __ovld __cnfn convert_short3_sat(int3);
+short3 __ovld __cnfn convert_short3_rte(uint3);
+short3 __ovld __cnfn convert_short3_sat_rte(uint3);
+short3 __ovld __cnfn convert_short3_rtz(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
+short3 __ovld __cnfn convert_short3_rtp(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
+short3 __ovld __cnfn convert_short3_rtn(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
+short3 __ovld __cnfn convert_short3(uint3);
+short3 __ovld __cnfn convert_short3_sat(uint3);
+short3 __ovld __cnfn convert_short3_rte(long3);
+short3 __ovld __cnfn convert_short3_sat_rte(long3);
+short3 __ovld __cnfn convert_short3_rtz(long3);
+short3 __ovld __cnfn convert_short3_sat_rtz(long3);
+short3 __ovld __cnfn convert_short3_rtp(long3);
+short3 __ovld __cnfn convert_short3_sat_rtp(long3);
+short3 __ovld __cnfn convert_short3_rtn(long3);
+short3 __ovld __cnfn convert_short3_sat_rtn(long3);
+short3 __ovld __cnfn convert_short3(long3);
+short3 __ovld __cnfn convert_short3_sat(long3);
+short3 __ovld __cnfn convert_short3_rte(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
+short3 __ovld __cnfn convert_short3_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_rtn(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
+short3 __ovld __cnfn convert_short3(ulong3);
+short3 __ovld __cnfn convert_short3_sat(ulong3);
+short3 __ovld __cnfn convert_short3_rte(float3);
+short3 __ovld __cnfn convert_short3_sat_rte(float3);
+short3 __ovld __cnfn convert_short3_rtz(float3);
+short3 __ovld __cnfn convert_short3_sat_rtz(float3);
+short3 __ovld __cnfn convert_short3_rtp(float3);
+short3 __ovld __cnfn convert_short3_sat_rtp(float3);
+short3 __ovld __cnfn convert_short3_rtn(float3);
+short3 __ovld __cnfn convert_short3_sat_rtn(float3);
+short3 __ovld __cnfn convert_short3(float3);
+short3 __ovld __cnfn convert_short3_sat(float3);
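+// Conversions to ushort3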
+ushort3 __ovld __cnfn convert_ushort3_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat(char3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat(short3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat(int3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat(long3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat(float3);
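+// Conversions to int3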
+int3 __ovld __cnfn convert_int3_rte(char3);
+int3 __ovld __cnfn convert_int3_sat_rte(char3);
+int3 __ovld __cnfn convert_int3_rtz(char3);
+int3 __ovld __cnfn convert_int3_sat_rtz(char3);
+int3 __ovld __cnfn convert_int3_rtp(char3);
+int3 __ovld __cnfn convert_int3_sat_rtp(char3);
+int3 __ovld __cnfn convert_int3_rtn(char3);
+int3 __ovld __cnfn convert_int3_sat_rtn(char3);
+int3 __ovld __cnfn convert_int3(char3);
+int3 __ovld __cnfn convert_int3_sat(char3);
+int3 __ovld __cnfn convert_int3_rte(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
+int3 __ovld __cnfn convert_int3_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_rtn(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
+int3 __ovld __cnfn convert_int3(uchar3);
+int3 __ovld __cnfn convert_int3_sat(uchar3);
+int3 __ovld __cnfn convert_int3_rte(short3);
+int3 __ovld __cnfn convert_int3_sat_rte(short3);
+int3 __ovld __cnfn convert_int3_rtz(short3);
+int3 __ovld __cnfn convert_int3_sat_rtz(short3);
+int3 __ovld __cnfn convert_int3_rtp(short3);
+int3 __ovld __cnfn convert_int3_sat_rtp(short3);
+int3 __ovld __cnfn convert_int3_rtn(short3);
+int3 __ovld __cnfn convert_int3_sat_rtn(short3);
+int3 __ovld __cnfn convert_int3(short3);
+int3 __ovld __cnfn convert_int3_sat(short3);
+int3 __ovld __cnfn convert_int3_rte(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
+int3 __ovld __cnfn convert_int3_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_rtn(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
+int3 __ovld __cnfn convert_int3(ushort3);
+int3 __ovld __cnfn convert_int3_sat(ushort3);
+int3 __ovld __cnfn convert_int3_rte(int3);
+int3 __ovld __cnfn convert_int3_sat_rte(int3);
+int3 __ovld __cnfn convert_int3_rtz(int3);
+int3 __ovld __cnfn convert_int3_sat_rtz(int3);
+int3 __ovld __cnfn convert_int3_rtp(int3);
+int3 __ovld __cnfn convert_int3_sat_rtp(int3);
+int3 __ovld __cnfn convert_int3_rtn(int3);
+int3 __ovld __cnfn convert_int3_sat_rtn(int3);
+int3 __ovld __cnfn convert_int3(int3);
+int3 __ovld __cnfn convert_int3_sat(int3);
+int3 __ovld __cnfn convert_int3_rte(uint3);
+int3 __ovld __cnfn convert_int3_sat_rte(uint3);
+int3 __ovld __cnfn convert_int3_rtz(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
+int3 __ovld __cnfn convert_int3_rtp(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
+int3 __ovld __cnfn convert_int3_rtn(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
+int3 __ovld __cnfn convert_int3(uint3);
+int3 __ovld __cnfn convert_int3_sat(uint3);
+int3 __ovld __cnfn convert_int3_rte(long3);
+int3 __ovld __cnfn convert_int3_sat_rte(long3);
+int3 __ovld __cnfn convert_int3_rtz(long3);
+int3 __ovld __cnfn convert_int3_sat_rtz(long3);
+int3 __ovld __cnfn convert_int3_rtp(long3);
+int3 __ovld __cnfn convert_int3_sat_rtp(long3);
+int3 __ovld __cnfn convert_int3_rtn(long3);
+int3 __ovld __cnfn convert_int3_sat_rtn(long3);
+int3 __ovld __cnfn convert_int3(long3);
+int3 __ovld __cnfn convert_int3_sat(long3);
+int3 __ovld __cnfn convert_int3_rte(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
+int3 __ovld __cnfn convert_int3_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_rtn(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
+int3 __ovld __cnfn convert_int3(ulong3);
+int3 __ovld __cnfn convert_int3_sat(ulong3);
+int3 __ovld __cnfn convert_int3_rte(float3);
+int3 __ovld __cnfn convert_int3_sat_rte(float3);
+int3 __ovld __cnfn convert_int3_rtz(float3);
+int3 __ovld __cnfn convert_int3_sat_rtz(float3);
+int3 __ovld __cnfn convert_int3_rtp(float3);
+int3 __ovld __cnfn convert_int3_sat_rtp(float3);
+int3 __ovld __cnfn convert_int3_rtn(float3);
+int3 __ovld __cnfn convert_int3_sat_rtn(float3);
+int3 __ovld __cnfn convert_int3(float3);
+int3 __ovld __cnfn convert_int3_sat(float3);
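+// Conversions to uint3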
+uint3 __ovld __cnfn convert_uint3_rte(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
+uint3 __ovld __cnfn convert_uint3_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_rtn(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
+uint3 __ovld __cnfn convert_uint3(char3);
+uint3 __ovld __cnfn convert_uint3_sat(char3);
+uint3 __ovld __cnfn convert_uint3_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat(uchar3);
+uint3 __ovld __cnfn convert_uint3_rte(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
+uint3 __ovld __cnfn convert_uint3_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_rtn(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
+uint3 __ovld __cnfn convert_uint3(short3);
+uint3 __ovld __cnfn convert_uint3_sat(short3);
+uint3 __ovld __cnfn convert_uint3_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat(ushort3);
+uint3 __ovld __cnfn convert_uint3_rte(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
+uint3 __ovld __cnfn convert_uint3_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_rtn(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
+uint3 __ovld __cnfn convert_uint3(int3);
+uint3 __ovld __cnfn convert_uint3_sat(int3);
+uint3 __ovld __cnfn convert_uint3_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3(uint3);
+uint3 __ovld __cnfn convert_uint3_sat(uint3);
+uint3 __ovld __cnfn convert_uint3_rte(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
+uint3 __ovld __cnfn convert_uint3_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_rtn(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
+uint3 __ovld __cnfn convert_uint3(long3);
+uint3 __ovld __cnfn convert_uint3_sat(long3);
+uint3 __ovld __cnfn convert_uint3_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat(ulong3);
+uint3 __ovld __cnfn convert_uint3_rte(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
+uint3 __ovld __cnfn convert_uint3_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_rtn(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
+uint3 __ovld __cnfn convert_uint3(float3);
+uint3 __ovld __cnfn convert_uint3_sat(float3);
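+// Conversions to long3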
+long3 __ovld __cnfn convert_long3_rte(char3);
+long3 __ovld __cnfn convert_long3_sat_rte(char3);
+long3 __ovld __cnfn convert_long3_rtz(char3);
+long3 __ovld __cnfn convert_long3_sat_rtz(char3);
+long3 __ovld __cnfn convert_long3_rtp(char3);
+long3 __ovld __cnfn convert_long3_sat_rtp(char3);
+long3 __ovld __cnfn convert_long3_rtn(char3);
+long3 __ovld __cnfn convert_long3_sat_rtn(char3);
+long3 __ovld __cnfn convert_long3(char3);
+long3 __ovld __cnfn convert_long3_sat(char3);
+long3 __ovld __cnfn convert_long3_rte(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
+long3 __ovld __cnfn convert_long3_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_rtn(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
+long3 __ovld __cnfn convert_long3(uchar3);
+long3 __ovld __cnfn convert_long3_sat(uchar3);
+long3 __ovld __cnfn convert_long3_rte(short3);
+long3 __ovld __cnfn convert_long3_sat_rte(short3);
+long3 __ovld __cnfn convert_long3_rtz(short3);
+long3 __ovld __cnfn convert_long3_sat_rtz(short3);
+long3 __ovld __cnfn convert_long3_rtp(short3);
+long3 __ovld __cnfn convert_long3_sat_rtp(short3);
+long3 __ovld __cnfn convert_long3_rtn(short3);
+long3 __ovld __cnfn convert_long3_sat_rtn(short3);
+long3 __ovld __cnfn convert_long3(short3);
+long3 __ovld __cnfn convert_long3_sat(short3);
+long3 __ovld __cnfn convert_long3_rte(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
+long3 __ovld __cnfn convert_long3_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_rtn(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
+long3 __ovld __cnfn convert_long3(ushort3);
+long3 __ovld __cnfn convert_long3_sat(ushort3);
+long3 __ovld __cnfn convert_long3_rte(int3);
+long3 __ovld __cnfn convert_long3_sat_rte(int3);
+long3 __ovld __cnfn convert_long3_rtz(int3);
+long3 __ovld __cnfn convert_long3_sat_rtz(int3);
+long3 __ovld __cnfn convert_long3_rtp(int3);
+long3 __ovld __cnfn convert_long3_sat_rtp(int3);
+long3 __ovld __cnfn convert_long3_rtn(int3);
+long3 __ovld __cnfn convert_long3_sat_rtn(int3);
+long3 __ovld __cnfn convert_long3(int3);
+long3 __ovld __cnfn convert_long3_sat(int3);
+long3 __ovld __cnfn convert_long3_rte(uint3);
+long3 __ovld __cnfn convert_long3_sat_rte(uint3);
+long3 __ovld __cnfn convert_long3_rtz(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
+long3 __ovld __cnfn convert_long3_rtp(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
+long3 __ovld __cnfn convert_long3_rtn(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
+long3 __ovld __cnfn convert_long3(uint3);
+long3 __ovld __cnfn convert_long3_sat(uint3);
+long3 __ovld __cnfn convert_long3_rte(long3);
+long3 __ovld __cnfn convert_long3_sat_rte(long3);
+long3 __ovld __cnfn convert_long3_rtz(long3);
+long3 __ovld __cnfn convert_long3_sat_rtz(long3);
+long3 __ovld __cnfn convert_long3_rtp(long3);
+long3 __ovld __cnfn convert_long3_sat_rtp(long3);
+long3 __ovld __cnfn convert_long3_rtn(long3);
+long3 __ovld __cnfn convert_long3_sat_rtn(long3);
+long3 __ovld __cnfn convert_long3(long3);
+long3 __ovld __cnfn convert_long3_sat(long3);
+long3 __ovld __cnfn convert_long3_rte(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
+long3 __ovld __cnfn convert_long3_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_rtn(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
+long3 __ovld __cnfn convert_long3(ulong3);
+long3 __ovld __cnfn convert_long3_sat(ulong3);
+long3 __ovld __cnfn convert_long3_rte(float3);
+long3 __ovld __cnfn convert_long3_sat_rte(float3);
+long3 __ovld __cnfn convert_long3_rtz(float3);
+long3 __ovld __cnfn convert_long3_sat_rtz(float3);
+long3 __ovld __cnfn convert_long3_rtp(float3);
+long3 __ovld __cnfn convert_long3_sat_rtp(float3);
+long3 __ovld __cnfn convert_long3_rtn(float3);
+long3 __ovld __cnfn convert_long3_sat_rtn(float3);
+long3 __ovld __cnfn convert_long3(float3);
+long3 __ovld __cnfn convert_long3_sat(float3);
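+// Conversions to ulong3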
+ulong3 __ovld __cnfn convert_ulong3_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat(char3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat(short3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat(int3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat(long3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat(float3);
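+// Conversions to float3 - no _sat variants, since saturation applies only to
+// integer destination types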
+float3 __ovld __cnfn convert_float3_rte(char3);
+float3 __ovld __cnfn convert_float3_rtz(char3);
+float3 __ovld __cnfn convert_float3_rtp(char3);
+float3 __ovld __cnfn convert_float3_rtn(char3);
+float3 __ovld __cnfn convert_float3(char3);
+float3 __ovld __cnfn convert_float3_rte(uchar3);
+float3 __ovld __cnfn convert_float3_rtz(uchar3);
+float3 __ovld __cnfn convert_float3_rtp(uchar3);
+float3 __ovld __cnfn convert_float3_rtn(uchar3);
+float3 __ovld __cnfn convert_float3(uchar3);
+float3 __ovld __cnfn convert_float3_rte(short3);
+float3 __ovld __cnfn convert_float3_rtz(short3);
+float3 __ovld __cnfn convert_float3_rtp(short3);
+float3 __ovld __cnfn convert_float3_rtn(short3);
+float3 __ovld __cnfn convert_float3(short3);
+float3 __ovld __cnfn convert_float3_rte(ushort3);
+float3 __ovld __cnfn convert_float3_rtz(ushort3);
+float3 __ovld __cnfn convert_float3_rtp(ushort3);
+float3 __ovld __cnfn convert_float3_rtn(ushort3);
+float3 __ovld __cnfn convert_float3(ushort3);
+float3 __ovld __cnfn convert_float3_rte(int3);
+float3 __ovld __cnfn convert_float3_rtz(int3);
+float3 __ovld __cnfn convert_float3_rtp(int3);
+float3 __ovld __cnfn convert_float3_rtn(int3);
+float3 __ovld __cnfn convert_float3(int3);
+float3 __ovld __cnfn convert_float3_rte(uint3);
+float3 __ovld __cnfn convert_float3_rtz(uint3);
+float3 __ovld __cnfn convert_float3_rtp(uint3);
+float3 __ovld __cnfn convert_float3_rtn(uint3);
+float3 __ovld __cnfn convert_float3(uint3);
+float3 __ovld __cnfn convert_float3_rte(long3);
+float3 __ovld __cnfn convert_float3_rtz(long3);
+float3 __ovld __cnfn convert_float3_rtp(long3);
+float3 __ovld __cnfn convert_float3_rtn(long3);
+float3 __ovld __cnfn convert_float3(long3);
+float3 __ovld __cnfn convert_float3_rte(ulong3);
+float3 __ovld __cnfn convert_float3_rtz(ulong3);
+float3 __ovld __cnfn convert_float3_rtp(ulong3);
+float3 __ovld __cnfn convert_float3_rtn(ulong3);
+float3 __ovld __cnfn convert_float3(ulong3);
+float3 __ovld __cnfn convert_float3_rte(float3);
+float3 __ovld __cnfn convert_float3_rtz(float3);
+float3 __ovld __cnfn convert_float3_rtp(float3);
+float3 __ovld __cnfn convert_float3_rtn(float3);
+float3 __ovld __cnfn convert_float3(float3);
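+// Example: a _sat conversion from float rounds each lane per the suffix
+// (here rte = round to nearest even) and clamps out-of-range lanes to the
+// destination range:
+//   float3 f = (float3)(-1.5f, 64.5f, 300.0f);
+//   uchar3 u = convert_uchar3_sat_rte(f); // (0, 64, 255)
+
+// Conversions to 4-element vector types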
+char4 __ovld __cnfn convert_char4_rte(char4);
+char4 __ovld __cnfn convert_char4_sat_rte(char4);
+char4 __ovld __cnfn convert_char4_rtz(char4);
+char4 __ovld __cnfn convert_char4_sat_rtz(char4);
+char4 __ovld __cnfn convert_char4_rtp(char4);
+char4 __ovld __cnfn convert_char4_sat_rtp(char4);
+char4 __ovld __cnfn convert_char4_rtn(char4);
+char4 __ovld __cnfn convert_char4_sat_rtn(char4);
+char4 __ovld __cnfn convert_char4(char4);
+char4 __ovld __cnfn convert_char4_sat(char4);
+char4 __ovld __cnfn convert_char4_rte(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
+char4 __ovld __cnfn convert_char4_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_rtn(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
+char4 __ovld __cnfn convert_char4(uchar4);
+char4 __ovld __cnfn convert_char4_sat(uchar4);
+char4 __ovld __cnfn convert_char4_rte(short4);
+char4 __ovld __cnfn convert_char4_sat_rte(short4);
+char4 __ovld __cnfn convert_char4_rtz(short4);
+char4 __ovld __cnfn convert_char4_sat_rtz(short4);
+char4 __ovld __cnfn convert_char4_rtp(short4);
+char4 __ovld __cnfn convert_char4_sat_rtp(short4);
+char4 __ovld __cnfn convert_char4_rtn(short4);
+char4 __ovld __cnfn convert_char4_sat_rtn(short4);
+char4 __ovld __cnfn convert_char4(short4);
+char4 __ovld __cnfn convert_char4_sat(short4);
+char4 __ovld __cnfn convert_char4_rte(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
+char4 __ovld __cnfn convert_char4_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_rtn(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
+char4 __ovld __cnfn convert_char4(ushort4);
+char4 __ovld __cnfn convert_char4_sat(ushort4);
+char4 __ovld __cnfn convert_char4_rte(int4);
+char4 __ovld __cnfn convert_char4_sat_rte(int4);
+char4 __ovld __cnfn convert_char4_rtz(int4);
+char4 __ovld __cnfn convert_char4_sat_rtz(int4);
+char4 __ovld __cnfn convert_char4_rtp(int4);
+char4 __ovld __cnfn convert_char4_sat_rtp(int4);
+char4 __ovld __cnfn convert_char4_rtn(int4);
+char4 __ovld __cnfn convert_char4_sat_rtn(int4);
+char4 __ovld __cnfn convert_char4(int4);
+char4 __ovld __cnfn convert_char4_sat(int4);
+char4 __ovld __cnfn convert_char4_rte(uint4);
+char4 __ovld __cnfn convert_char4_sat_rte(uint4);
+char4 __ovld __cnfn convert_char4_rtz(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
+char4 __ovld __cnfn convert_char4_rtp(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
+char4 __ovld __cnfn convert_char4_rtn(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
+char4 __ovld __cnfn convert_char4(uint4);
+char4 __ovld __cnfn convert_char4_sat(uint4);
+char4 __ovld __cnfn convert_char4_rte(long4);
+char4 __ovld __cnfn convert_char4_sat_rte(long4);
+char4 __ovld __cnfn convert_char4_rtz(long4);
+char4 __ovld __cnfn convert_char4_sat_rtz(long4);
+char4 __ovld __cnfn convert_char4_rtp(long4);
+char4 __ovld __cnfn convert_char4_sat_rtp(long4);
+char4 __ovld __cnfn convert_char4_rtn(long4);
+char4 __ovld __cnfn convert_char4_sat_rtn(long4);
+char4 __ovld __cnfn convert_char4(long4);
+char4 __ovld __cnfn convert_char4_sat(long4);
+char4 __ovld __cnfn convert_char4_rte(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
+char4 __ovld __cnfn convert_char4_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_rtn(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
+char4 __ovld __cnfn convert_char4(ulong4);
+char4 __ovld __cnfn convert_char4_sat(ulong4);
+char4 __ovld __cnfn convert_char4_rte(float4);
+char4 __ovld __cnfn convert_char4_sat_rte(float4);
+char4 __ovld __cnfn convert_char4_rtz(float4);
+char4 __ovld __cnfn convert_char4_sat_rtz(float4);
+char4 __ovld __cnfn convert_char4_rtp(float4);
+char4 __ovld __cnfn convert_char4_sat_rtp(float4);
+char4 __ovld __cnfn convert_char4_rtn(float4);
+char4 __ovld __cnfn convert_char4_sat_rtn(float4);
+char4 __ovld __cnfn convert_char4(float4);
+char4 __ovld __cnfn convert_char4_sat(float4);
+uchar4 __ovld __cnfn convert_uchar4_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat(char4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat(short4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat(int4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat(long4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat(float4);
+short4 __ovld __cnfn convert_short4_rte(char4);
+short4 __ovld __cnfn convert_short4_sat_rte(char4);
+short4 __ovld __cnfn convert_short4_rtz(char4);
+short4 __ovld __cnfn convert_short4_sat_rtz(char4);
+short4 __ovld __cnfn convert_short4_rtp(char4);
+short4 __ovld __cnfn convert_short4_sat_rtp(char4);
+short4 __ovld __cnfn convert_short4_rtn(char4);
+short4 __ovld __cnfn convert_short4_sat_rtn(char4);
+short4 __ovld __cnfn convert_short4(char4);
+short4 __ovld __cnfn convert_short4_sat(char4);
+short4 __ovld __cnfn convert_short4_rte(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
+short4 __ovld __cnfn convert_short4_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_rtn(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
+short4 __ovld __cnfn convert_short4(uchar4);
+short4 __ovld __cnfn convert_short4_sat(uchar4);
+short4 __ovld __cnfn convert_short4_rte(short4);
+short4 __ovld __cnfn convert_short4_sat_rte(short4);
+short4 __ovld __cnfn convert_short4_rtz(short4);
+short4 __ovld __cnfn convert_short4_sat_rtz(short4);
+short4 __ovld __cnfn convert_short4_rtp(short4);
+short4 __ovld __cnfn convert_short4_sat_rtp(short4);
+short4 __ovld __cnfn convert_short4_rtn(short4);
+short4 __ovld __cnfn convert_short4_sat_rtn(short4);
+short4 __ovld __cnfn convert_short4(short4);
+short4 __ovld __cnfn convert_short4_sat(short4);
+short4 __ovld __cnfn convert_short4_rte(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
+short4 __ovld __cnfn convert_short4_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_rtn(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
+short4 __ovld __cnfn convert_short4(ushort4);
+short4 __ovld __cnfn convert_short4_sat(ushort4);
+short4 __ovld __cnfn convert_short4_rte(int4);
+short4 __ovld __cnfn convert_short4_sat_rte(int4);
+short4 __ovld __cnfn convert_short4_rtz(int4);
+short4 __ovld __cnfn convert_short4_sat_rtz(int4);
+short4 __ovld __cnfn convert_short4_rtp(int4);
+short4 __ovld __cnfn convert_short4_sat_rtp(int4);
+short4 __ovld __cnfn convert_short4_rtn(int4);
+short4 __ovld __cnfn convert_short4_sat_rtn(int4);
+short4 __ovld __cnfn convert_short4(int4);
+short4 __ovld __cnfn convert_short4_sat(int4);
+short4 __ovld __cnfn convert_short4_rte(uint4);
+short4 __ovld __cnfn convert_short4_sat_rte(uint4);
+short4 __ovld __cnfn convert_short4_rtz(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
+short4 __ovld __cnfn convert_short4_rtp(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
+short4 __ovld __cnfn convert_short4_rtn(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
+short4 __ovld __cnfn convert_short4(uint4);
+short4 __ovld __cnfn convert_short4_sat(uint4);
+short4 __ovld __cnfn convert_short4_rte(long4);
+short4 __ovld __cnfn convert_short4_sat_rte(long4);
+short4 __ovld __cnfn convert_short4_rtz(long4);
+short4 __ovld __cnfn convert_short4_sat_rtz(long4);
+short4 __ovld __cnfn convert_short4_rtp(long4);
+short4 __ovld __cnfn convert_short4_sat_rtp(long4);
+short4 __ovld __cnfn convert_short4_rtn(long4);
+short4 __ovld __cnfn convert_short4_sat_rtn(long4);
+short4 __ovld __cnfn convert_short4(long4);
+short4 __ovld __cnfn convert_short4_sat(long4);
+short4 __ovld __cnfn convert_short4_rte(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
+short4 __ovld __cnfn convert_short4_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_rtn(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
+short4 __ovld __cnfn convert_short4(ulong4);
+short4 __ovld __cnfn convert_short4_sat(ulong4);
+short4 __ovld __cnfn convert_short4_rte(float4);
+short4 __ovld __cnfn convert_short4_sat_rte(float4);
+short4 __ovld __cnfn convert_short4_rtz(float4);
+short4 __ovld __cnfn convert_short4_sat_rtz(float4);
+short4 __ovld __cnfn convert_short4_rtp(float4);
+short4 __ovld __cnfn convert_short4_sat_rtp(float4);
+short4 __ovld __cnfn convert_short4_rtn(float4);
+short4 __ovld __cnfn convert_short4_sat_rtn(float4);
+short4 __ovld __cnfn convert_short4(float4);
+short4 __ovld __cnfn convert_short4_sat(float4);
+ushort4 __ovld __cnfn convert_ushort4_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat(char4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat(short4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat(int4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat(long4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat(float4);
+int4 __ovld __cnfn convert_int4_rte(char4);
+int4 __ovld __cnfn convert_int4_sat_rte(char4);
+int4 __ovld __cnfn convert_int4_rtz(char4);
+int4 __ovld __cnfn convert_int4_sat_rtz(char4);
+int4 __ovld __cnfn convert_int4_rtp(char4);
+int4 __ovld __cnfn convert_int4_sat_rtp(char4);
+int4 __ovld __cnfn convert_int4_rtn(char4);
+int4 __ovld __cnfn convert_int4_sat_rtn(char4);
+int4 __ovld __cnfn convert_int4(char4);
+int4 __ovld __cnfn convert_int4_sat(char4);
+int4 __ovld __cnfn convert_int4_rte(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
+int4 __ovld __cnfn convert_int4_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_rtn(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
+int4 __ovld __cnfn convert_int4(uchar4);
+int4 __ovld __cnfn convert_int4_sat(uchar4);
+int4 __ovld __cnfn convert_int4_rte(short4);
+int4 __ovld __cnfn convert_int4_sat_rte(short4);
+int4 __ovld __cnfn convert_int4_rtz(short4);
+int4 __ovld __cnfn convert_int4_sat_rtz(short4);
+int4 __ovld __cnfn convert_int4_rtp(short4);
+int4 __ovld __cnfn convert_int4_sat_rtp(short4);
+int4 __ovld __cnfn convert_int4_rtn(short4);
+int4 __ovld __cnfn convert_int4_sat_rtn(short4);
+int4 __ovld __cnfn convert_int4(short4);
+int4 __ovld __cnfn convert_int4_sat(short4);
+int4 __ovld __cnfn convert_int4_rte(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
+int4 __ovld __cnfn convert_int4_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_rtn(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
+int4 __ovld __cnfn convert_int4(ushort4);
+int4 __ovld __cnfn convert_int4_sat(ushort4);
+int4 __ovld __cnfn convert_int4_rte(int4);
+int4 __ovld __cnfn convert_int4_sat_rte(int4);
+int4 __ovld __cnfn convert_int4_rtz(int4);
+int4 __ovld __cnfn convert_int4_sat_rtz(int4);
+int4 __ovld __cnfn convert_int4_rtp(int4);
+int4 __ovld __cnfn convert_int4_sat_rtp(int4);
+int4 __ovld __cnfn convert_int4_rtn(int4);
+int4 __ovld __cnfn convert_int4_sat_rtn(int4);
+int4 __ovld __cnfn convert_int4(int4);
+int4 __ovld __cnfn convert_int4_sat(int4);
+int4 __ovld __cnfn convert_int4_rte(uint4);
+int4 __ovld __cnfn convert_int4_sat_rte(uint4);
+int4 __ovld __cnfn convert_int4_rtz(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
+int4 __ovld __cnfn convert_int4_rtp(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
+int4 __ovld __cnfn convert_int4_rtn(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
+int4 __ovld __cnfn convert_int4(uint4);
+int4 __ovld __cnfn convert_int4_sat(uint4);
+int4 __ovld __cnfn convert_int4_rte(long4);
+int4 __ovld __cnfn convert_int4_sat_rte(long4);
+int4 __ovld __cnfn convert_int4_rtz(long4);
+int4 __ovld __cnfn convert_int4_sat_rtz(long4);
+int4 __ovld __cnfn convert_int4_rtp(long4);
+int4 __ovld __cnfn convert_int4_sat_rtp(long4);
+int4 __ovld __cnfn convert_int4_rtn(long4);
+int4 __ovld __cnfn convert_int4_sat_rtn(long4);
+int4 __ovld __cnfn convert_int4(long4);
+int4 __ovld __cnfn convert_int4_sat(long4);
+int4 __ovld __cnfn convert_int4_rte(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
+int4 __ovld __cnfn convert_int4_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_rtn(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
+int4 __ovld __cnfn convert_int4(ulong4);
+int4 __ovld __cnfn convert_int4_sat(ulong4);
+int4 __ovld __cnfn convert_int4_rte(float4);
+int4 __ovld __cnfn convert_int4_sat_rte(float4);
+int4 __ovld __cnfn convert_int4_rtz(float4);
+int4 __ovld __cnfn convert_int4_sat_rtz(float4);
+int4 __ovld __cnfn convert_int4_rtp(float4);
+int4 __ovld __cnfn convert_int4_sat_rtp(float4);
+int4 __ovld __cnfn convert_int4_rtn(float4);
+int4 __ovld __cnfn convert_int4_sat_rtn(float4);
+int4 __ovld __cnfn convert_int4(float4);
+int4 __ovld __cnfn convert_int4_sat(float4);
+uint4 __ovld __cnfn convert_uint4_rte(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
+uint4 __ovld __cnfn convert_uint4_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_rtn(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
+uint4 __ovld __cnfn convert_uint4(char4);
+uint4 __ovld __cnfn convert_uint4_sat(char4);
+uint4 __ovld __cnfn convert_uint4_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat(uchar4);
+uint4 __ovld __cnfn convert_uint4_rte(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
+uint4 __ovld __cnfn convert_uint4_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_rtn(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
+uint4 __ovld __cnfn convert_uint4(short4);
+uint4 __ovld __cnfn convert_uint4_sat(short4);
+uint4 __ovld __cnfn convert_uint4_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat(ushort4);
+uint4 __ovld __cnfn convert_uint4_rte(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
+uint4 __ovld __cnfn convert_uint4_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_rtn(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
+uint4 __ovld __cnfn convert_uint4(int4);
+uint4 __ovld __cnfn convert_uint4_sat(int4);
+uint4 __ovld __cnfn convert_uint4_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4(uint4);
+uint4 __ovld __cnfn convert_uint4_sat(uint4);
+uint4 __ovld __cnfn convert_uint4_rte(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
+uint4 __ovld __cnfn convert_uint4_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_rtn(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
+uint4 __ovld __cnfn convert_uint4(long4);
+uint4 __ovld __cnfn convert_uint4_sat(long4);
+uint4 __ovld __cnfn convert_uint4_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat(ulong4);
+uint4 __ovld __cnfn convert_uint4_rte(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
+uint4 __ovld __cnfn convert_uint4_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_rtn(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
+uint4 __ovld __cnfn convert_uint4(float4);
+uint4 __ovld __cnfn convert_uint4_sat(float4);
+long4 __ovld __cnfn convert_long4_rte(char4);
+long4 __ovld __cnfn convert_long4_sat_rte(char4);
+long4 __ovld __cnfn convert_long4_rtz(char4);
+long4 __ovld __cnfn convert_long4_sat_rtz(char4);
+long4 __ovld __cnfn convert_long4_rtp(char4);
+long4 __ovld __cnfn convert_long4_sat_rtp(char4);
+long4 __ovld __cnfn convert_long4_rtn(char4);
+long4 __ovld __cnfn convert_long4_sat_rtn(char4);
+long4 __ovld __cnfn convert_long4(char4);
+long4 __ovld __cnfn convert_long4_sat(char4);
+long4 __ovld __cnfn convert_long4_rte(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
+long4 __ovld __cnfn convert_long4_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_rtn(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
+long4 __ovld __cnfn convert_long4(uchar4);
+long4 __ovld __cnfn convert_long4_sat(uchar4);
+long4 __ovld __cnfn convert_long4_rte(short4);
+long4 __ovld __cnfn convert_long4_sat_rte(short4);
+long4 __ovld __cnfn convert_long4_rtz(short4);
+long4 __ovld __cnfn convert_long4_sat_rtz(short4);
+long4 __ovld __cnfn convert_long4_rtp(short4);
+long4 __ovld __cnfn convert_long4_sat_rtp(short4);
+long4 __ovld __cnfn convert_long4_rtn(short4);
+long4 __ovld __cnfn convert_long4_sat_rtn(short4);
+long4 __ovld __cnfn convert_long4(short4);
+long4 __ovld __cnfn convert_long4_sat(short4);
+long4 __ovld __cnfn convert_long4_rte(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
+long4 __ovld __cnfn convert_long4_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_rtn(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
+long4 __ovld __cnfn convert_long4(ushort4);
+long4 __ovld __cnfn convert_long4_sat(ushort4);
+long4 __ovld __cnfn convert_long4_rte(int4);
+long4 __ovld __cnfn convert_long4_sat_rte(int4);
+long4 __ovld __cnfn convert_long4_rtz(int4);
+long4 __ovld __cnfn convert_long4_sat_rtz(int4);
+long4 __ovld __cnfn convert_long4_rtp(int4);
+long4 __ovld __cnfn convert_long4_sat_rtp(int4);
+long4 __ovld __cnfn convert_long4_rtn(int4);
+long4 __ovld __cnfn convert_long4_sat_rtn(int4);
+long4 __ovld __cnfn convert_long4(int4);
+long4 __ovld __cnfn convert_long4_sat(int4);
+long4 __ovld __cnfn convert_long4_rte(uint4);
+long4 __ovld __cnfn convert_long4_sat_rte(uint4);
+long4 __ovld __cnfn convert_long4_rtz(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
+long4 __ovld __cnfn convert_long4_rtp(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
+long4 __ovld __cnfn convert_long4_rtn(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
+long4 __ovld __cnfn convert_long4(uint4);
+long4 __ovld __cnfn convert_long4_sat(uint4);
+long4 __ovld __cnfn convert_long4_rte(long4);
+long4 __ovld __cnfn convert_long4_sat_rte(long4);
+long4 __ovld __cnfn convert_long4_rtz(long4);
+long4 __ovld __cnfn convert_long4_sat_rtz(long4);
+long4 __ovld __cnfn convert_long4_rtp(long4);
+long4 __ovld __cnfn convert_long4_sat_rtp(long4);
+long4 __ovld __cnfn convert_long4_rtn(long4);
+long4 __ovld __cnfn convert_long4_sat_rtn(long4);
+long4 __ovld __cnfn convert_long4(long4);
+long4 __ovld __cnfn convert_long4_sat(long4);
+long4 __ovld __cnfn convert_long4_rte(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
+long4 __ovld __cnfn convert_long4_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_rtn(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
+long4 __ovld __cnfn convert_long4(ulong4);
+long4 __ovld __cnfn convert_long4_sat(ulong4);
+long4 __ovld __cnfn convert_long4_rte(float4);
+long4 __ovld __cnfn convert_long4_sat_rte(float4);
+long4 __ovld __cnfn convert_long4_rtz(float4);
+long4 __ovld __cnfn convert_long4_sat_rtz(float4);
+long4 __ovld __cnfn convert_long4_rtp(float4);
+long4 __ovld __cnfn convert_long4_sat_rtp(float4);
+long4 __ovld __cnfn convert_long4_rtn(float4);
+long4 __ovld __cnfn convert_long4_sat_rtn(float4);
+long4 __ovld __cnfn convert_long4(float4);
+long4 __ovld __cnfn convert_long4_sat(float4);
+ulong4 __ovld __cnfn convert_ulong4_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat(char4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat(short4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat(int4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat(long4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat(float4);
+float4 __ovld __cnfn convert_float4_rte(char4);
+float4 __ovld __cnfn convert_float4_rtz(char4);
+float4 __ovld __cnfn convert_float4_rtp(char4);
+float4 __ovld __cnfn convert_float4_rtn(char4);
+float4 __ovld __cnfn convert_float4(char4);
+float4 __ovld __cnfn convert_float4_rte(uchar4);
+float4 __ovld __cnfn convert_float4_rtz(uchar4);
+float4 __ovld __cnfn convert_float4_rtp(uchar4);
+float4 __ovld __cnfn convert_float4_rtn(uchar4);
+float4 __ovld __cnfn convert_float4(uchar4);
+float4 __ovld __cnfn convert_float4_rte(short4);
+float4 __ovld __cnfn convert_float4_rtz(short4);
+float4 __ovld __cnfn convert_float4_rtp(short4);
+float4 __ovld __cnfn convert_float4_rtn(short4);
+float4 __ovld __cnfn convert_float4(short4);
+float4 __ovld __cnfn convert_float4_rte(ushort4);
+float4 __ovld __cnfn convert_float4_rtz(ushort4);
+float4 __ovld __cnfn convert_float4_rtp(ushort4);
+float4 __ovld __cnfn convert_float4_rtn(ushort4);
+float4 __ovld __cnfn convert_float4(ushort4);
+float4 __ovld __cnfn convert_float4_rte(int4);
+float4 __ovld __cnfn convert_float4_rtz(int4);
+float4 __ovld __cnfn convert_float4_rtp(int4);
+float4 __ovld __cnfn convert_float4_rtn(int4);
+float4 __ovld __cnfn convert_float4(int4);
+float4 __ovld __cnfn convert_float4_rte(uint4);
+float4 __ovld __cnfn convert_float4_rtz(uint4);
+float4 __ovld __cnfn convert_float4_rtp(uint4);
+float4 __ovld __cnfn convert_float4_rtn(uint4);
+float4 __ovld __cnfn convert_float4(uint4);
+float4 __ovld __cnfn convert_float4_rte(long4);
+float4 __ovld __cnfn convert_float4_rtz(long4);
+float4 __ovld __cnfn convert_float4_rtp(long4);
+float4 __ovld __cnfn convert_float4_rtn(long4);
+float4 __ovld __cnfn convert_float4(long4);
+float4 __ovld __cnfn convert_float4_rte(ulong4);
+float4 __ovld __cnfn convert_float4_rtz(ulong4);
+float4 __ovld __cnfn convert_float4_rtp(ulong4);
+float4 __ovld __cnfn convert_float4_rtn(ulong4);
+float4 __ovld __cnfn convert_float4(ulong4);
+float4 __ovld __cnfn convert_float4_rte(float4);
+float4 __ovld __cnfn convert_float4_rtz(float4);
+float4 __ovld __cnfn convert_float4_rtp(float4);
+float4 __ovld __cnfn convert_float4_rtn(float4);
+float4 __ovld __cnfn convert_float4(float4);
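+
+// A minimal usage sketch, assuming a hypothetical kernel and buffers `src`
+// and `dst` (neither is part of this header): the declarations above follow
+// the convert_<destType><n>[_sat][_<rounding>] scheme from OpenCL C s6.2.3,
+// where _sat clamps out-of-range values to the destination range and
+// _rte/_rtz/_rtp/_rtn select round-to-nearest-even, toward zero, toward
+// positive infinity, and toward negative infinity, respectively.
+//
+//   __kernel void quantize(__global const float4 *src, __global int4 *dst) {
+//       size_t i = get_global_id(0);
+//       // Saturating float -> int conversion, rounding to nearest even.
+//       dst[i] = convert_int4_sat_rte(src[i]);
+//   }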
+char8 __ovld __cnfn convert_char8_rte(char8);
+char8 __ovld __cnfn convert_char8_sat_rte(char8);
+char8 __ovld __cnfn convert_char8_rtz(char8);
+char8 __ovld __cnfn convert_char8_sat_rtz(char8);
+char8 __ovld __cnfn convert_char8_rtp(char8);
+char8 __ovld __cnfn convert_char8_sat_rtp(char8);
+char8 __ovld __cnfn convert_char8_rtn(char8);
+char8 __ovld __cnfn convert_char8_sat_rtn(char8);
+char8 __ovld __cnfn convert_char8(char8);
+char8 __ovld __cnfn convert_char8_sat(char8);
+char8 __ovld __cnfn convert_char8_rte(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
+char8 __ovld __cnfn convert_char8_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_rtn(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
+char8 __ovld __cnfn convert_char8(uchar8);
+char8 __ovld __cnfn convert_char8_sat(uchar8);
+char8 __ovld __cnfn convert_char8_rte(short8);
+char8 __ovld __cnfn convert_char8_sat_rte(short8);
+char8 __ovld __cnfn convert_char8_rtz(short8);
+char8 __ovld __cnfn convert_char8_sat_rtz(short8);
+char8 __ovld __cnfn convert_char8_rtp(short8);
+char8 __ovld __cnfn convert_char8_sat_rtp(short8);
+char8 __ovld __cnfn convert_char8_rtn(short8);
+char8 __ovld __cnfn convert_char8_sat_rtn(short8);
+char8 __ovld __cnfn convert_char8(short8);
+char8 __ovld __cnfn convert_char8_sat(short8);
+char8 __ovld __cnfn convert_char8_rte(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
+char8 __ovld __cnfn convert_char8_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_rtn(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
+char8 __ovld __cnfn convert_char8(ushort8);
+char8 __ovld __cnfn convert_char8_sat(ushort8);
+char8 __ovld __cnfn convert_char8_rte(int8);
+char8 __ovld __cnfn convert_char8_sat_rte(int8);
+char8 __ovld __cnfn convert_char8_rtz(int8);
+char8 __ovld __cnfn convert_char8_sat_rtz(int8);
+char8 __ovld __cnfn convert_char8_rtp(int8);
+char8 __ovld __cnfn convert_char8_sat_rtp(int8);
+char8 __ovld __cnfn convert_char8_rtn(int8);
+char8 __ovld __cnfn convert_char8_sat_rtn(int8);
+char8 __ovld __cnfn convert_char8(int8);
+char8 __ovld __cnfn convert_char8_sat(int8);
+char8 __ovld __cnfn convert_char8_rte(uint8);
+char8 __ovld __cnfn convert_char8_sat_rte(uint8);
+char8 __ovld __cnfn convert_char8_rtz(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
+char8 __ovld __cnfn convert_char8_rtp(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
+char8 __ovld __cnfn convert_char8_rtn(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
+char8 __ovld __cnfn convert_char8(uint8);
+char8 __ovld __cnfn convert_char8_sat(uint8);
+char8 __ovld __cnfn convert_char8_rte(long8);
+char8 __ovld __cnfn convert_char8_sat_rte(long8);
+char8 __ovld __cnfn convert_char8_rtz(long8);
+char8 __ovld __cnfn convert_char8_sat_rtz(long8);
+char8 __ovld __cnfn convert_char8_rtp(long8);
+char8 __ovld __cnfn convert_char8_sat_rtp(long8);
+char8 __ovld __cnfn convert_char8_rtn(long8);
+char8 __ovld __cnfn convert_char8_sat_rtn(long8);
+char8 __ovld __cnfn convert_char8(long8);
+char8 __ovld __cnfn convert_char8_sat(long8);
+char8 __ovld __cnfn convert_char8_rte(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
+char8 __ovld __cnfn convert_char8_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_rtn(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
+char8 __ovld __cnfn convert_char8(ulong8);
+char8 __ovld __cnfn convert_char8_sat(ulong8);
+char8 __ovld __cnfn convert_char8_rte(float8);
+char8 __ovld __cnfn convert_char8_sat_rte(float8);
+char8 __ovld __cnfn convert_char8_rtz(float8);
+char8 __ovld __cnfn convert_char8_sat_rtz(float8);
+char8 __ovld __cnfn convert_char8_rtp(float8);
+char8 __ovld __cnfn convert_char8_sat_rtp(float8);
+char8 __ovld __cnfn convert_char8_rtn(float8);
+char8 __ovld __cnfn convert_char8_sat_rtn(float8);
+char8 __ovld __cnfn convert_char8(float8);
+char8 __ovld __cnfn convert_char8_sat(float8);
+uchar8 __ovld __cnfn convert_uchar8_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat(char8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat(short8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat(int8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat(long8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat(float8);
+short8 __ovld __cnfn convert_short8_rte(char8);
+short8 __ovld __cnfn convert_short8_sat_rte(char8);
+short8 __ovld __cnfn convert_short8_rtz(char8);
+short8 __ovld __cnfn convert_short8_sat_rtz(char8);
+short8 __ovld __cnfn convert_short8_rtp(char8);
+short8 __ovld __cnfn convert_short8_sat_rtp(char8);
+short8 __ovld __cnfn convert_short8_rtn(char8);
+short8 __ovld __cnfn convert_short8_sat_rtn(char8);
+short8 __ovld __cnfn convert_short8(char8);
+short8 __ovld __cnfn convert_short8_sat(char8);
+short8 __ovld __cnfn convert_short8_rte(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
+short8 __ovld __cnfn convert_short8_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_rtn(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
+short8 __ovld __cnfn convert_short8(uchar8);
+short8 __ovld __cnfn convert_short8_sat(uchar8);
+short8 __ovld __cnfn convert_short8_rte(short8);
+short8 __ovld __cnfn convert_short8_sat_rte(short8);
+short8 __ovld __cnfn convert_short8_rtz(short8);
+short8 __ovld __cnfn convert_short8_sat_rtz(short8);
+short8 __ovld __cnfn convert_short8_rtp(short8);
+short8 __ovld __cnfn convert_short8_sat_rtp(short8);
+short8 __ovld __cnfn convert_short8_rtn(short8);
+short8 __ovld __cnfn convert_short8_sat_rtn(short8);
+short8 __ovld __cnfn convert_short8(short8);
+short8 __ovld __cnfn convert_short8_sat(short8);
+short8 __ovld __cnfn convert_short8_rte(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
+short8 __ovld __cnfn convert_short8_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_rtn(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
+short8 __ovld __cnfn convert_short8(ushort8);
+short8 __ovld __cnfn convert_short8_sat(ushort8);
+short8 __ovld __cnfn convert_short8_rte(int8);
+short8 __ovld __cnfn convert_short8_sat_rte(int8);
+short8 __ovld __cnfn convert_short8_rtz(int8);
+short8 __ovld __cnfn convert_short8_sat_rtz(int8);
+short8 __ovld __cnfn convert_short8_rtp(int8);
+short8 __ovld __cnfn convert_short8_sat_rtp(int8);
+short8 __ovld __cnfn convert_short8_rtn(int8);
+short8 __ovld __cnfn convert_short8_sat_rtn(int8);
+short8 __ovld __cnfn convert_short8(int8);
+short8 __ovld __cnfn convert_short8_sat(int8);
+short8 __ovld __cnfn convert_short8_rte(uint8);
+short8 __ovld __cnfn convert_short8_sat_rte(uint8);
+short8 __ovld __cnfn convert_short8_rtz(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
+short8 __ovld __cnfn convert_short8_rtp(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
+short8 __ovld __cnfn convert_short8_rtn(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
+short8 __ovld __cnfn convert_short8(uint8);
+short8 __ovld __cnfn convert_short8_sat(uint8);
+short8 __ovld __cnfn convert_short8_rte(long8);
+short8 __ovld __cnfn convert_short8_sat_rte(long8);
+short8 __ovld __cnfn convert_short8_rtz(long8);
+short8 __ovld __cnfn convert_short8_sat_rtz(long8);
+short8 __ovld __cnfn convert_short8_rtp(long8);
+short8 __ovld __cnfn convert_short8_sat_rtp(long8);
+short8 __ovld __cnfn convert_short8_rtn(long8);
+short8 __ovld __cnfn convert_short8_sat_rtn(long8);
+short8 __ovld __cnfn convert_short8(long8);
+short8 __ovld __cnfn convert_short8_sat(long8);
+short8 __ovld __cnfn convert_short8_rte(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
+short8 __ovld __cnfn convert_short8_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_rtn(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
+short8 __ovld __cnfn convert_short8(ulong8);
+short8 __ovld __cnfn convert_short8_sat(ulong8);
+short8 __ovld __cnfn convert_short8_rte(float8);
+short8 __ovld __cnfn convert_short8_sat_rte(float8);
+short8 __ovld __cnfn convert_short8_rtz(float8);
+short8 __ovld __cnfn convert_short8_sat_rtz(float8);
+short8 __ovld __cnfn convert_short8_rtp(float8);
+short8 __ovld __cnfn convert_short8_sat_rtp(float8);
+short8 __ovld __cnfn convert_short8_rtn(float8);
+short8 __ovld __cnfn convert_short8_sat_rtn(float8);
+short8 __ovld __cnfn convert_short8(float8);
+short8 __ovld __cnfn convert_short8_sat(float8);
+ushort8 __ovld __cnfn convert_ushort8_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat(char8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat(short8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat(int8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat(long8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat(float8);
+int8 __ovld __cnfn convert_int8_rte(char8);
+int8 __ovld __cnfn convert_int8_sat_rte(char8);
+int8 __ovld __cnfn convert_int8_rtz(char8);
+int8 __ovld __cnfn convert_int8_sat_rtz(char8);
+int8 __ovld __cnfn convert_int8_rtp(char8);
+int8 __ovld __cnfn convert_int8_sat_rtp(char8);
+int8 __ovld __cnfn convert_int8_rtn(char8);
+int8 __ovld __cnfn convert_int8_sat_rtn(char8);
+int8 __ovld __cnfn convert_int8(char8);
+int8 __ovld __cnfn convert_int8_sat(char8);
+int8 __ovld __cnfn convert_int8_rte(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
+int8 __ovld __cnfn convert_int8_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_rtn(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
+int8 __ovld __cnfn convert_int8(uchar8);
+int8 __ovld __cnfn convert_int8_sat(uchar8);
+int8 __ovld __cnfn convert_int8_rte(short8);
+int8 __ovld __cnfn convert_int8_sat_rte(short8);
+int8 __ovld __cnfn convert_int8_rtz(short8);
+int8 __ovld __cnfn convert_int8_sat_rtz(short8);
+int8 __ovld __cnfn convert_int8_rtp(short8);
+int8 __ovld __cnfn convert_int8_sat_rtp(short8);
+int8 __ovld __cnfn convert_int8_rtn(short8);
+int8 __ovld __cnfn convert_int8_sat_rtn(short8);
+int8 __ovld __cnfn convert_int8(short8);
+int8 __ovld __cnfn convert_int8_sat(short8);
+int8 __ovld __cnfn convert_int8_rte(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
+int8 __ovld __cnfn convert_int8_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_rtn(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
+int8 __ovld __cnfn convert_int8(ushort8);
+int8 __ovld __cnfn convert_int8_sat(ushort8);
+int8 __ovld __cnfn convert_int8_rte(int8);
+int8 __ovld __cnfn convert_int8_sat_rte(int8);
+int8 __ovld __cnfn convert_int8_rtz(int8);
+int8 __ovld __cnfn convert_int8_sat_rtz(int8);
+int8 __ovld __cnfn convert_int8_rtp(int8);
+int8 __ovld __cnfn convert_int8_sat_rtp(int8);
+int8 __ovld __cnfn convert_int8_rtn(int8);
+int8 __ovld __cnfn convert_int8_sat_rtn(int8);
+int8 __ovld __cnfn convert_int8(int8);
+int8 __ovld __cnfn convert_int8_sat(int8);
+int8 __ovld __cnfn convert_int8_rte(uint8);
+int8 __ovld __cnfn convert_int8_sat_rte(uint8);
+int8 __ovld __cnfn convert_int8_rtz(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
+int8 __ovld __cnfn convert_int8_rtp(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
+int8 __ovld __cnfn convert_int8_rtn(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
+int8 __ovld __cnfn convert_int8(uint8);
+int8 __ovld __cnfn convert_int8_sat(uint8);
+int8 __ovld __cnfn convert_int8_rte(long8);
+int8 __ovld __cnfn convert_int8_sat_rte(long8);
+int8 __ovld __cnfn convert_int8_rtz(long8);
+int8 __ovld __cnfn convert_int8_sat_rtz(long8);
+int8 __ovld __cnfn convert_int8_rtp(long8);
+int8 __ovld __cnfn convert_int8_sat_rtp(long8);
+int8 __ovld __cnfn convert_int8_rtn(long8);
+int8 __ovld __cnfn convert_int8_sat_rtn(long8);
+int8 __ovld __cnfn convert_int8(long8);
+int8 __ovld __cnfn convert_int8_sat(long8);
+int8 __ovld __cnfn convert_int8_rte(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
+int8 __ovld __cnfn convert_int8_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_rtn(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
+int8 __ovld __cnfn convert_int8(ulong8);
+int8 __ovld __cnfn convert_int8_sat(ulong8);
+int8 __ovld __cnfn convert_int8_rte(float8);
+int8 __ovld __cnfn convert_int8_sat_rte(float8);
+int8 __ovld __cnfn convert_int8_rtz(float8);
+int8 __ovld __cnfn convert_int8_sat_rtz(float8);
+int8 __ovld __cnfn convert_int8_rtp(float8);
+int8 __ovld __cnfn convert_int8_sat_rtp(float8);
+int8 __ovld __cnfn convert_int8_rtn(float8);
+int8 __ovld __cnfn convert_int8_sat_rtn(float8);
+int8 __ovld __cnfn convert_int8(float8);
+int8 __ovld __cnfn convert_int8_sat(float8);
+uint8 __ovld __cnfn convert_uint8_rte(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
+uint8 __ovld __cnfn convert_uint8_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_rtn(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
+uint8 __ovld __cnfn convert_uint8(char8);
+uint8 __ovld __cnfn convert_uint8_sat(char8);
+uint8 __ovld __cnfn convert_uint8_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat(uchar8);
+uint8 __ovld __cnfn convert_uint8_rte(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
+uint8 __ovld __cnfn convert_uint8_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_rtn(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
+uint8 __ovld __cnfn convert_uint8(short8);
+uint8 __ovld __cnfn convert_uint8_sat(short8);
+uint8 __ovld __cnfn convert_uint8_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat(ushort8);
+uint8 __ovld __cnfn convert_uint8_rte(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
+uint8 __ovld __cnfn convert_uint8_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_rtn(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
+uint8 __ovld __cnfn convert_uint8(int8);
+uint8 __ovld __cnfn convert_uint8_sat(int8);
+uint8 __ovld __cnfn convert_uint8_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8(uint8);
+uint8 __ovld __cnfn convert_uint8_sat(uint8);
+uint8 __ovld __cnfn convert_uint8_rte(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
+uint8 __ovld __cnfn convert_uint8_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_rtn(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
+uint8 __ovld __cnfn convert_uint8(long8);
+uint8 __ovld __cnfn convert_uint8_sat(long8);
+uint8 __ovld __cnfn convert_uint8_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat(ulong8);
+uint8 __ovld __cnfn convert_uint8_rte(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
+uint8 __ovld __cnfn convert_uint8_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_rtn(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
+uint8 __ovld __cnfn convert_uint8(float8);
+uint8 __ovld __cnfn convert_uint8_sat(float8);
+long8 __ovld __cnfn convert_long8_rte(char8);
+long8 __ovld __cnfn convert_long8_sat_rte(char8);
+long8 __ovld __cnfn convert_long8_rtz(char8);
+long8 __ovld __cnfn convert_long8_sat_rtz(char8);
+long8 __ovld __cnfn convert_long8_rtp(char8);
+long8 __ovld __cnfn convert_long8_sat_rtp(char8);
+long8 __ovld __cnfn convert_long8_rtn(char8);
+long8 __ovld __cnfn convert_long8_sat_rtn(char8);
+long8 __ovld __cnfn convert_long8(char8);
+long8 __ovld __cnfn convert_long8_sat(char8);
+long8 __ovld __cnfn convert_long8_rte(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
+long8 __ovld __cnfn convert_long8_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_rtn(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
+long8 __ovld __cnfn convert_long8(uchar8);
+long8 __ovld __cnfn convert_long8_sat(uchar8);
+long8 __ovld __cnfn convert_long8_rte(short8);
+long8 __ovld __cnfn convert_long8_sat_rte(short8);
+long8 __ovld __cnfn convert_long8_rtz(short8);
+long8 __ovld __cnfn convert_long8_sat_rtz(short8);
+long8 __ovld __cnfn convert_long8_rtp(short8);
+long8 __ovld __cnfn convert_long8_sat_rtp(short8);
+long8 __ovld __cnfn convert_long8_rtn(short8);
+long8 __ovld __cnfn convert_long8_sat_rtn(short8);
+long8 __ovld __cnfn convert_long8(short8);
+long8 __ovld __cnfn convert_long8_sat(short8);
+long8 __ovld __cnfn convert_long8_rte(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
+long8 __ovld __cnfn convert_long8_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_rtn(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
+long8 __ovld __cnfn convert_long8(ushort8);
+long8 __ovld __cnfn convert_long8_sat(ushort8);
+long8 __ovld __cnfn convert_long8_rte(int8);
+long8 __ovld __cnfn convert_long8_sat_rte(int8);
+long8 __ovld __cnfn convert_long8_rtz(int8);
+long8 __ovld __cnfn convert_long8_sat_rtz(int8);
+long8 __ovld __cnfn convert_long8_rtp(int8);
+long8 __ovld __cnfn convert_long8_sat_rtp(int8);
+long8 __ovld __cnfn convert_long8_rtn(int8);
+long8 __ovld __cnfn convert_long8_sat_rtn(int8);
+long8 __ovld __cnfn convert_long8(int8);
+long8 __ovld __cnfn convert_long8_sat(int8);
+long8 __ovld __cnfn convert_long8_rte(uint8);
+long8 __ovld __cnfn convert_long8_sat_rte(uint8);
+long8 __ovld __cnfn convert_long8_rtz(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
+long8 __ovld __cnfn convert_long8_rtp(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
+long8 __ovld __cnfn convert_long8_rtn(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
+long8 __ovld __cnfn convert_long8(uint8);
+long8 __ovld __cnfn convert_long8_sat(uint8);
+long8 __ovld __cnfn convert_long8_rte(long8);
+long8 __ovld __cnfn convert_long8_sat_rte(long8);
+long8 __ovld __cnfn convert_long8_rtz(long8);
+long8 __ovld __cnfn convert_long8_sat_rtz(long8);
+long8 __ovld __cnfn convert_long8_rtp(long8);
+long8 __ovld __cnfn convert_long8_sat_rtp(long8);
+long8 __ovld __cnfn convert_long8_rtn(long8);
+long8 __ovld __cnfn convert_long8_sat_rtn(long8);
+long8 __ovld __cnfn convert_long8(long8);
+long8 __ovld __cnfn convert_long8_sat(long8);
+long8 __ovld __cnfn convert_long8_rte(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
+long8 __ovld __cnfn convert_long8_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_rtn(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
+long8 __ovld __cnfn convert_long8(ulong8);
+long8 __ovld __cnfn convert_long8_sat(ulong8);
+long8 __ovld __cnfn convert_long8_rte(float8);
+long8 __ovld __cnfn convert_long8_sat_rte(float8);
+long8 __ovld __cnfn convert_long8_rtz(float8);
+long8 __ovld __cnfn convert_long8_sat_rtz(float8);
+long8 __ovld __cnfn convert_long8_rtp(float8);
+long8 __ovld __cnfn convert_long8_sat_rtp(float8);
+long8 __ovld __cnfn convert_long8_rtn(float8);
+long8 __ovld __cnfn convert_long8_sat_rtn(float8);
+long8 __ovld __cnfn convert_long8(float8);
+long8 __ovld __cnfn convert_long8_sat(float8);
+ulong8 __ovld __cnfn convert_ulong8_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat(char8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat(short8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat(int8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat(long8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat(float8);
+float8 __ovld __cnfn convert_float8_rte(char8);
+float8 __ovld __cnfn convert_float8_rtz(char8);
+float8 __ovld __cnfn convert_float8_rtp(char8);
+float8 __ovld __cnfn convert_float8_rtn(char8);
+float8 __ovld __cnfn convert_float8(char8);
+float8 __ovld __cnfn convert_float8_rte(uchar8);
+float8 __ovld __cnfn convert_float8_rtz(uchar8);
+float8 __ovld __cnfn convert_float8_rtp(uchar8);
+float8 __ovld __cnfn convert_float8_rtn(uchar8);
+float8 __ovld __cnfn convert_float8(uchar8);
+float8 __ovld __cnfn convert_float8_rte(short8);
+float8 __ovld __cnfn convert_float8_rtz(short8);
+float8 __ovld __cnfn convert_float8_rtp(short8);
+float8 __ovld __cnfn convert_float8_rtn(short8);
+float8 __ovld __cnfn convert_float8(short8);
+float8 __ovld __cnfn convert_float8_rte(ushort8);
+float8 __ovld __cnfn convert_float8_rtz(ushort8);
+float8 __ovld __cnfn convert_float8_rtp(ushort8);
+float8 __ovld __cnfn convert_float8_rtn(ushort8);
+float8 __ovld __cnfn convert_float8(ushort8);
+float8 __ovld __cnfn convert_float8_rte(int8);
+float8 __ovld __cnfn convert_float8_rtz(int8);
+float8 __ovld __cnfn convert_float8_rtp(int8);
+float8 __ovld __cnfn convert_float8_rtn(int8);
+float8 __ovld __cnfn convert_float8(int8);
+float8 __ovld __cnfn convert_float8_rte(uint8);
+float8 __ovld __cnfn convert_float8_rtz(uint8);
+float8 __ovld __cnfn convert_float8_rtp(uint8);
+float8 __ovld __cnfn convert_float8_rtn(uint8);
+float8 __ovld __cnfn convert_float8(uint8);
+float8 __ovld __cnfn convert_float8_rte(long8);
+float8 __ovld __cnfn convert_float8_rtz(long8);
+float8 __ovld __cnfn convert_float8_rtp(long8);
+float8 __ovld __cnfn convert_float8_rtn(long8);
+float8 __ovld __cnfn convert_float8(long8);
+float8 __ovld __cnfn convert_float8_rte(ulong8);
+float8 __ovld __cnfn convert_float8_rtz(ulong8);
+float8 __ovld __cnfn convert_float8_rtp(ulong8);
+float8 __ovld __cnfn convert_float8_rtn(ulong8);
+float8 __ovld __cnfn convert_float8(ulong8);
+float8 __ovld __cnfn convert_float8_rte(float8);
+float8 __ovld __cnfn convert_float8_rtz(float8);
+float8 __ovld __cnfn convert_float8_rtp(float8);
+float8 __ovld __cnfn convert_float8_rtn(float8);
+float8 __ovld __cnfn convert_float8(float8);
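+
+// Default rounding, summarized from the OpenCL C conversion rules: when no
+// rounding suffix is given, conversions to floating-point types use
+// round-to-nearest-even, while conversions to integer types round toward
+// zero. An illustrative fragment:
+//
+//   float8 f = (float8)(1.5f);
+//   int8 a = convert_int8(f);      // _rtz implied: every lane becomes 1
+//   int8 b = convert_int8_rte(f);  // explicit _rte: every lane becomes 2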
+char16 __ovld __cnfn convert_char16_rte(char16);
+char16 __ovld __cnfn convert_char16_sat_rte(char16);
+char16 __ovld __cnfn convert_char16_rtz(char16);
+char16 __ovld __cnfn convert_char16_sat_rtz(char16);
+char16 __ovld __cnfn convert_char16_rtp(char16);
+char16 __ovld __cnfn convert_char16_sat_rtp(char16);
+char16 __ovld __cnfn convert_char16_rtn(char16);
+char16 __ovld __cnfn convert_char16_sat_rtn(char16);
+char16 __ovld __cnfn convert_char16(char16);
+char16 __ovld __cnfn convert_char16_sat(char16);
+char16 __ovld __cnfn convert_char16_rte(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
+char16 __ovld __cnfn convert_char16_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_rtn(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
+char16 __ovld __cnfn convert_char16(uchar16);
+char16 __ovld __cnfn convert_char16_sat(uchar16);
+char16 __ovld __cnfn convert_char16_rte(short16);
+char16 __ovld __cnfn convert_char16_sat_rte(short16);
+char16 __ovld __cnfn convert_char16_rtz(short16);
+char16 __ovld __cnfn convert_char16_sat_rtz(short16);
+char16 __ovld __cnfn convert_char16_rtp(short16);
+char16 __ovld __cnfn convert_char16_sat_rtp(short16);
+char16 __ovld __cnfn convert_char16_rtn(short16);
+char16 __ovld __cnfn convert_char16_sat_rtn(short16);
+char16 __ovld __cnfn convert_char16(short16);
+char16 __ovld __cnfn convert_char16_sat(short16);
+char16 __ovld __cnfn convert_char16_rte(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
+char16 __ovld __cnfn convert_char16_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_rtn(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
+char16 __ovld __cnfn convert_char16(ushort16);
+char16 __ovld __cnfn convert_char16_sat(ushort16);
+char16 __ovld __cnfn convert_char16_rte(int16);
+char16 __ovld __cnfn convert_char16_sat_rte(int16);
+char16 __ovld __cnfn convert_char16_rtz(int16);
+char16 __ovld __cnfn convert_char16_sat_rtz(int16);
+char16 __ovld __cnfn convert_char16_rtp(int16);
+char16 __ovld __cnfn convert_char16_sat_rtp(int16);
+char16 __ovld __cnfn convert_char16_rtn(int16);
+char16 __ovld __cnfn convert_char16_sat_rtn(int16);
+char16 __ovld __cnfn convert_char16(int16);
+char16 __ovld __cnfn convert_char16_sat(int16);
+char16 __ovld __cnfn convert_char16_rte(uint16);
+char16 __ovld __cnfn convert_char16_sat_rte(uint16);
+char16 __ovld __cnfn convert_char16_rtz(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
+char16 __ovld __cnfn convert_char16_rtp(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
+char16 __ovld __cnfn convert_char16_rtn(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
+char16 __ovld __cnfn convert_char16(uint16);
+char16 __ovld __cnfn convert_char16_sat(uint16);
+char16 __ovld __cnfn convert_char16_rte(long16);
+char16 __ovld __cnfn convert_char16_sat_rte(long16);
+char16 __ovld __cnfn convert_char16_rtz(long16);
+char16 __ovld __cnfn convert_char16_sat_rtz(long16);
+char16 __ovld __cnfn convert_char16_rtp(long16);
+char16 __ovld __cnfn convert_char16_sat_rtp(long16);
+char16 __ovld __cnfn convert_char16_rtn(long16);
+char16 __ovld __cnfn convert_char16_sat_rtn(long16);
+char16 __ovld __cnfn convert_char16(long16);
+char16 __ovld __cnfn convert_char16_sat(long16);
+char16 __ovld __cnfn convert_char16_rte(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
+char16 __ovld __cnfn convert_char16_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_rtn(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
+char16 __ovld __cnfn convert_char16(ulong16);
+char16 __ovld __cnfn convert_char16_sat(ulong16);
+char16 __ovld __cnfn convert_char16_rte(float16);
+char16 __ovld __cnfn convert_char16_sat_rte(float16);
+char16 __ovld __cnfn convert_char16_rtz(float16);
+char16 __ovld __cnfn convert_char16_sat_rtz(float16);
+char16 __ovld __cnfn convert_char16_rtp(float16);
+char16 __ovld __cnfn convert_char16_sat_rtp(float16);
+char16 __ovld __cnfn convert_char16_rtn(float16);
+char16 __ovld __cnfn convert_char16_sat_rtn(float16);
+char16 __ovld __cnfn convert_char16(float16);
+char16 __ovld __cnfn convert_char16_sat(float16);
+uchar16 __ovld __cnfn convert_uchar16_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat(char16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat(short16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat(int16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat(long16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat(float16);
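+// Illustrative usage sketch (not part of the upstream header): the _sat
+// variants clamp out-of-range lanes to the destination's representable
+// range, so negative signed lanes saturate to 0 in an unsigned destination:
+//
+//   short16 s = (short16)(-5);
+//   uchar16 u = convert_uchar16_sat(s); // every lane becomes 0
+//   uchar16 w = convert_uchar16(s);     // without _sat each lane keeps the
+//                                       // low 8 bits, giving 251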
+short16 __ovld __cnfn convert_short16_rte(char16);
+short16 __ovld __cnfn convert_short16_sat_rte(char16);
+short16 __ovld __cnfn convert_short16_rtz(char16);
+short16 __ovld __cnfn convert_short16_sat_rtz(char16);
+short16 __ovld __cnfn convert_short16_rtp(char16);
+short16 __ovld __cnfn convert_short16_sat_rtp(char16);
+short16 __ovld __cnfn convert_short16_rtn(char16);
+short16 __ovld __cnfn convert_short16_sat_rtn(char16);
+short16 __ovld __cnfn convert_short16(char16);
+short16 __ovld __cnfn convert_short16_sat(char16);
+short16 __ovld __cnfn convert_short16_rte(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
+short16 __ovld __cnfn convert_short16_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_rtn(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
+short16 __ovld __cnfn convert_short16(uchar16);
+short16 __ovld __cnfn convert_short16_sat(uchar16);
+short16 __ovld __cnfn convert_short16_rte(short16);
+short16 __ovld __cnfn convert_short16_sat_rte(short16);
+short16 __ovld __cnfn convert_short16_rtz(short16);
+short16 __ovld __cnfn convert_short16_sat_rtz(short16);
+short16 __ovld __cnfn convert_short16_rtp(short16);
+short16 __ovld __cnfn convert_short16_sat_rtp(short16);
+short16 __ovld __cnfn convert_short16_rtn(short16);
+short16 __ovld __cnfn convert_short16_sat_rtn(short16);
+short16 __ovld __cnfn convert_short16(short16);
+short16 __ovld __cnfn convert_short16_sat(short16);
+short16 __ovld __cnfn convert_short16_rte(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
+short16 __ovld __cnfn convert_short16_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_rtn(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
+short16 __ovld __cnfn convert_short16(ushort16);
+short16 __ovld __cnfn convert_short16_sat(ushort16);
+short16 __ovld __cnfn convert_short16_rte(int16);
+short16 __ovld __cnfn convert_short16_sat_rte(int16);
+short16 __ovld __cnfn convert_short16_rtz(int16);
+short16 __ovld __cnfn convert_short16_sat_rtz(int16);
+short16 __ovld __cnfn convert_short16_rtp(int16);
+short16 __ovld __cnfn convert_short16_sat_rtp(int16);
+short16 __ovld __cnfn convert_short16_rtn(int16);
+short16 __ovld __cnfn convert_short16_sat_rtn(int16);
+short16 __ovld __cnfn convert_short16(int16);
+short16 __ovld __cnfn convert_short16_sat(int16);
+short16 __ovld __cnfn convert_short16_rte(uint16);
+short16 __ovld __cnfn convert_short16_sat_rte(uint16);
+short16 __ovld __cnfn convert_short16_rtz(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
+short16 __ovld __cnfn convert_short16_rtp(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
+short16 __ovld __cnfn convert_short16_rtn(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
+short16 __ovld __cnfn convert_short16(uint16);
+short16 __ovld __cnfn convert_short16_sat(uint16);
+short16 __ovld __cnfn convert_short16_rte(long16);
+short16 __ovld __cnfn convert_short16_sat_rte(long16);
+short16 __ovld __cnfn convert_short16_rtz(long16);
+short16 __ovld __cnfn convert_short16_sat_rtz(long16);
+short16 __ovld __cnfn convert_short16_rtp(long16);
+short16 __ovld __cnfn convert_short16_sat_rtp(long16);
+short16 __ovld __cnfn convert_short16_rtn(long16);
+short16 __ovld __cnfn convert_short16_sat_rtn(long16);
+short16 __ovld __cnfn convert_short16(long16);
+short16 __ovld __cnfn convert_short16_sat(long16);
+short16 __ovld __cnfn convert_short16_rte(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
+short16 __ovld __cnfn convert_short16_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_rtn(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
+short16 __ovld __cnfn convert_short16(ulong16);
+short16 __ovld __cnfn convert_short16_sat(ulong16);
+short16 __ovld __cnfn convert_short16_rte(float16);
+short16 __ovld __cnfn convert_short16_sat_rte(float16);
+short16 __ovld __cnfn convert_short16_rtz(float16);
+short16 __ovld __cnfn convert_short16_sat_rtz(float16);
+short16 __ovld __cnfn convert_short16_rtp(float16);
+short16 __ovld __cnfn convert_short16_sat_rtp(float16);
+short16 __ovld __cnfn convert_short16_rtn(float16);
+short16 __ovld __cnfn convert_short16_sat_rtn(float16);
+short16 __ovld __cnfn convert_short16(float16);
+short16 __ovld __cnfn convert_short16_sat(float16);
+ushort16 __ovld __cnfn convert_ushort16_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat(char16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat(short16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat(int16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat(long16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat(float16);
+int16 __ovld __cnfn convert_int16_rte(char16);
+int16 __ovld __cnfn convert_int16_sat_rte(char16);
+int16 __ovld __cnfn convert_int16_rtz(char16);
+int16 __ovld __cnfn convert_int16_sat_rtz(char16);
+int16 __ovld __cnfn convert_int16_rtp(char16);
+int16 __ovld __cnfn convert_int16_sat_rtp(char16);
+int16 __ovld __cnfn convert_int16_rtn(char16);
+int16 __ovld __cnfn convert_int16_sat_rtn(char16);
+int16 __ovld __cnfn convert_int16(char16);
+int16 __ovld __cnfn convert_int16_sat(char16);
+int16 __ovld __cnfn convert_int16_rte(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
+int16 __ovld __cnfn convert_int16_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_rtn(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
+int16 __ovld __cnfn convert_int16(uchar16);
+int16 __ovld __cnfn convert_int16_sat(uchar16);
+int16 __ovld __cnfn convert_int16_rte(short16);
+int16 __ovld __cnfn convert_int16_sat_rte(short16);
+int16 __ovld __cnfn convert_int16_rtz(short16);
+int16 __ovld __cnfn convert_int16_sat_rtz(short16);
+int16 __ovld __cnfn convert_int16_rtp(short16);
+int16 __ovld __cnfn convert_int16_sat_rtp(short16);
+int16 __ovld __cnfn convert_int16_rtn(short16);
+int16 __ovld __cnfn convert_int16_sat_rtn(short16);
+int16 __ovld __cnfn convert_int16(short16);
+int16 __ovld __cnfn convert_int16_sat(short16);
+int16 __ovld __cnfn convert_int16_rte(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
+int16 __ovld __cnfn convert_int16_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_rtn(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
+int16 __ovld __cnfn convert_int16(ushort16);
+int16 __ovld __cnfn convert_int16_sat(ushort16);
+int16 __ovld __cnfn convert_int16_rte(int16);
+int16 __ovld __cnfn convert_int16_sat_rte(int16);
+int16 __ovld __cnfn convert_int16_rtz(int16);
+int16 __ovld __cnfn convert_int16_sat_rtz(int16);
+int16 __ovld __cnfn convert_int16_rtp(int16);
+int16 __ovld __cnfn convert_int16_sat_rtp(int16);
+int16 __ovld __cnfn convert_int16_rtn(int16);
+int16 __ovld __cnfn convert_int16_sat_rtn(int16);
+int16 __ovld __cnfn convert_int16(int16);
+int16 __ovld __cnfn convert_int16_sat(int16);
+int16 __ovld __cnfn convert_int16_rte(uint16);
+int16 __ovld __cnfn convert_int16_sat_rte(uint16);
+int16 __ovld __cnfn convert_int16_rtz(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
+int16 __ovld __cnfn convert_int16_rtp(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
+int16 __ovld __cnfn convert_int16_rtn(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
+int16 __ovld __cnfn convert_int16(uint16);
+int16 __ovld __cnfn convert_int16_sat(uint16);
+int16 __ovld __cnfn convert_int16_rte(long16);
+int16 __ovld __cnfn convert_int16_sat_rte(long16);
+int16 __ovld __cnfn convert_int16_rtz(long16);
+int16 __ovld __cnfn convert_int16_sat_rtz(long16);
+int16 __ovld __cnfn convert_int16_rtp(long16);
+int16 __ovld __cnfn convert_int16_sat_rtp(long16);
+int16 __ovld __cnfn convert_int16_rtn(long16);
+int16 __ovld __cnfn convert_int16_sat_rtn(long16);
+int16 __ovld __cnfn convert_int16(long16);
+int16 __ovld __cnfn convert_int16_sat(long16);
+int16 __ovld __cnfn convert_int16_rte(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
+int16 __ovld __cnfn convert_int16_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_rtn(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
+int16 __ovld __cnfn convert_int16(ulong16);
+int16 __ovld __cnfn convert_int16_sat(ulong16);
+int16 __ovld __cnfn convert_int16_rte(float16);
+int16 __ovld __cnfn convert_int16_sat_rte(float16);
+int16 __ovld __cnfn convert_int16_rtz(float16);
+int16 __ovld __cnfn convert_int16_sat_rtz(float16);
+int16 __ovld __cnfn convert_int16_rtp(float16);
+int16 __ovld __cnfn convert_int16_sat_rtp(float16);
+int16 __ovld __cnfn convert_int16_rtn(float16);
+int16 __ovld __cnfn convert_int16_sat_rtn(float16);
+int16 __ovld __cnfn convert_int16(float16);
+int16 __ovld __cnfn convert_int16_sat(float16);
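+// Illustrative usage sketch (not part of the upstream header): the rounding
+// suffixes select the rounding mode: _rte rounds to nearest even, _rtz
+// toward zero, _rtp toward +infinity, _rtn toward -infinity. For lanes
+// holding -1.5f:
+//
+//   float16 f = (float16)(-1.5f);
+//   int16 e = convert_int16_rte(f); // -2 (tie rounds to the even integer)
+//   int16 z = convert_int16_rtz(f); // -1
+//   int16 p = convert_int16_rtp(f); // -1
+//   int16 n = convert_int16_rtn(f); // -2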
+uint16 __ovld __cnfn convert_uint16_rte(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
+uint16 __ovld __cnfn convert_uint16_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_rtn(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
+uint16 __ovld __cnfn convert_uint16(char16);
+uint16 __ovld __cnfn convert_uint16_sat(char16);
+uint16 __ovld __cnfn convert_uint16_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat(uchar16);
+uint16 __ovld __cnfn convert_uint16_rte(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
+uint16 __ovld __cnfn convert_uint16_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_rtn(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
+uint16 __ovld __cnfn convert_uint16(short16);
+uint16 __ovld __cnfn convert_uint16_sat(short16);
+uint16 __ovld __cnfn convert_uint16_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat(ushort16);
+uint16 __ovld __cnfn convert_uint16_rte(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
+uint16 __ovld __cnfn convert_uint16_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_rtn(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
+uint16 __ovld __cnfn convert_uint16(int16);
+uint16 __ovld __cnfn convert_uint16_sat(int16);
+uint16 __ovld __cnfn convert_uint16_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16(uint16);
+uint16 __ovld __cnfn convert_uint16_sat(uint16);
+uint16 __ovld __cnfn convert_uint16_rte(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
+uint16 __ovld __cnfn convert_uint16_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_rtn(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
+uint16 __ovld __cnfn convert_uint16(long16);
+uint16 __ovld __cnfn convert_uint16_sat(long16);
+uint16 __ovld __cnfn convert_uint16_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat(ulong16);
+uint16 __ovld __cnfn convert_uint16_rte(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
+uint16 __ovld __cnfn convert_uint16_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_rtn(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
+uint16 __ovld __cnfn convert_uint16(float16);
+uint16 __ovld __cnfn convert_uint16_sat(float16);
+long16 __ovld __cnfn convert_long16_rte(char16);
+long16 __ovld __cnfn convert_long16_sat_rte(char16);
+long16 __ovld __cnfn convert_long16_rtz(char16);
+long16 __ovld __cnfn convert_long16_sat_rtz(char16);
+long16 __ovld __cnfn convert_long16_rtp(char16);
+long16 __ovld __cnfn convert_long16_sat_rtp(char16);
+long16 __ovld __cnfn convert_long16_rtn(char16);
+long16 __ovld __cnfn convert_long16_sat_rtn(char16);
+long16 __ovld __cnfn convert_long16(char16);
+long16 __ovld __cnfn convert_long16_sat(char16);
+long16 __ovld __cnfn convert_long16_rte(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
+long16 __ovld __cnfn convert_long16_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_rtn(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
+long16 __ovld __cnfn convert_long16(uchar16);
+long16 __ovld __cnfn convert_long16_sat(uchar16);
+long16 __ovld __cnfn convert_long16_rte(short16);
+long16 __ovld __cnfn convert_long16_sat_rte(short16);
+long16 __ovld __cnfn convert_long16_rtz(short16);
+long16 __ovld __cnfn convert_long16_sat_rtz(short16);
+long16 __ovld __cnfn convert_long16_rtp(short16);
+long16 __ovld __cnfn convert_long16_sat_rtp(short16);
+long16 __ovld __cnfn convert_long16_rtn(short16);
+long16 __ovld __cnfn convert_long16_sat_rtn(short16);
+long16 __ovld __cnfn convert_long16(short16);
+long16 __ovld __cnfn convert_long16_sat(short16);
+long16 __ovld __cnfn convert_long16_rte(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
+long16 __ovld __cnfn convert_long16_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_rtn(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
+long16 __ovld __cnfn convert_long16(ushort16);
+long16 __ovld __cnfn convert_long16_sat(ushort16);
+long16 __ovld __cnfn convert_long16_rte(int16);
+long16 __ovld __cnfn convert_long16_sat_rte(int16);
+long16 __ovld __cnfn convert_long16_rtz(int16);
+long16 __ovld __cnfn convert_long16_sat_rtz(int16);
+long16 __ovld __cnfn convert_long16_rtp(int16);
+long16 __ovld __cnfn convert_long16_sat_rtp(int16);
+long16 __ovld __cnfn convert_long16_rtn(int16);
+long16 __ovld __cnfn convert_long16_sat_rtn(int16);
+long16 __ovld __cnfn convert_long16(int16);
+long16 __ovld __cnfn convert_long16_sat(int16);
+long16 __ovld __cnfn convert_long16_rte(uint16);
+long16 __ovld __cnfn convert_long16_sat_rte(uint16);
+long16 __ovld __cnfn convert_long16_rtz(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
+long16 __ovld __cnfn convert_long16_rtp(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
+long16 __ovld __cnfn convert_long16_rtn(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
+long16 __ovld __cnfn convert_long16(uint16);
+long16 __ovld __cnfn convert_long16_sat(uint16);
+long16 __ovld __cnfn convert_long16_rte(long16);
+long16 __ovld __cnfn convert_long16_sat_rte(long16);
+long16 __ovld __cnfn convert_long16_rtz(long16);
+long16 __ovld __cnfn convert_long16_sat_rtz(long16);
+long16 __ovld __cnfn convert_long16_rtp(long16);
+long16 __ovld __cnfn convert_long16_sat_rtp(long16);
+long16 __ovld __cnfn convert_long16_rtn(long16);
+long16 __ovld __cnfn convert_long16_sat_rtn(long16);
+long16 __ovld __cnfn convert_long16(long16);
+long16 __ovld __cnfn convert_long16_sat(long16);
+long16 __ovld __cnfn convert_long16_rte(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
+long16 __ovld __cnfn convert_long16_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_rtn(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
+long16 __ovld __cnfn convert_long16(ulong16);
+long16 __ovld __cnfn convert_long16_sat(ulong16);
+long16 __ovld __cnfn convert_long16_rte(float16);
+long16 __ovld __cnfn convert_long16_sat_rte(float16);
+long16 __ovld __cnfn convert_long16_rtz(float16);
+long16 __ovld __cnfn convert_long16_sat_rtz(float16);
+long16 __ovld __cnfn convert_long16_rtp(float16);
+long16 __ovld __cnfn convert_long16_sat_rtp(float16);
+long16 __ovld __cnfn convert_long16_rtn(float16);
+long16 __ovld __cnfn convert_long16_sat_rtn(float16);
+long16 __ovld __cnfn convert_long16(float16);
+long16 __ovld __cnfn convert_long16_sat(float16);
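+// Illustrative usage sketch (not part of the upstream header): for
+// float-to-integer conversions, _sat clamps values outside the destination
+// range, while the unsaturated forms leave out-of-range inputs undefined:
+//
+//   float16 big = (float16)(1e30f);
+//   long16 l = convert_long16_sat(big); // saturates to LONG_MAX
+//   long16 m = convert_long16(big);     // undefined: 1e30f exceeds LONG_MAX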
+ulong16 __ovld __cnfn convert_ulong16_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat(char16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat(short16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat(int16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat(long16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat(float16);
+float16 __ovld __cnfn convert_float16_rte(char16);
+float16 __ovld __cnfn convert_float16_rtz(char16);
+float16 __ovld __cnfn convert_float16_rtp(char16);
+float16 __ovld __cnfn convert_float16_rtn(char16);
+float16 __ovld __cnfn convert_float16(char16);
+float16 __ovld __cnfn convert_float16_rte(uchar16);
+float16 __ovld __cnfn convert_float16_rtz(uchar16);
+float16 __ovld __cnfn convert_float16_rtp(uchar16);
+float16 __ovld __cnfn convert_float16_rtn(uchar16);
+float16 __ovld __cnfn convert_float16(uchar16);
+float16 __ovld __cnfn convert_float16_rte(short16);
+float16 __ovld __cnfn convert_float16_rtz(short16);
+float16 __ovld __cnfn convert_float16_rtp(short16);
+float16 __ovld __cnfn convert_float16_rtn(short16);
+float16 __ovld __cnfn convert_float16(short16);
+float16 __ovld __cnfn convert_float16_rte(ushort16);
+float16 __ovld __cnfn convert_float16_rtz(ushort16);
+float16 __ovld __cnfn convert_float16_rtp(ushort16);
+float16 __ovld __cnfn convert_float16_rtn(ushort16);
+float16 __ovld __cnfn convert_float16(ushort16);
+float16 __ovld __cnfn convert_float16_rte(int16);
+float16 __ovld __cnfn convert_float16_rtz(int16);
+float16 __ovld __cnfn convert_float16_rtp(int16);
+float16 __ovld __cnfn convert_float16_rtn(int16);
+float16 __ovld __cnfn convert_float16(int16);
+float16 __ovld __cnfn convert_float16_rte(uint16);
+float16 __ovld __cnfn convert_float16_rtz(uint16);
+float16 __ovld __cnfn convert_float16_rtp(uint16);
+float16 __ovld __cnfn convert_float16_rtn(uint16);
+float16 __ovld __cnfn convert_float16(uint16);
+float16 __ovld __cnfn convert_float16_rte(long16);
+float16 __ovld __cnfn convert_float16_rtz(long16);
+float16 __ovld __cnfn convert_float16_rtp(long16);
+float16 __ovld __cnfn convert_float16_rtn(long16);
+float16 __ovld __cnfn convert_float16(long16);
+float16 __ovld __cnfn convert_float16_rte(ulong16);
+float16 __ovld __cnfn convert_float16_rtz(ulong16);
+float16 __ovld __cnfn convert_float16_rtp(ulong16);
+float16 __ovld __cnfn convert_float16_rtn(ulong16);
+float16 __ovld __cnfn convert_float16(ulong16);
+float16 __ovld __cnfn convert_float16_rte(float16);
+float16 __ovld __cnfn convert_float16_rtz(float16);
+float16 __ovld __cnfn convert_float16_rtp(float16);
+float16 __ovld __cnfn convert_float16_rtn(float16);
+float16 __ovld __cnfn convert_float16(float16);
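+// Editorial note (not part of the upstream header): there are no _sat
+// variants for floating-point destinations above because saturation is
+// defined only for integer destination types. When no rounding suffix is
+// given, conversions to floating-point default to _rte and conversions to
+// integer types default to _rtz:
+//
+//   int16 i = (int16)(3);
+//   float16 g = convert_float16(i); // same as convert_float16_rte(i)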
+
+// Conversions with double data type parameters or return values.
+
+#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
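+// Illustrative usage sketch (not part of the upstream header): the double
+// overloads below exist only when the device supports cl_khr_fp64, so
+// portable kernels typically guard their use:
+//
+//   #ifdef cl_khr_fp64
+//   double4 d = (double4)(1.5, -2.5, 2.5, 3.5);
+//   int4 i = convert_int4_rte(d); // round to nearest even -> (2, -2, 2, 4)
+//   #endif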
+char __ovld __cnfn convert_char(double);
+char __ovld __cnfn convert_char_rte(double);
+char __ovld __cnfn convert_char_rtn(double);
+char __ovld __cnfn convert_char_rtp(double);
+char __ovld __cnfn convert_char_rtz(double);
+char __ovld __cnfn convert_char_sat(double);
+char __ovld __cnfn convert_char_sat_rte(double);
+char __ovld __cnfn convert_char_sat_rtn(double);
+char __ovld __cnfn convert_char_sat_rtp(double);
+char __ovld __cnfn convert_char_sat_rtz(double);
+char2 __ovld __cnfn convert_char2(double2);
+char2 __ovld __cnfn convert_char2_rte(double2);
+char2 __ovld __cnfn convert_char2_rtn(double2);
+char2 __ovld __cnfn convert_char2_rtp(double2);
+char2 __ovld __cnfn convert_char2_rtz(double2);
+char2 __ovld __cnfn convert_char2_sat(double2);
+char2 __ovld __cnfn convert_char2_sat_rte(double2);
+char2 __ovld __cnfn convert_char2_sat_rtn(double2);
+char2 __ovld __cnfn convert_char2_sat_rtp(double2);
+char2 __ovld __cnfn convert_char2_sat_rtz(double2);
+char3 __ovld __cnfn convert_char3(double3);
+char3 __ovld __cnfn convert_char3_rte(double3);
+char3 __ovld __cnfn convert_char3_rtn(double3);
+char3 __ovld __cnfn convert_char3_rtp(double3);
+char3 __ovld __cnfn convert_char3_rtz(double3);
+char3 __ovld __cnfn convert_char3_sat(double3);
+char3 __ovld __cnfn convert_char3_sat_rte(double3);
+char3 __ovld __cnfn convert_char3_sat_rtn(double3);
+char3 __ovld __cnfn convert_char3_sat_rtp(double3);
+char3 __ovld __cnfn convert_char3_sat_rtz(double3);
+char4 __ovld __cnfn convert_char4(double4);
+char4 __ovld __cnfn convert_char4_rte(double4);
+char4 __ovld __cnfn convert_char4_rtn(double4);
+char4 __ovld __cnfn convert_char4_rtp(double4);
+char4 __ovld __cnfn convert_char4_rtz(double4);
+char4 __ovld __cnfn convert_char4_sat(double4);
+char4 __ovld __cnfn convert_char4_sat_rte(double4);
+char4 __ovld __cnfn convert_char4_sat_rtn(double4);
+char4 __ovld __cnfn convert_char4_sat_rtp(double4);
+char4 __ovld __cnfn convert_char4_sat_rtz(double4);
+char8 __ovld __cnfn convert_char8(double8);
+char8 __ovld __cnfn convert_char8_rte(double8);
+char8 __ovld __cnfn convert_char8_rtn(double8);
+char8 __ovld __cnfn convert_char8_rtp(double8);
+char8 __ovld __cnfn convert_char8_rtz(double8);
+char8 __ovld __cnfn convert_char8_sat(double8);
+char8 __ovld __cnfn convert_char8_sat_rte(double8);
+char8 __ovld __cnfn convert_char8_sat_rtn(double8);
+char8 __ovld __cnfn convert_char8_sat_rtp(double8);
+char8 __ovld __cnfn convert_char8_sat_rtz(double8);
+char16 __ovld __cnfn convert_char16(double16);
+char16 __ovld __cnfn convert_char16_rte(double16);
+char16 __ovld __cnfn convert_char16_rtn(double16);
+char16 __ovld __cnfn convert_char16_rtp(double16);
+char16 __ovld __cnfn convert_char16_rtz(double16);
+char16 __ovld __cnfn convert_char16_sat(double16);
+char16 __ovld __cnfn convert_char16_sat_rte(double16);
+char16 __ovld __cnfn convert_char16_sat_rtn(double16);
+char16 __ovld __cnfn convert_char16_sat_rtp(double16);
+char16 __ovld __cnfn convert_char16_sat_rtz(double16);
+
+uchar __ovld __cnfn convert_uchar(double);
+uchar __ovld __cnfn convert_uchar_rte(double);
+uchar __ovld __cnfn convert_uchar_rtn(double);
+uchar __ovld __cnfn convert_uchar_rtp(double);
+uchar __ovld __cnfn convert_uchar_rtz(double);
+uchar __ovld __cnfn convert_uchar_sat(double);
+uchar __ovld __cnfn convert_uchar_sat_rte(double);
+uchar __ovld __cnfn convert_uchar_sat_rtn(double);
+uchar __ovld __cnfn convert_uchar_sat_rtp(double);
+uchar __ovld __cnfn convert_uchar_sat_rtz(double);
+uchar2 __ovld __cnfn convert_uchar2(double2);
+uchar2 __ovld __cnfn convert_uchar2_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
+uchar3 __ovld __cnfn convert_uchar3(double3);
+uchar3 __ovld __cnfn convert_uchar3_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
+uchar4 __ovld __cnfn convert_uchar4(double4);
+uchar4 __ovld __cnfn convert_uchar4_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
+uchar8 __ovld __cnfn convert_uchar8(double8);
+uchar8 __ovld __cnfn convert_uchar8_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
+uchar16 __ovld __cnfn convert_uchar16(double16);
+uchar16 __ovld __cnfn convert_uchar16_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
+
+short __ovld __cnfn convert_short(double);
+short __ovld __cnfn convert_short_rte(double);
+short __ovld __cnfn convert_short_rtn(double);
+short __ovld __cnfn convert_short_rtp(double);
+short __ovld __cnfn convert_short_rtz(double);
+short __ovld __cnfn convert_short_sat(double);
+short __ovld __cnfn convert_short_sat_rte(double);
+short __ovld __cnfn convert_short_sat_rtn(double);
+short __ovld __cnfn convert_short_sat_rtp(double);
+short __ovld __cnfn convert_short_sat_rtz(double);
+short2 __ovld __cnfn convert_short2(double2);
+short2 __ovld __cnfn convert_short2_rte(double2);
+short2 __ovld __cnfn convert_short2_rtn(double2);
+short2 __ovld __cnfn convert_short2_rtp(double2);
+short2 __ovld __cnfn convert_short2_rtz(double2);
+short2 __ovld __cnfn convert_short2_sat(double2);
+short2 __ovld __cnfn convert_short2_sat_rte(double2);
+short2 __ovld __cnfn convert_short2_sat_rtn(double2);
+short2 __ovld __cnfn convert_short2_sat_rtp(double2);
+short2 __ovld __cnfn convert_short2_sat_rtz(double2);
+short3 __ovld __cnfn convert_short3(double3);
+short3 __ovld __cnfn convert_short3_rte(double3);
+short3 __ovld __cnfn convert_short3_rtn(double3);
+short3 __ovld __cnfn convert_short3_rtp(double3);
+short3 __ovld __cnfn convert_short3_rtz(double3);
+short3 __ovld __cnfn convert_short3_sat(double3);
+short3 __ovld __cnfn convert_short3_sat_rte(double3);
+short3 __ovld __cnfn convert_short3_sat_rtn(double3);
+short3 __ovld __cnfn convert_short3_sat_rtp(double3);
+short3 __ovld __cnfn convert_short3_sat_rtz(double3);
+short4 __ovld __cnfn convert_short4(double4);
+short4 __ovld __cnfn convert_short4_rte(double4);
+short4 __ovld __cnfn convert_short4_rtn(double4);
+short4 __ovld __cnfn convert_short4_rtp(double4);
+short4 __ovld __cnfn convert_short4_rtz(double4);
+short4 __ovld __cnfn convert_short4_sat(double4);
+short4 __ovld __cnfn convert_short4_sat_rte(double4);
+short4 __ovld __cnfn convert_short4_sat_rtn(double4);
+short4 __ovld __cnfn convert_short4_sat_rtp(double4);
+short4 __ovld __cnfn convert_short4_sat_rtz(double4);
+short8 __ovld __cnfn convert_short8(double8);
+short8 __ovld __cnfn convert_short8_rte(double8);
+short8 __ovld __cnfn convert_short8_rtn(double8);
+short8 __ovld __cnfn convert_short8_rtp(double8);
+short8 __ovld __cnfn convert_short8_rtz(double8);
+short8 __ovld __cnfn convert_short8_sat(double8);
+short8 __ovld __cnfn convert_short8_sat_rte(double8);
+short8 __ovld __cnfn convert_short8_sat_rtn(double8);
+short8 __ovld __cnfn convert_short8_sat_rtp(double8);
+short8 __ovld __cnfn convert_short8_sat_rtz(double8);
+short16 __ovld __cnfn convert_short16(double16);
+short16 __ovld __cnfn convert_short16_rte(double16);
+short16 __ovld __cnfn convert_short16_rtn(double16);
+short16 __ovld __cnfn convert_short16_rtp(double16);
+short16 __ovld __cnfn convert_short16_rtz(double16);
+short16 __ovld __cnfn convert_short16_sat(double16);
+short16 __ovld __cnfn convert_short16_sat_rte(double16);
+short16 __ovld __cnfn convert_short16_sat_rtn(double16);
+short16 __ovld __cnfn convert_short16_sat_rtp(double16);
+short16 __ovld __cnfn convert_short16_sat_rtz(double16);
+
+ushort __ovld __cnfn convert_ushort(double);
+ushort __ovld __cnfn convert_ushort_rte(double);
+ushort __ovld __cnfn convert_ushort_rtn(double);
+ushort __ovld __cnfn convert_ushort_rtp(double);
+ushort __ovld __cnfn convert_ushort_rtz(double);
+ushort __ovld __cnfn convert_ushort_sat(double);
+ushort __ovld __cnfn convert_ushort_sat_rte(double);
+ushort __ovld __cnfn convert_ushort_sat_rtn(double);
+ushort __ovld __cnfn convert_ushort_sat_rtp(double);
+ushort __ovld __cnfn convert_ushort_sat_rtz(double);
+ushort2 __ovld __cnfn convert_ushort2(double2);
+ushort2 __ovld __cnfn convert_ushort2_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
+ushort3 __ovld __cnfn convert_ushort3(double3);
+ushort3 __ovld __cnfn convert_ushort3_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
+ushort4 __ovld __cnfn convert_ushort4(double4);
+ushort4 __ovld __cnfn convert_ushort4_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
+ushort8 __ovld __cnfn convert_ushort8(double8);
+ushort8 __ovld __cnfn convert_ushort8_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
+ushort16 __ovld __cnfn convert_ushort16(double16);
+ushort16 __ovld __cnfn convert_ushort16_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
+
+int __ovld __cnfn convert_int(double);
+int __ovld __cnfn convert_int_rte(double);
+int __ovld __cnfn convert_int_rtn(double);
+int __ovld __cnfn convert_int_rtp(double);
+int __ovld __cnfn convert_int_rtz(double);
+int __ovld __cnfn convert_int_sat(double);
+int __ovld __cnfn convert_int_sat_rte(double);
+int __ovld __cnfn convert_int_sat_rtn(double);
+int __ovld __cnfn convert_int_sat_rtp(double);
+int __ovld __cnfn convert_int_sat_rtz(double);
+int2 __ovld __cnfn convert_int2(double2);
+int2 __ovld __cnfn convert_int2_rte(double2);
+int2 __ovld __cnfn convert_int2_rtn(double2);
+int2 __ovld __cnfn convert_int2_rtp(double2);
+int2 __ovld __cnfn convert_int2_rtz(double2);
+int2 __ovld __cnfn convert_int2_sat(double2);
+int2 __ovld __cnfn convert_int2_sat_rte(double2);
+int2 __ovld __cnfn convert_int2_sat_rtn(double2);
+int2 __ovld __cnfn convert_int2_sat_rtp(double2);
+int2 __ovld __cnfn convert_int2_sat_rtz(double2);
+int3 __ovld __cnfn convert_int3(double3);
+int3 __ovld __cnfn convert_int3_rte(double3);
+int3 __ovld __cnfn convert_int3_rtn(double3);
+int3 __ovld __cnfn convert_int3_rtp(double3);
+int3 __ovld __cnfn convert_int3_rtz(double3);
+int3 __ovld __cnfn convert_int3_sat(double3);
+int3 __ovld __cnfn convert_int3_sat_rte(double3);
+int3 __ovld __cnfn convert_int3_sat_rtn(double3);
+int3 __ovld __cnfn convert_int3_sat_rtp(double3);
+int3 __ovld __cnfn convert_int3_sat_rtz(double3);
+int4 __ovld __cnfn convert_int4(double4);
+int4 __ovld __cnfn convert_int4_rte(double4);
+int4 __ovld __cnfn convert_int4_rtn(double4);
+int4 __ovld __cnfn convert_int4_rtp(double4);
+int4 __ovld __cnfn convert_int4_rtz(double4);
+int4 __ovld __cnfn convert_int4_sat(double4);
+int4 __ovld __cnfn convert_int4_sat_rte(double4);
+int4 __ovld __cnfn convert_int4_sat_rtn(double4);
+int4 __ovld __cnfn convert_int4_sat_rtp(double4);
+int4 __ovld __cnfn convert_int4_sat_rtz(double4);
+int8 __ovld __cnfn convert_int8(double8);
+int8 __ovld __cnfn convert_int8_rte(double8);
+int8 __ovld __cnfn convert_int8_rtn(double8);
+int8 __ovld __cnfn convert_int8_rtp(double8);
+int8 __ovld __cnfn convert_int8_rtz(double8);
+int8 __ovld __cnfn convert_int8_sat(double8);
+int8 __ovld __cnfn convert_int8_sat_rte(double8);
+int8 __ovld __cnfn convert_int8_sat_rtn(double8);
+int8 __ovld __cnfn convert_int8_sat_rtp(double8);
+int8 __ovld __cnfn convert_int8_sat_rtz(double8);
+int16 __ovld __cnfn convert_int16(double16);
+int16 __ovld __cnfn convert_int16_rte(double16);
+int16 __ovld __cnfn convert_int16_rtn(double16);
+int16 __ovld __cnfn convert_int16_rtp(double16);
+int16 __ovld __cnfn convert_int16_rtz(double16);
+int16 __ovld __cnfn convert_int16_sat(double16);
+int16 __ovld __cnfn convert_int16_sat_rte(double16);
+int16 __ovld __cnfn convert_int16_sat_rtn(double16);
+int16 __ovld __cnfn convert_int16_sat_rtp(double16);
+int16 __ovld __cnfn convert_int16_sat_rtz(double16);
+
+uint __ovld __cnfn convert_uint(double);
+uint __ovld __cnfn convert_uint_rte(double);
+uint __ovld __cnfn convert_uint_rtn(double);
+uint __ovld __cnfn convert_uint_rtp(double);
+uint __ovld __cnfn convert_uint_rtz(double);
+uint __ovld __cnfn convert_uint_sat(double);
+uint __ovld __cnfn convert_uint_sat_rte(double);
+uint __ovld __cnfn convert_uint_sat_rtn(double);
+uint __ovld __cnfn convert_uint_sat_rtp(double);
+uint __ovld __cnfn convert_uint_sat_rtz(double);
+uint2 __ovld __cnfn convert_uint2(double2);
+uint2 __ovld __cnfn convert_uint2_rte(double2);
+uint2 __ovld __cnfn convert_uint2_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_rtz(double2);
+uint2 __ovld __cnfn convert_uint2_sat(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
+uint3 __ovld __cnfn convert_uint3(double3);
+uint3 __ovld __cnfn convert_uint3_rte(double3);
+uint3 __ovld __cnfn convert_uint3_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_rtz(double3);
+uint3 __ovld __cnfn convert_uint3_sat(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
+uint4 __ovld __cnfn convert_uint4(double4);
+uint4 __ovld __cnfn convert_uint4_rte(double4);
+uint4 __ovld __cnfn convert_uint4_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_rtz(double4);
+uint4 __ovld __cnfn convert_uint4_sat(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
+uint8 __ovld __cnfn convert_uint8(double8);
+uint8 __ovld __cnfn convert_uint8_rte(double8);
+uint8 __ovld __cnfn convert_uint8_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_rtz(double8);
+uint8 __ovld __cnfn convert_uint8_sat(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
+uint16 __ovld __cnfn convert_uint16(double16);
+uint16 __ovld __cnfn convert_uint16_rte(double16);
+uint16 __ovld __cnfn convert_uint16_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_rtz(double16);
+uint16 __ovld __cnfn convert_uint16_sat(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
+
+long __ovld __cnfn convert_long(double);
+long __ovld __cnfn convert_long_rte(double);
+long __ovld __cnfn convert_long_rtn(double);
+long __ovld __cnfn convert_long_rtp(double);
+long __ovld __cnfn convert_long_rtz(double);
+long __ovld __cnfn convert_long_sat(double);
+long __ovld __cnfn convert_long_sat_rte(double);
+long __ovld __cnfn convert_long_sat_rtn(double);
+long __ovld __cnfn convert_long_sat_rtp(double);
+long __ovld __cnfn convert_long_sat_rtz(double);
+long2 __ovld __cnfn convert_long2(double2);
+long2 __ovld __cnfn convert_long2_rte(double2);
+long2 __ovld __cnfn convert_long2_rtn(double2);
+long2 __ovld __cnfn convert_long2_rtp(double2);
+long2 __ovld __cnfn convert_long2_rtz(double2);
+long2 __ovld __cnfn convert_long2_sat(double2);
+long2 __ovld __cnfn convert_long2_sat_rte(double2);
+long2 __ovld __cnfn convert_long2_sat_rtn(double2);
+long2 __ovld __cnfn convert_long2_sat_rtp(double2);
+long2 __ovld __cnfn convert_long2_sat_rtz(double2);
+long3 __ovld __cnfn convert_long3(double3);
+long3 __ovld __cnfn convert_long3_rte(double3);
+long3 __ovld __cnfn convert_long3_rtn(double3);
+long3 __ovld __cnfn convert_long3_rtp(double3);
+long3 __ovld __cnfn convert_long3_rtz(double3);
+long3 __ovld __cnfn convert_long3_sat(double3);
+long3 __ovld __cnfn convert_long3_sat_rte(double3);
+long3 __ovld __cnfn convert_long3_sat_rtn(double3);
+long3 __ovld __cnfn convert_long3_sat_rtp(double3);
+long3 __ovld __cnfn convert_long3_sat_rtz(double3);
+long4 __ovld __cnfn convert_long4(double4);
+long4 __ovld __cnfn convert_long4_rte(double4);
+long4 __ovld __cnfn convert_long4_rtn(double4);
+long4 __ovld __cnfn convert_long4_rtp(double4);
+long4 __ovld __cnfn convert_long4_rtz(double4);
+long4 __ovld __cnfn convert_long4_sat(double4);
+long4 __ovld __cnfn convert_long4_sat_rte(double4);
+long4 __ovld __cnfn convert_long4_sat_rtn(double4);
+long4 __ovld __cnfn convert_long4_sat_rtp(double4);
+long4 __ovld __cnfn convert_long4_sat_rtz(double4);
+long8 __ovld __cnfn convert_long8(double8);
+long8 __ovld __cnfn convert_long8_rte(double8);
+long8 __ovld __cnfn convert_long8_rtn(double8);
+long8 __ovld __cnfn convert_long8_rtp(double8);
+long8 __ovld __cnfn convert_long8_rtz(double8);
+long8 __ovld __cnfn convert_long8_sat(double8);
+long8 __ovld __cnfn convert_long8_sat_rte(double8);
+long8 __ovld __cnfn convert_long8_sat_rtn(double8);
+long8 __ovld __cnfn convert_long8_sat_rtp(double8);
+long8 __ovld __cnfn convert_long8_sat_rtz(double8);
+long16 __ovld __cnfn convert_long16(double16);
+long16 __ovld __cnfn convert_long16_rte(double16);
+long16 __ovld __cnfn convert_long16_rtn(double16);
+long16 __ovld __cnfn convert_long16_rtp(double16);
+long16 __ovld __cnfn convert_long16_rtz(double16);
+long16 __ovld __cnfn convert_long16_sat(double16);
+long16 __ovld __cnfn convert_long16_sat_rte(double16);
+long16 __ovld __cnfn convert_long16_sat_rtn(double16);
+long16 __ovld __cnfn convert_long16_sat_rtp(double16);
+long16 __ovld __cnfn convert_long16_sat_rtz(double16);
+
+ulong __ovld __cnfn convert_ulong(double);
+ulong __ovld __cnfn convert_ulong_rte(double);
+ulong __ovld __cnfn convert_ulong_rtn(double);
+ulong __ovld __cnfn convert_ulong_rtp(double);
+ulong __ovld __cnfn convert_ulong_rtz(double);
+ulong __ovld __cnfn convert_ulong_sat(double);
+ulong __ovld __cnfn convert_ulong_sat_rte(double);
+ulong __ovld __cnfn convert_ulong_sat_rtn(double);
+ulong __ovld __cnfn convert_ulong_sat_rtp(double);
+ulong __ovld __cnfn convert_ulong_sat_rtz(double);
+ulong2 __ovld __cnfn convert_ulong2(double2);
+ulong2 __ovld __cnfn convert_ulong2_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
+ulong3 __ovld __cnfn convert_ulong3(double3);
+ulong3 __ovld __cnfn convert_ulong3_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
+ulong4 __ovld __cnfn convert_ulong4(double4);
+ulong4 __ovld __cnfn convert_ulong4_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
+ulong8 __ovld __cnfn convert_ulong8(double8);
+ulong8 __ovld __cnfn convert_ulong8_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
+ulong16 __ovld __cnfn convert_ulong16(double16);
+ulong16 __ovld __cnfn convert_ulong16_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
+
+float __ovld __cnfn convert_float(double);
+float __ovld __cnfn convert_float_rte(double);
+float __ovld __cnfn convert_float_rtn(double);
+float __ovld __cnfn convert_float_rtp(double);
+float __ovld __cnfn convert_float_rtz(double);
+float2 __ovld __cnfn convert_float2(double2);
+float2 __ovld __cnfn convert_float2_rte(double2);
+float2 __ovld __cnfn convert_float2_rtn(double2);
+float2 __ovld __cnfn convert_float2_rtp(double2);
+float2 __ovld __cnfn convert_float2_rtz(double2);
+float3 __ovld __cnfn convert_float3(double3);
+float3 __ovld __cnfn convert_float3_rte(double3);
+float3 __ovld __cnfn convert_float3_rtn(double3);
+float3 __ovld __cnfn convert_float3_rtp(double3);
+float3 __ovld __cnfn convert_float3_rtz(double3);
+float4 __ovld __cnfn convert_float4(double4);
+float4 __ovld __cnfn convert_float4_rte(double4);
+float4 __ovld __cnfn convert_float4_rtn(double4);
+float4 __ovld __cnfn convert_float4_rtp(double4);
+float4 __ovld __cnfn convert_float4_rtz(double4);
+float8 __ovld __cnfn convert_float8(double8);
+float8 __ovld __cnfn convert_float8_rte(double8);
+float8 __ovld __cnfn convert_float8_rtn(double8);
+float8 __ovld __cnfn convert_float8_rtp(double8);
+float8 __ovld __cnfn convert_float8_rtz(double8);
+float16 __ovld __cnfn convert_float16(double16);
+float16 __ovld __cnfn convert_float16_rte(double16);
+float16 __ovld __cnfn convert_float16_rtn(double16);
+float16 __ovld __cnfn convert_float16_rtp(double16);
+float16 __ovld __cnfn convert_float16_rtz(double16);
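+
+// Illustrative usage (a sketch, not part of the generated declarations;
+// `d` stands for any double4 value): unsuffixed conversions to a
+// floating-point destination round to nearest even, while the suffixed
+// forms pick the rounding mode explicitly, e.g.
+//   float4 f = convert_float4_rtz(d);  // round each lane toward zero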
+
+double __ovld __cnfn convert_double(char);
+double __ovld __cnfn convert_double(double);
+double __ovld __cnfn convert_double(float);
+double __ovld __cnfn convert_double(int);
+double __ovld __cnfn convert_double(long);
+double __ovld __cnfn convert_double(short);
+double __ovld __cnfn convert_double(uchar);
+double __ovld __cnfn convert_double(uint);
+double __ovld __cnfn convert_double(ulong);
+double __ovld __cnfn convert_double(ushort);
+double __ovld __cnfn convert_double_rte(char);
+double __ovld __cnfn convert_double_rte(double);
+double __ovld __cnfn convert_double_rte(float);
+double __ovld __cnfn convert_double_rte(int);
+double __ovld __cnfn convert_double_rte(long);
+double __ovld __cnfn convert_double_rte(short);
+double __ovld __cnfn convert_double_rte(uchar);
+double __ovld __cnfn convert_double_rte(uint);
+double __ovld __cnfn convert_double_rte(ulong);
+double __ovld __cnfn convert_double_rte(ushort);
+double __ovld __cnfn convert_double_rtn(char);
+double __ovld __cnfn convert_double_rtn(double);
+double __ovld __cnfn convert_double_rtn(float);
+double __ovld __cnfn convert_double_rtn(int);
+double __ovld __cnfn convert_double_rtn(long);
+double __ovld __cnfn convert_double_rtn(short);
+double __ovld __cnfn convert_double_rtn(uchar);
+double __ovld __cnfn convert_double_rtn(uint);
+double __ovld __cnfn convert_double_rtn(ulong);
+double __ovld __cnfn convert_double_rtn(ushort);
+double __ovld __cnfn convert_double_rtp(char);
+double __ovld __cnfn convert_double_rtp(double);
+double __ovld __cnfn convert_double_rtp(float);
+double __ovld __cnfn convert_double_rtp(int);
+double __ovld __cnfn convert_double_rtp(long);
+double __ovld __cnfn convert_double_rtp(short);
+double __ovld __cnfn convert_double_rtp(uchar);
+double __ovld __cnfn convert_double_rtp(uint);
+double __ovld __cnfn convert_double_rtp(ulong);
+double __ovld __cnfn convert_double_rtp(ushort);
+double __ovld __cnfn convert_double_rtz(char);
+double __ovld __cnfn convert_double_rtz(double);
+double __ovld __cnfn convert_double_rtz(float);
+double __ovld __cnfn convert_double_rtz(int);
+double __ovld __cnfn convert_double_rtz(long);
+double __ovld __cnfn convert_double_rtz(short);
+double __ovld __cnfn convert_double_rtz(uchar);
+double __ovld __cnfn convert_double_rtz(uint);
+double __ovld __cnfn convert_double_rtz(ulong);
+double __ovld __cnfn convert_double_rtz(ushort);
+double2 __ovld __cnfn convert_double2(char2);
+double2 __ovld __cnfn convert_double2(double2);
+double2 __ovld __cnfn convert_double2(float2);
+double2 __ovld __cnfn convert_double2(int2);
+double2 __ovld __cnfn convert_double2(long2);
+double2 __ovld __cnfn convert_double2(short2);
+double2 __ovld __cnfn convert_double2(uchar2);
+double2 __ovld __cnfn convert_double2(uint2);
+double2 __ovld __cnfn convert_double2(ulong2);
+double2 __ovld __cnfn convert_double2(ushort2);
+double2 __ovld __cnfn convert_double2_rte(char2);
+double2 __ovld __cnfn convert_double2_rte(double2);
+double2 __ovld __cnfn convert_double2_rte(float2);
+double2 __ovld __cnfn convert_double2_rte(int2);
+double2 __ovld __cnfn convert_double2_rte(long2);
+double2 __ovld __cnfn convert_double2_rte(short2);
+double2 __ovld __cnfn convert_double2_rte(uchar2);
+double2 __ovld __cnfn convert_double2_rte(uint2);
+double2 __ovld __cnfn convert_double2_rte(ulong2);
+double2 __ovld __cnfn convert_double2_rte(ushort2);
+double2 __ovld __cnfn convert_double2_rtn(char2);
+double2 __ovld __cnfn convert_double2_rtn(double2);
+double2 __ovld __cnfn convert_double2_rtn(float2);
+double2 __ovld __cnfn convert_double2_rtn(int2);
+double2 __ovld __cnfn convert_double2_rtn(long2);
+double2 __ovld __cnfn convert_double2_rtn(short2);
+double2 __ovld __cnfn convert_double2_rtn(uchar2);
+double2 __ovld __cnfn convert_double2_rtn(uint2);
+double2 __ovld __cnfn convert_double2_rtn(ulong2);
+double2 __ovld __cnfn convert_double2_rtn(ushort2);
+double2 __ovld __cnfn convert_double2_rtp(char2);
+double2 __ovld __cnfn convert_double2_rtp(double2);
+double2 __ovld __cnfn convert_double2_rtp(float2);
+double2 __ovld __cnfn convert_double2_rtp(int2);
+double2 __ovld __cnfn convert_double2_rtp(long2);
+double2 __ovld __cnfn convert_double2_rtp(short2);
+double2 __ovld __cnfn convert_double2_rtp(uchar2);
+double2 __ovld __cnfn convert_double2_rtp(uint2);
+double2 __ovld __cnfn convert_double2_rtp(ulong2);
+double2 __ovld __cnfn convert_double2_rtp(ushort2);
+double2 __ovld __cnfn convert_double2_rtz(char2);
+double2 __ovld __cnfn convert_double2_rtz(double2);
+double2 __ovld __cnfn convert_double2_rtz(float2);
+double2 __ovld __cnfn convert_double2_rtz(int2);
+double2 __ovld __cnfn convert_double2_rtz(long2);
+double2 __ovld __cnfn convert_double2_rtz(short2);
+double2 __ovld __cnfn convert_double2_rtz(uchar2);
+double2 __ovld __cnfn convert_double2_rtz(uint2);
+double2 __ovld __cnfn convert_double2_rtz(ulong2);
+double2 __ovld __cnfn convert_double2_rtz(ushort2);
+double3 __ovld __cnfn convert_double3(char3);
+double3 __ovld __cnfn convert_double3(double3);
+double3 __ovld __cnfn convert_double3(float3);
+double3 __ovld __cnfn convert_double3(int3);
+double3 __ovld __cnfn convert_double3(long3);
+double3 __ovld __cnfn convert_double3(short3);
+double3 __ovld __cnfn convert_double3(uchar3);
+double3 __ovld __cnfn convert_double3(uint3);
+double3 __ovld __cnfn convert_double3(ulong3);
+double3 __ovld __cnfn convert_double3(ushort3);
+double3 __ovld __cnfn convert_double3_rte(char3);
+double3 __ovld __cnfn convert_double3_rte(double3);
+double3 __ovld __cnfn convert_double3_rte(float3);
+double3 __ovld __cnfn convert_double3_rte(int3);
+double3 __ovld __cnfn convert_double3_rte(long3);
+double3 __ovld __cnfn convert_double3_rte(short3);
+double3 __ovld __cnfn convert_double3_rte(uchar3);
+double3 __ovld __cnfn convert_double3_rte(uint3);
+double3 __ovld __cnfn convert_double3_rte(ulong3);
+double3 __ovld __cnfn convert_double3_rte(ushort3);
+double3 __ovld __cnfn convert_double3_rtn(char3);
+double3 __ovld __cnfn convert_double3_rtn(double3);
+double3 __ovld __cnfn convert_double3_rtn(float3);
+double3 __ovld __cnfn convert_double3_rtn(int3);
+double3 __ovld __cnfn convert_double3_rtn(long3);
+double3 __ovld __cnfn convert_double3_rtn(short3);
+double3 __ovld __cnfn convert_double3_rtn(uchar3);
+double3 __ovld __cnfn convert_double3_rtn(uint3);
+double3 __ovld __cnfn convert_double3_rtn(ulong3);
+double3 __ovld __cnfn convert_double3_rtn(ushort3);
+double3 __ovld __cnfn convert_double3_rtp(char3);
+double3 __ovld __cnfn convert_double3_rtp(double3);
+double3 __ovld __cnfn convert_double3_rtp(float3);
+double3 __ovld __cnfn convert_double3_rtp(int3);
+double3 __ovld __cnfn convert_double3_rtp(long3);
+double3 __ovld __cnfn convert_double3_rtp(short3);
+double3 __ovld __cnfn convert_double3_rtp(uchar3);
+double3 __ovld __cnfn convert_double3_rtp(uint3);
+double3 __ovld __cnfn convert_double3_rtp(ulong3);
+double3 __ovld __cnfn convert_double3_rtp(ushort3);
+double3 __ovld __cnfn convert_double3_rtz(char3);
+double3 __ovld __cnfn convert_double3_rtz(double3);
+double3 __ovld __cnfn convert_double3_rtz(float3);
+double3 __ovld __cnfn convert_double3_rtz(int3);
+double3 __ovld __cnfn convert_double3_rtz(long3);
+double3 __ovld __cnfn convert_double3_rtz(short3);
+double3 __ovld __cnfn convert_double3_rtz(uchar3);
+double3 __ovld __cnfn convert_double3_rtz(uint3);
+double3 __ovld __cnfn convert_double3_rtz(ulong3);
+double3 __ovld __cnfn convert_double3_rtz(ushort3);
+double4 __ovld __cnfn convert_double4(char4);
+double4 __ovld __cnfn convert_double4(double4);
+double4 __ovld __cnfn convert_double4(float4);
+double4 __ovld __cnfn convert_double4(int4);
+double4 __ovld __cnfn convert_double4(long4);
+double4 __ovld __cnfn convert_double4(short4);
+double4 __ovld __cnfn convert_double4(uchar4);
+double4 __ovld __cnfn convert_double4(uint4);
+double4 __ovld __cnfn convert_double4(ulong4);
+double4 __ovld __cnfn convert_double4(ushort4);
+double4 __ovld __cnfn convert_double4_rte(char4);
+double4 __ovld __cnfn convert_double4_rte(double4);
+double4 __ovld __cnfn convert_double4_rte(float4);
+double4 __ovld __cnfn convert_double4_rte(int4);
+double4 __ovld __cnfn convert_double4_rte(long4);
+double4 __ovld __cnfn convert_double4_rte(short4);
+double4 __ovld __cnfn convert_double4_rte(uchar4);
+double4 __ovld __cnfn convert_double4_rte(uint4);
+double4 __ovld __cnfn convert_double4_rte(ulong4);
+double4 __ovld __cnfn convert_double4_rte(ushort4);
+double4 __ovld __cnfn convert_double4_rtn(char4);
+double4 __ovld __cnfn convert_double4_rtn(double4);
+double4 __ovld __cnfn convert_double4_rtn(float4);
+double4 __ovld __cnfn convert_double4_rtn(int4);
+double4 __ovld __cnfn convert_double4_rtn(long4);
+double4 __ovld __cnfn convert_double4_rtn(short4);
+double4 __ovld __cnfn convert_double4_rtn(uchar4);
+double4 __ovld __cnfn convert_double4_rtn(uint4);
+double4 __ovld __cnfn convert_double4_rtn(ulong4);
+double4 __ovld __cnfn convert_double4_rtn(ushort4);
+double4 __ovld __cnfn convert_double4_rtp(char4);
+double4 __ovld __cnfn convert_double4_rtp(double4);
+double4 __ovld __cnfn convert_double4_rtp(float4);
+double4 __ovld __cnfn convert_double4_rtp(int4);
+double4 __ovld __cnfn convert_double4_rtp(long4);
+double4 __ovld __cnfn convert_double4_rtp(short4);
+double4 __ovld __cnfn convert_double4_rtp(uchar4);
+double4 __ovld __cnfn convert_double4_rtp(uint4);
+double4 __ovld __cnfn convert_double4_rtp(ulong4);
+double4 __ovld __cnfn convert_double4_rtp(ushort4);
+double4 __ovld __cnfn convert_double4_rtz(char4);
+double4 __ovld __cnfn convert_double4_rtz(double4);
+double4 __ovld __cnfn convert_double4_rtz(float4);
+double4 __ovld __cnfn convert_double4_rtz(int4);
+double4 __ovld __cnfn convert_double4_rtz(long4);
+double4 __ovld __cnfn convert_double4_rtz(short4);
+double4 __ovld __cnfn convert_double4_rtz(uchar4);
+double4 __ovld __cnfn convert_double4_rtz(uint4);
+double4 __ovld __cnfn convert_double4_rtz(ulong4);
+double4 __ovld __cnfn convert_double4_rtz(ushort4);
+double8 __ovld __cnfn convert_double8(char8);
+double8 __ovld __cnfn convert_double8(double8);
+double8 __ovld __cnfn convert_double8(float8);
+double8 __ovld __cnfn convert_double8(int8);
+double8 __ovld __cnfn convert_double8(long8);
+double8 __ovld __cnfn convert_double8(short8);
+double8 __ovld __cnfn convert_double8(uchar8);
+double8 __ovld __cnfn convert_double8(uint8);
+double8 __ovld __cnfn convert_double8(ulong8);
+double8 __ovld __cnfn convert_double8(ushort8);
+double8 __ovld __cnfn convert_double8_rte(char8);
+double8 __ovld __cnfn convert_double8_rte(double8);
+double8 __ovld __cnfn convert_double8_rte(float8);
+double8 __ovld __cnfn convert_double8_rte(int8);
+double8 __ovld __cnfn convert_double8_rte(long8);
+double8 __ovld __cnfn convert_double8_rte(short8);
+double8 __ovld __cnfn convert_double8_rte(uchar8);
+double8 __ovld __cnfn convert_double8_rte(uint8);
+double8 __ovld __cnfn convert_double8_rte(ulong8);
+double8 __ovld __cnfn convert_double8_rte(ushort8);
+double8 __ovld __cnfn convert_double8_rtn(char8);
+double8 __ovld __cnfn convert_double8_rtn(double8);
+double8 __ovld __cnfn convert_double8_rtn(float8);
+double8 __ovld __cnfn convert_double8_rtn(int8);
+double8 __ovld __cnfn convert_double8_rtn(long8);
+double8 __ovld __cnfn convert_double8_rtn(short8);
+double8 __ovld __cnfn convert_double8_rtn(uchar8);
+double8 __ovld __cnfn convert_double8_rtn(uint8);
+double8 __ovld __cnfn convert_double8_rtn(ulong8);
+double8 __ovld __cnfn convert_double8_rtn(ushort8);
+double8 __ovld __cnfn convert_double8_rtp(char8);
+double8 __ovld __cnfn convert_double8_rtp(double8);
+double8 __ovld __cnfn convert_double8_rtp(float8);
+double8 __ovld __cnfn convert_double8_rtp(int8);
+double8 __ovld __cnfn convert_double8_rtp(long8);
+double8 __ovld __cnfn convert_double8_rtp(short8);
+double8 __ovld __cnfn convert_double8_rtp(uchar8);
+double8 __ovld __cnfn convert_double8_rtp(uint8);
+double8 __ovld __cnfn convert_double8_rtp(ulong8);
+double8 __ovld __cnfn convert_double8_rtp(ushort8);
+double8 __ovld __cnfn convert_double8_rtz(char8);
+double8 __ovld __cnfn convert_double8_rtz(double8);
+double8 __ovld __cnfn convert_double8_rtz(float8);
+double8 __ovld __cnfn convert_double8_rtz(int8);
+double8 __ovld __cnfn convert_double8_rtz(long8);
+double8 __ovld __cnfn convert_double8_rtz(short8);
+double8 __ovld __cnfn convert_double8_rtz(uchar8);
+double8 __ovld __cnfn convert_double8_rtz(uint8);
+double8 __ovld __cnfn convert_double8_rtz(ulong8);
+double8 __ovld __cnfn convert_double8_rtz(ushort8);
+double16 __ovld __cnfn convert_double16(char16);
+double16 __ovld __cnfn convert_double16(double16);
+double16 __ovld __cnfn convert_double16(float16);
+double16 __ovld __cnfn convert_double16(int16);
+double16 __ovld __cnfn convert_double16(long16);
+double16 __ovld __cnfn convert_double16(short16);
+double16 __ovld __cnfn convert_double16(uchar16);
+double16 __ovld __cnfn convert_double16(uint16);
+double16 __ovld __cnfn convert_double16(ulong16);
+double16 __ovld __cnfn convert_double16(ushort16);
+double16 __ovld __cnfn convert_double16_rte(char16);
+double16 __ovld __cnfn convert_double16_rte(double16);
+double16 __ovld __cnfn convert_double16_rte(float16);
+double16 __ovld __cnfn convert_double16_rte(int16);
+double16 __ovld __cnfn convert_double16_rte(long16);
+double16 __ovld __cnfn convert_double16_rte(short16);
+double16 __ovld __cnfn convert_double16_rte(uchar16);
+double16 __ovld __cnfn convert_double16_rte(uint16);
+double16 __ovld __cnfn convert_double16_rte(ulong16);
+double16 __ovld __cnfn convert_double16_rte(ushort16);
+double16 __ovld __cnfn convert_double16_rtn(char16);
+double16 __ovld __cnfn convert_double16_rtn(double16);
+double16 __ovld __cnfn convert_double16_rtn(float16);
+double16 __ovld __cnfn convert_double16_rtn(int16);
+double16 __ovld __cnfn convert_double16_rtn(long16);
+double16 __ovld __cnfn convert_double16_rtn(short16);
+double16 __ovld __cnfn convert_double16_rtn(uchar16);
+double16 __ovld __cnfn convert_double16_rtn(uint16);
+double16 __ovld __cnfn convert_double16_rtn(ulong16);
+double16 __ovld __cnfn convert_double16_rtn(ushort16);
+double16 __ovld __cnfn convert_double16_rtp(char16);
+double16 __ovld __cnfn convert_double16_rtp(double16);
+double16 __ovld __cnfn convert_double16_rtp(float16);
+double16 __ovld __cnfn convert_double16_rtp(int16);
+double16 __ovld __cnfn convert_double16_rtp(long16);
+double16 __ovld __cnfn convert_double16_rtp(short16);
+double16 __ovld __cnfn convert_double16_rtp(uchar16);
+double16 __ovld __cnfn convert_double16_rtp(uint16);
+double16 __ovld __cnfn convert_double16_rtp(ulong16);
+double16 __ovld __cnfn convert_double16_rtp(ushort16);
+double16 __ovld __cnfn convert_double16_rtz(char16);
+double16 __ovld __cnfn convert_double16_rtz(double16);
+double16 __ovld __cnfn convert_double16_rtz(float16);
+double16 __ovld __cnfn convert_double16_rtz(int16);
+double16 __ovld __cnfn convert_double16_rtz(long16);
+double16 __ovld __cnfn convert_double16_rtz(short16);
+double16 __ovld __cnfn convert_double16_rtz(uchar16);
+double16 __ovld __cnfn convert_double16_rtz(uint16);
+double16 __ovld __cnfn convert_double16_rtz(ulong16);
+double16 __ovld __cnfn convert_double16_rtz(ushort16);
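+
+// Illustrative usage (a sketch, not part of the generated declarations):
+// for integer destinations the _sat forms clamp out-of-range lanes, whereas
+// the non-saturating forms leave such lanes implementation-defined, e.g.
+//   long2 v = convert_long2_sat_rtz((double2)(1e30, -3.7));
+//   // v.x == LONG_MAX (clamped), v.y == -3 (truncated toward zero)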
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+// Convert half types to non-double types.
+uchar __ovld __cnfn convert_uchar(half);
+uchar __ovld __cnfn convert_uchar_rte(half);
+uchar __ovld __cnfn convert_uchar_rtp(half);
+uchar __ovld __cnfn convert_uchar_rtn(half);
+uchar __ovld __cnfn convert_uchar_rtz(half);
+uchar __ovld __cnfn convert_uchar_sat(half);
+uchar __ovld __cnfn convert_uchar_sat_rte(half);
+uchar __ovld __cnfn convert_uchar_sat_rtp(half);
+uchar __ovld __cnfn convert_uchar_sat_rtn(half);
+uchar __ovld __cnfn convert_uchar_sat_rtz(half);
+uchar2 __ovld __cnfn convert_uchar2(half2);
+uchar2 __ovld __cnfn convert_uchar2_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
+uchar3 __ovld __cnfn convert_uchar3(half3);
+uchar3 __ovld __cnfn convert_uchar3_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
+uchar4 __ovld __cnfn convert_uchar4(half4);
+uchar4 __ovld __cnfn convert_uchar4_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
+uchar8 __ovld __cnfn convert_uchar8(half8);
+uchar8 __ovld __cnfn convert_uchar8_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
+uchar16 __ovld __cnfn convert_uchar16(half16);
+uchar16 __ovld __cnfn convert_uchar16_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
+ushort __ovld __cnfn convert_ushort(half);
+ushort __ovld __cnfn convert_ushort_rte(half);
+ushort __ovld __cnfn convert_ushort_rtp(half);
+ushort __ovld __cnfn convert_ushort_rtn(half);
+ushort __ovld __cnfn convert_ushort_rtz(half);
+ushort __ovld __cnfn convert_ushort_sat(half);
+ushort __ovld __cnfn convert_ushort_sat_rte(half);
+ushort __ovld __cnfn convert_ushort_sat_rtp(half);
+ushort __ovld __cnfn convert_ushort_sat_rtn(half);
+ushort __ovld __cnfn convert_ushort_sat_rtz(half);
+ushort2 __ovld __cnfn convert_ushort2(half2);
+ushort2 __ovld __cnfn convert_ushort2_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
+ushort3 __ovld __cnfn convert_ushort3(half3);
+ushort3 __ovld __cnfn convert_ushort3_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
+ushort4 __ovld __cnfn convert_ushort4(half4);
+ushort4 __ovld __cnfn convert_ushort4_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
+ushort8 __ovld __cnfn convert_ushort8(half8);
+ushort8 __ovld __cnfn convert_ushort8_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
+ushort16 __ovld __cnfn convert_ushort16(half16);
+ushort16 __ovld __cnfn convert_ushort16_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
+uint __ovld __cnfn convert_uint(half);
+uint __ovld __cnfn convert_uint_rte(half);
+uint __ovld __cnfn convert_uint_rtp(half);
+uint __ovld __cnfn convert_uint_rtn(half);
+uint __ovld __cnfn convert_uint_rtz(half);
+uint __ovld __cnfn convert_uint_sat(half);
+uint __ovld __cnfn convert_uint_sat_rte(half);
+uint __ovld __cnfn convert_uint_sat_rtp(half);
+uint __ovld __cnfn convert_uint_sat_rtn(half);
+uint __ovld __cnfn convert_uint_sat_rtz(half);
+uint2 __ovld __cnfn convert_uint2(half2);
+uint2 __ovld __cnfn convert_uint2_rte(half2);
+uint2 __ovld __cnfn convert_uint2_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_rtz(half2);
+uint2 __ovld __cnfn convert_uint2_sat(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
+uint3 __ovld __cnfn convert_uint3(half3);
+uint3 __ovld __cnfn convert_uint3_rte(half3);
+uint3 __ovld __cnfn convert_uint3_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_rtz(half3);
+uint3 __ovld __cnfn convert_uint3_sat(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
+uint4 __ovld __cnfn convert_uint4(half4);
+uint4 __ovld __cnfn convert_uint4_rte(half4);
+uint4 __ovld __cnfn convert_uint4_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_rtz(half4);
+uint4 __ovld __cnfn convert_uint4_sat(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
+uint8 __ovld __cnfn convert_uint8(half8);
+uint8 __ovld __cnfn convert_uint8_rte(half8);
+uint8 __ovld __cnfn convert_uint8_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_rtz(half8);
+uint8 __ovld __cnfn convert_uint8_sat(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
+uint16 __ovld __cnfn convert_uint16(half16);
+uint16 __ovld __cnfn convert_uint16_rte(half16);
+uint16 __ovld __cnfn convert_uint16_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_rtz(half16);
+uint16 __ovld __cnfn convert_uint16_sat(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
+ulong __ovld __cnfn convert_ulong(half);
+ulong __ovld __cnfn convert_ulong_rte(half);
+ulong __ovld __cnfn convert_ulong_rtp(half);
+ulong __ovld __cnfn convert_ulong_rtn(half);
+ulong __ovld __cnfn convert_ulong_rtz(half);
+ulong __ovld __cnfn convert_ulong_sat(half);
+ulong __ovld __cnfn convert_ulong_sat_rte(half);
+ulong __ovld __cnfn convert_ulong_sat_rtp(half);
+ulong __ovld __cnfn convert_ulong_sat_rtn(half);
+ulong __ovld __cnfn convert_ulong_sat_rtz(half);
+ulong2 __ovld __cnfn convert_ulong2(half2);
+ulong2 __ovld __cnfn convert_ulong2_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
+ulong3 __ovld __cnfn convert_ulong3(half3);
+ulong3 __ovld __cnfn convert_ulong3_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
+ulong4 __ovld __cnfn convert_ulong4(half4);
+ulong4 __ovld __cnfn convert_ulong4_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
+ulong8 __ovld __cnfn convert_ulong8(half8);
+ulong8 __ovld __cnfn convert_ulong8_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
+ulong16 __ovld __cnfn convert_ulong16(half16);
+ulong16 __ovld __cnfn convert_ulong16_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
+char __ovld __cnfn convert_char(half);
+char __ovld __cnfn convert_char_rte(half);
+char __ovld __cnfn convert_char_rtp(half);
+char __ovld __cnfn convert_char_rtn(half);
+char __ovld __cnfn convert_char_rtz(half);
+char __ovld __cnfn convert_char_sat(half);
+char __ovld __cnfn convert_char_sat_rte(half);
+char __ovld __cnfn convert_char_sat_rtp(half);
+char __ovld __cnfn convert_char_sat_rtn(half);
+char __ovld __cnfn convert_char_sat_rtz(half);
+char2 __ovld __cnfn convert_char2(half2);
+char2 __ovld __cnfn convert_char2_rte(half2);
+char2 __ovld __cnfn convert_char2_rtp(half2);
+char2 __ovld __cnfn convert_char2_rtn(half2);
+char2 __ovld __cnfn convert_char2_rtz(half2);
+char2 __ovld __cnfn convert_char2_sat(half2);
+char2 __ovld __cnfn convert_char2_sat_rte(half2);
+char2 __ovld __cnfn convert_char2_sat_rtp(half2);
+char2 __ovld __cnfn convert_char2_sat_rtn(half2);
+char2 __ovld __cnfn convert_char2_sat_rtz(half2);
+char3 __ovld __cnfn convert_char3(half3);
+char3 __ovld __cnfn convert_char3_rte(half3);
+char3 __ovld __cnfn convert_char3_rtp(half3);
+char3 __ovld __cnfn convert_char3_rtn(half3);
+char3 __ovld __cnfn convert_char3_rtz(half3);
+char3 __ovld __cnfn convert_char3_sat(half3);
+char3 __ovld __cnfn convert_char3_sat_rte(half3);
+char3 __ovld __cnfn convert_char3_sat_rtp(half3);
+char3 __ovld __cnfn convert_char3_sat_rtn(half3);
+char3 __ovld __cnfn convert_char3_sat_rtz(half3);
+char4 __ovld __cnfn convert_char4(half4);
+char4 __ovld __cnfn convert_char4_rte(half4);
+char4 __ovld __cnfn convert_char4_rtp(half4);
+char4 __ovld __cnfn convert_char4_rtn(half4);
+char4 __ovld __cnfn convert_char4_rtz(half4);
+char4 __ovld __cnfn convert_char4_sat(half4);
+char4 __ovld __cnfn convert_char4_sat_rte(half4);
+char4 __ovld __cnfn convert_char4_sat_rtp(half4);
+char4 __ovld __cnfn convert_char4_sat_rtn(half4);
+char4 __ovld __cnfn convert_char4_sat_rtz(half4);
+char8 __ovld __cnfn convert_char8(half8);
+char8 __ovld __cnfn convert_char8_rte(half8);
+char8 __ovld __cnfn convert_char8_rtp(half8);
+char8 __ovld __cnfn convert_char8_rtn(half8);
+char8 __ovld __cnfn convert_char8_rtz(half8);
+char8 __ovld __cnfn convert_char8_sat(half8);
+char8 __ovld __cnfn convert_char8_sat_rte(half8);
+char8 __ovld __cnfn convert_char8_sat_rtp(half8);
+char8 __ovld __cnfn convert_char8_sat_rtn(half8);
+char8 __ovld __cnfn convert_char8_sat_rtz(half8);
+char16 __ovld __cnfn convert_char16(half16);
+char16 __ovld __cnfn convert_char16_rte(half16);
+char16 __ovld __cnfn convert_char16_rtp(half16);
+char16 __ovld __cnfn convert_char16_rtn(half16);
+char16 __ovld __cnfn convert_char16_rtz(half16);
+char16 __ovld __cnfn convert_char16_sat(half16);
+char16 __ovld __cnfn convert_char16_sat_rte(half16);
+char16 __ovld __cnfn convert_char16_sat_rtp(half16);
+char16 __ovld __cnfn convert_char16_sat_rtn(half16);
+char16 __ovld __cnfn convert_char16_sat_rtz(half16);
+short __ovld __cnfn convert_short(half);
+short __ovld __cnfn convert_short_rte(half);
+short __ovld __cnfn convert_short_rtp(half);
+short __ovld __cnfn convert_short_rtn(half);
+short __ovld __cnfn convert_short_rtz(half);
+short __ovld __cnfn convert_short_sat(half);
+short __ovld __cnfn convert_short_sat_rte(half);
+short __ovld __cnfn convert_short_sat_rtp(half);
+short __ovld __cnfn convert_short_sat_rtn(half);
+short __ovld __cnfn convert_short_sat_rtz(half);
+short2 __ovld __cnfn convert_short2(half2);
+short2 __ovld __cnfn convert_short2_rte(half2);
+short2 __ovld __cnfn convert_short2_rtp(half2);
+short2 __ovld __cnfn convert_short2_rtn(half2);
+short2 __ovld __cnfn convert_short2_rtz(half2);
+short2 __ovld __cnfn convert_short2_sat(half2);
+short2 __ovld __cnfn convert_short2_sat_rte(half2);
+short2 __ovld __cnfn convert_short2_sat_rtp(half2);
+short2 __ovld __cnfn convert_short2_sat_rtn(half2);
+short2 __ovld __cnfn convert_short2_sat_rtz(half2);
+short3 __ovld __cnfn convert_short3(half3);
+short3 __ovld __cnfn convert_short3_rte(half3);
+short3 __ovld __cnfn convert_short3_rtp(half3);
+short3 __ovld __cnfn convert_short3_rtn(half3);
+short3 __ovld __cnfn convert_short3_rtz(half3);
+short3 __ovld __cnfn convert_short3_sat(half3);
+short3 __ovld __cnfn convert_short3_sat_rte(half3);
+short3 __ovld __cnfn convert_short3_sat_rtp(half3);
+short3 __ovld __cnfn convert_short3_sat_rtn(half3);
+short3 __ovld __cnfn convert_short3_sat_rtz(half3);
+short4 __ovld __cnfn convert_short4(half4);
+short4 __ovld __cnfn convert_short4_rte(half4);
+short4 __ovld __cnfn convert_short4_rtp(half4);
+short4 __ovld __cnfn convert_short4_rtn(half4);
+short4 __ovld __cnfn convert_short4_rtz(half4);
+short4 __ovld __cnfn convert_short4_sat(half4);
+short4 __ovld __cnfn convert_short4_sat_rte(half4);
+short4 __ovld __cnfn convert_short4_sat_rtp(half4);
+short4 __ovld __cnfn convert_short4_sat_rtn(half4);
+short4 __ovld __cnfn convert_short4_sat_rtz(half4);
+short8 __ovld __cnfn convert_short8(half8);
+short8 __ovld __cnfn convert_short8_rte(half8);
+short8 __ovld __cnfn convert_short8_rtp(half8);
+short8 __ovld __cnfn convert_short8_rtn(half8);
+short8 __ovld __cnfn convert_short8_rtz(half8);
+short8 __ovld __cnfn convert_short8_sat(half8);
+short8 __ovld __cnfn convert_short8_sat_rte(half8);
+short8 __ovld __cnfn convert_short8_sat_rtp(half8);
+short8 __ovld __cnfn convert_short8_sat_rtn(half8);
+short8 __ovld __cnfn convert_short8_sat_rtz(half8);
+short16 __ovld __cnfn convert_short16(half16);
+short16 __ovld __cnfn convert_short16_rte(half16);
+short16 __ovld __cnfn convert_short16_rtp(half16);
+short16 __ovld __cnfn convert_short16_rtn(half16);
+short16 __ovld __cnfn convert_short16_rtz(half16);
+short16 __ovld __cnfn convert_short16_sat(half16);
+short16 __ovld __cnfn convert_short16_sat_rte(half16);
+short16 __ovld __cnfn convert_short16_sat_rtp(half16);
+short16 __ovld __cnfn convert_short16_sat_rtn(half16);
+short16 __ovld __cnfn convert_short16_sat_rtz(half16);
+int __ovld __cnfn convert_int(half);
+int __ovld __cnfn convert_int_rte(half);
+int __ovld __cnfn convert_int_rtp(half);
+int __ovld __cnfn convert_int_rtn(half);
+int __ovld __cnfn convert_int_rtz(half);
+int __ovld __cnfn convert_int_sat(half);
+int __ovld __cnfn convert_int_sat_rte(half);
+int __ovld __cnfn convert_int_sat_rtp(half);
+int __ovld __cnfn convert_int_sat_rtn(half);
+int __ovld __cnfn convert_int_sat_rtz(half);
+int2 __ovld __cnfn convert_int2(half2);
+int2 __ovld __cnfn convert_int2_rte(half2);
+int2 __ovld __cnfn convert_int2_rtp(half2);
+int2 __ovld __cnfn convert_int2_rtn(half2);
+int2 __ovld __cnfn convert_int2_rtz(half2);
+int2 __ovld __cnfn convert_int2_sat(half2);
+int2 __ovld __cnfn convert_int2_sat_rte(half2);
+int2 __ovld __cnfn convert_int2_sat_rtp(half2);
+int2 __ovld __cnfn convert_int2_sat_rtn(half2);
+int2 __ovld __cnfn convert_int2_sat_rtz(half2);
+int3 __ovld __cnfn convert_int3(half3);
+int3 __ovld __cnfn convert_int3_rte(half3);
+int3 __ovld __cnfn convert_int3_rtp(half3);
+int3 __ovld __cnfn convert_int3_rtn(half3);
+int3 __ovld __cnfn convert_int3_rtz(half3);
+int3 __ovld __cnfn convert_int3_sat(half3);
+int3 __ovld __cnfn convert_int3_sat_rte(half3);
+int3 __ovld __cnfn convert_int3_sat_rtp(half3);
+int3 __ovld __cnfn convert_int3_sat_rtn(half3);
+int3 __ovld __cnfn convert_int3_sat_rtz(half3);
+int4 __ovld __cnfn convert_int4(half4);
+int4 __ovld __cnfn convert_int4_rte(half4);
+int4 __ovld __cnfn convert_int4_rtp(half4);
+int4 __ovld __cnfn convert_int4_rtn(half4);
+int4 __ovld __cnfn convert_int4_rtz(half4);
+int4 __ovld __cnfn convert_int4_sat(half4);
+int4 __ovld __cnfn convert_int4_sat_rte(half4);
+int4 __ovld __cnfn convert_int4_sat_rtp(half4);
+int4 __ovld __cnfn convert_int4_sat_rtn(half4);
+int4 __ovld __cnfn convert_int4_sat_rtz(half4);
+int8 __ovld __cnfn convert_int8(half8);
+int8 __ovld __cnfn convert_int8_rte(half8);
+int8 __ovld __cnfn convert_int8_rtp(half8);
+int8 __ovld __cnfn convert_int8_rtn(half8);
+int8 __ovld __cnfn convert_int8_rtz(half8);
+int8 __ovld __cnfn convert_int8_sat(half8);
+int8 __ovld __cnfn convert_int8_sat_rte(half8);
+int8 __ovld __cnfn convert_int8_sat_rtp(half8);
+int8 __ovld __cnfn convert_int8_sat_rtn(half8);
+int8 __ovld __cnfn convert_int8_sat_rtz(half8);
+int16 __ovld __cnfn convert_int16(half16);
+int16 __ovld __cnfn convert_int16_rte(half16);
+int16 __ovld __cnfn convert_int16_rtp(half16);
+int16 __ovld __cnfn convert_int16_rtn(half16);
+int16 __ovld __cnfn convert_int16_rtz(half16);
+int16 __ovld __cnfn convert_int16_sat(half16);
+int16 __ovld __cnfn convert_int16_sat_rte(half16);
+int16 __ovld __cnfn convert_int16_sat_rtp(half16);
+int16 __ovld __cnfn convert_int16_sat_rtn(half16);
+int16 __ovld __cnfn convert_int16_sat_rtz(half16);
+long __ovld __cnfn convert_long(half);
+long __ovld __cnfn convert_long_rte(half);
+long __ovld __cnfn convert_long_rtp(half);
+long __ovld __cnfn convert_long_rtn(half);
+long __ovld __cnfn convert_long_rtz(half);
+long __ovld __cnfn convert_long_sat(half);
+long __ovld __cnfn convert_long_sat_rte(half);
+long __ovld __cnfn convert_long_sat_rtp(half);
+long __ovld __cnfn convert_long_sat_rtn(half);
+long __ovld __cnfn convert_long_sat_rtz(half);
+long2 __ovld __cnfn convert_long2(half2);
+long2 __ovld __cnfn convert_long2_rte(half2);
+long2 __ovld __cnfn convert_long2_rtp(half2);
+long2 __ovld __cnfn convert_long2_rtn(half2);
+long2 __ovld __cnfn convert_long2_rtz(half2);
+long2 __ovld __cnfn convert_long2_sat(half2);
+long2 __ovld __cnfn convert_long2_sat_rte(half2);
+long2 __ovld __cnfn convert_long2_sat_rtp(half2);
+long2 __ovld __cnfn convert_long2_sat_rtn(half2);
+long2 __ovld __cnfn convert_long2_sat_rtz(half2);
+long3 __ovld __cnfn convert_long3(half3);
+long3 __ovld __cnfn convert_long3_rte(half3);
+long3 __ovld __cnfn convert_long3_rtp(half3);
+long3 __ovld __cnfn convert_long3_rtn(half3);
+long3 __ovld __cnfn convert_long3_rtz(half3);
+long3 __ovld __cnfn convert_long3_sat(half3);
+long3 __ovld __cnfn convert_long3_sat_rte(half3);
+long3 __ovld __cnfn convert_long3_sat_rtp(half3);
+long3 __ovld __cnfn convert_long3_sat_rtn(half3);
+long3 __ovld __cnfn convert_long3_sat_rtz(half3);
+long4 __ovld __cnfn convert_long4(half4);
+long4 __ovld __cnfn convert_long4_rte(half4);
+long4 __ovld __cnfn convert_long4_rtp(half4);
+long4 __ovld __cnfn convert_long4_rtn(half4);
+long4 __ovld __cnfn convert_long4_rtz(half4);
+long4 __ovld __cnfn convert_long4_sat(half4);
+long4 __ovld __cnfn convert_long4_sat_rte(half4);
+long4 __ovld __cnfn convert_long4_sat_rtp(half4);
+long4 __ovld __cnfn convert_long4_sat_rtn(half4);
+long4 __ovld __cnfn convert_long4_sat_rtz(half4);
+long8 __ovld __cnfn convert_long8(half8);
+long8 __ovld __cnfn convert_long8_rte(half8);
+long8 __ovld __cnfn convert_long8_rtp(half8);
+long8 __ovld __cnfn convert_long8_rtn(half8);
+long8 __ovld __cnfn convert_long8_rtz(half8);
+long8 __ovld __cnfn convert_long8_sat(half8);
+long8 __ovld __cnfn convert_long8_sat_rte(half8);
+long8 __ovld __cnfn convert_long8_sat_rtp(half8);
+long8 __ovld __cnfn convert_long8_sat_rtn(half8);
+long8 __ovld __cnfn convert_long8_sat_rtz(half8);
+long16 __ovld __cnfn convert_long16(half16);
+long16 __ovld __cnfn convert_long16_rte(half16);
+long16 __ovld __cnfn convert_long16_rtp(half16);
+long16 __ovld __cnfn convert_long16_rtn(half16);
+long16 __ovld __cnfn convert_long16_rtz(half16);
+long16 __ovld __cnfn convert_long16_sat(half16);
+long16 __ovld __cnfn convert_long16_sat_rte(half16);
+long16 __ovld __cnfn convert_long16_sat_rtp(half16);
+long16 __ovld __cnfn convert_long16_sat_rtn(half16);
+long16 __ovld __cnfn convert_long16_sat_rtz(half16);
+float __ovld __cnfn convert_float(half);
+float __ovld __cnfn convert_float_rte(half);
+float __ovld __cnfn convert_float_rtp(half);
+float __ovld __cnfn convert_float_rtn(half);
+float __ovld __cnfn convert_float_rtz(half);
+float2 __ovld __cnfn convert_float2(half2);
+float2 __ovld __cnfn convert_float2_rte(half2);
+float2 __ovld __cnfn convert_float2_rtp(half2);
+float2 __ovld __cnfn convert_float2_rtn(half2);
+float2 __ovld __cnfn convert_float2_rtz(half2);
+float3 __ovld __cnfn convert_float3(half3);
+float3 __ovld __cnfn convert_float3_rte(half3);
+float3 __ovld __cnfn convert_float3_rtp(half3);
+float3 __ovld __cnfn convert_float3_rtn(half3);
+float3 __ovld __cnfn convert_float3_rtz(half3);
+float4 __ovld __cnfn convert_float4(half4);
+float4 __ovld __cnfn convert_float4_rte(half4);
+float4 __ovld __cnfn convert_float4_rtp(half4);
+float4 __ovld __cnfn convert_float4_rtn(half4);
+float4 __ovld __cnfn convert_float4_rtz(half4);
+float8 __ovld __cnfn convert_float8(half8);
+float8 __ovld __cnfn convert_float8_rte(half8);
+float8 __ovld __cnfn convert_float8_rtp(half8);
+float8 __ovld __cnfn convert_float8_rtn(half8);
+float8 __ovld __cnfn convert_float8_rtz(half8);
+float16 __ovld __cnfn convert_float16(half16);
+float16 __ovld __cnfn convert_float16_rte(half16);
+float16 __ovld __cnfn convert_float16_rtp(half16);
+float16 __ovld __cnfn convert_float16_rtn(half16);
+float16 __ovld __cnfn convert_float16_rtz(half16);
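+
+// Illustrative usage (a sketch, not part of the generated declarations;
+// `h` stands for any half4 value): the same suffix scheme applies to half
+// sources, e.g.
+//   uchar4 u = convert_uchar4_sat_rte(h);  // clamp to [0, 255], ties to even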
+
+// Convert non-double types to half types.
+half __ovld __cnfn convert_half(uchar);
+half __ovld __cnfn convert_half(ushort);
+half __ovld __cnfn convert_half(uint);
+half __ovld __cnfn convert_half(ulong);
+half __ovld __cnfn convert_half(char);
+half __ovld __cnfn convert_half(short);
+half __ovld __cnfn convert_half(int);
+half __ovld __cnfn convert_half(long);
+half __ovld __cnfn convert_half(float);
+half __ovld __cnfn convert_half(half);
+half __ovld __cnfn convert_half_rte(uchar);
+half __ovld __cnfn convert_half_rte(ushort);
+half __ovld __cnfn convert_half_rte(uint);
+half __ovld __cnfn convert_half_rte(ulong);
+half __ovld __cnfn convert_half_rte(char);
+half __ovld __cnfn convert_half_rte(short);
+half __ovld __cnfn convert_half_rte(int);
+half __ovld __cnfn convert_half_rte(long);
+half __ovld __cnfn convert_half_rte(float);
+half __ovld __cnfn convert_half_rte(half);
+half __ovld __cnfn convert_half_rtp(uchar);
+half __ovld __cnfn convert_half_rtp(ushort);
+half __ovld __cnfn convert_half_rtp(uint);
+half __ovld __cnfn convert_half_rtp(ulong);
+half __ovld __cnfn convert_half_rtp(char);
+half __ovld __cnfn convert_half_rtp(short);
+half __ovld __cnfn convert_half_rtp(int);
+half __ovld __cnfn convert_half_rtp(long);
+half __ovld __cnfn convert_half_rtp(float);
+half __ovld __cnfn convert_half_rtp(half);
+half __ovld __cnfn convert_half_rtn(uchar);
+half __ovld __cnfn convert_half_rtn(ushort);
+half __ovld __cnfn convert_half_rtn(uint);
+half __ovld __cnfn convert_half_rtn(ulong);
+half __ovld __cnfn convert_half_rtn(char);
+half __ovld __cnfn convert_half_rtn(short);
+half __ovld __cnfn convert_half_rtn(int);
+half __ovld __cnfn convert_half_rtn(long);
+half __ovld __cnfn convert_half_rtn(float);
+half __ovld __cnfn convert_half_rtn(half);
+half __ovld __cnfn convert_half_rtz(uchar);
+half __ovld __cnfn convert_half_rtz(ushort);
+half __ovld __cnfn convert_half_rtz(uint);
+half __ovld __cnfn convert_half_rtz(ulong);
+half __ovld __cnfn convert_half_rtz(char);
+half __ovld __cnfn convert_half_rtz(short);
+half __ovld __cnfn convert_half_rtz(int);
+half __ovld __cnfn convert_half_rtz(long);
+half __ovld __cnfn convert_half_rtz(float);
+half __ovld __cnfn convert_half_rtz(half);
+half2 __ovld __cnfn convert_half2(char2);
+half2 __ovld __cnfn convert_half2(uchar2);
+half2 __ovld __cnfn convert_half2(short2);
+half2 __ovld __cnfn convert_half2(ushort2);
+half2 __ovld __cnfn convert_half2(int2);
+half2 __ovld __cnfn convert_half2(uint2);
+half2 __ovld __cnfn convert_half2(long2);
+half2 __ovld __cnfn convert_half2(ulong2);
+half2 __ovld __cnfn convert_half2(float2);
+half2 __ovld __cnfn convert_half2(half2);
+half2 __ovld __cnfn convert_half2_rte(char2);
+half2 __ovld __cnfn convert_half2_rte(uchar2);
+half2 __ovld __cnfn convert_half2_rte(short2);
+half2 __ovld __cnfn convert_half2_rte(ushort2);
+half2 __ovld __cnfn convert_half2_rte(int2);
+half2 __ovld __cnfn convert_half2_rte(uint2);
+half2 __ovld __cnfn convert_half2_rte(long2);
+half2 __ovld __cnfn convert_half2_rte(ulong2);
+half2 __ovld __cnfn convert_half2_rte(float2);
+half2 __ovld __cnfn convert_half2_rte(half2);
+half2 __ovld __cnfn convert_half2_rtp(char2);
+half2 __ovld __cnfn convert_half2_rtp(uchar2);
+half2 __ovld __cnfn convert_half2_rtp(short2);
+half2 __ovld __cnfn convert_half2_rtp(ushort2);
+half2 __ovld __cnfn convert_half2_rtp(int2);
+half2 __ovld __cnfn convert_half2_rtp(uint2);
+half2 __ovld __cnfn convert_half2_rtp(long2);
+half2 __ovld __cnfn convert_half2_rtp(ulong2);
+half2 __ovld __cnfn convert_half2_rtp(float2);
+half2 __ovld __cnfn convert_half2_rtp(half2);
+half2 __ovld __cnfn convert_half2_rtn(char2);
+half2 __ovld __cnfn convert_half2_rtn(uchar2);
+half2 __ovld __cnfn convert_half2_rtn(short2);
+half2 __ovld __cnfn convert_half2_rtn(ushort2);
+half2 __ovld __cnfn convert_half2_rtn(int2);
+half2 __ovld __cnfn convert_half2_rtn(uint2);
+half2 __ovld __cnfn convert_half2_rtn(long2);
+half2 __ovld __cnfn convert_half2_rtn(ulong2);
+half2 __ovld __cnfn convert_half2_rtn(float2);
+half2 __ovld __cnfn convert_half2_rtn(half2);
+half2 __ovld __cnfn convert_half2_rtz(char2);
+half2 __ovld __cnfn convert_half2_rtz(uchar2);
+half2 __ovld __cnfn convert_half2_rtz(short2);
+half2 __ovld __cnfn convert_half2_rtz(ushort2);
+half2 __ovld __cnfn convert_half2_rtz(int2);
+half2 __ovld __cnfn convert_half2_rtz(uint2);
+half2 __ovld __cnfn convert_half2_rtz(long2);
+half2 __ovld __cnfn convert_half2_rtz(ulong2);
+half2 __ovld __cnfn convert_half2_rtz(float2);
+half2 __ovld __cnfn convert_half2_rtz(half2);
+half3 __ovld __cnfn convert_half3(char3);
+half3 __ovld __cnfn convert_half3(uchar3);
+half3 __ovld __cnfn convert_half3(short3);
+half3 __ovld __cnfn convert_half3(ushort3);
+half3 __ovld __cnfn convert_half3(int3);
+half3 __ovld __cnfn convert_half3(uint3);
+half3 __ovld __cnfn convert_half3(long3);
+half3 __ovld __cnfn convert_half3(ulong3);
+half3 __ovld __cnfn convert_half3(float3);
+half3 __ovld __cnfn convert_half3(half3);
+half3 __ovld __cnfn convert_half3_rte(char3);
+half3 __ovld __cnfn convert_half3_rte(uchar3);
+half3 __ovld __cnfn convert_half3_rte(short3);
+half3 __ovld __cnfn convert_half3_rte(ushort3);
+half3 __ovld __cnfn convert_half3_rte(int3);
+half3 __ovld __cnfn convert_half3_rte(uint3);
+half3 __ovld __cnfn convert_half3_rte(long3);
+half3 __ovld __cnfn convert_half3_rte(ulong3);
+half3 __ovld __cnfn convert_half3_rte(float3);
+half3 __ovld __cnfn convert_half3_rte(half3);
+half3 __ovld __cnfn convert_half3_rtp(char3);
+half3 __ovld __cnfn convert_half3_rtp(uchar3);
+half3 __ovld __cnfn convert_half3_rtp(short3);
+half3 __ovld __cnfn convert_half3_rtp(ushort3);
+half3 __ovld __cnfn convert_half3_rtp(int3);
+half3 __ovld __cnfn convert_half3_rtp(uint3);
+half3 __ovld __cnfn convert_half3_rtp(long3);
+half3 __ovld __cnfn convert_half3_rtp(ulong3);
+half3 __ovld __cnfn convert_half3_rtp(float3);
+half3 __ovld __cnfn convert_half3_rtp(half3);
+half3 __ovld __cnfn convert_half3_rtn(char3);
+half3 __ovld __cnfn convert_half3_rtn(uchar3);
+half3 __ovld __cnfn convert_half3_rtn(short3);
+half3 __ovld __cnfn convert_half3_rtn(ushort3);
+half3 __ovld __cnfn convert_half3_rtn(int3);
+half3 __ovld __cnfn convert_half3_rtn(uint3);
+half3 __ovld __cnfn convert_half3_rtn(long3);
+half3 __ovld __cnfn convert_half3_rtn(ulong3);
+half3 __ovld __cnfn convert_half3_rtn(float3);
+half3 __ovld __cnfn convert_half3_rtn(half3);
+half3 __ovld __cnfn convert_half3_rtz(char3);
+half3 __ovld __cnfn convert_half3_rtz(uchar3);
+half3 __ovld __cnfn convert_half3_rtz(short3);
+half3 __ovld __cnfn convert_half3_rtz(ushort3);
+half3 __ovld __cnfn convert_half3_rtz(int3);
+half3 __ovld __cnfn convert_half3_rtz(uint3);
+half3 __ovld __cnfn convert_half3_rtz(long3);
+half3 __ovld __cnfn convert_half3_rtz(ulong3);
+half3 __ovld __cnfn convert_half3_rtz(float3);
+half3 __ovld __cnfn convert_half3_rtz(half3);
+half4 __ovld __cnfn convert_half4(char4);
+half4 __ovld __cnfn convert_half4(uchar4);
+half4 __ovld __cnfn convert_half4(short4);
+half4 __ovld __cnfn convert_half4(ushort4);
+half4 __ovld __cnfn convert_half4(int4);
+half4 __ovld __cnfn convert_half4(uint4);
+half4 __ovld __cnfn convert_half4(long4);
+half4 __ovld __cnfn convert_half4(ulong4);
+half4 __ovld __cnfn convert_half4(float4);
+half4 __ovld __cnfn convert_half4(half4);
+half4 __ovld __cnfn convert_half4_rte(char4);
+half4 __ovld __cnfn convert_half4_rte(uchar4);
+half4 __ovld __cnfn convert_half4_rte(short4);
+half4 __ovld __cnfn convert_half4_rte(ushort4);
+half4 __ovld __cnfn convert_half4_rte(int4);
+half4 __ovld __cnfn convert_half4_rte(uint4);
+half4 __ovld __cnfn convert_half4_rte(long4);
+half4 __ovld __cnfn convert_half4_rte(ulong4);
+half4 __ovld __cnfn convert_half4_rte(float4);
+half4 __ovld __cnfn convert_half4_rte(half4);
+half4 __ovld __cnfn convert_half4_rtp(char4);
+half4 __ovld __cnfn convert_half4_rtp(uchar4);
+half4 __ovld __cnfn convert_half4_rtp(short4);
+half4 __ovld __cnfn convert_half4_rtp(ushort4);
+half4 __ovld __cnfn convert_half4_rtp(int4);
+half4 __ovld __cnfn convert_half4_rtp(uint4);
+half4 __ovld __cnfn convert_half4_rtp(long4);
+half4 __ovld __cnfn convert_half4_rtp(ulong4);
+half4 __ovld __cnfn convert_half4_rtp(float4);
+half4 __ovld __cnfn convert_half4_rtp(half4);
+half4 __ovld __cnfn convert_half4_rtn(char4);
+half4 __ovld __cnfn convert_half4_rtn(uchar4);
+half4 __ovld __cnfn convert_half4_rtn(short4);
+half4 __ovld __cnfn convert_half4_rtn(ushort4);
+half4 __ovld __cnfn convert_half4_rtn(int4);
+half4 __ovld __cnfn convert_half4_rtn(uint4);
+half4 __ovld __cnfn convert_half4_rtn(long4);
+half4 __ovld __cnfn convert_half4_rtn(ulong4);
+half4 __ovld __cnfn convert_half4_rtn(float4);
+half4 __ovld __cnfn convert_half4_rtn(half4);
+half4 __ovld __cnfn convert_half4_rtz(char4);
+half4 __ovld __cnfn convert_half4_rtz(uchar4);
+half4 __ovld __cnfn convert_half4_rtz(short4);
+half4 __ovld __cnfn convert_half4_rtz(ushort4);
+half4 __ovld __cnfn convert_half4_rtz(int4);
+half4 __ovld __cnfn convert_half4_rtz(uint4);
+half4 __ovld __cnfn convert_half4_rtz(long4);
+half4 __ovld __cnfn convert_half4_rtz(ulong4);
+half4 __ovld __cnfn convert_half4_rtz(float4);
+half4 __ovld __cnfn convert_half4_rtz(half4);
+half8 __ovld __cnfn convert_half8(char8);
+half8 __ovld __cnfn convert_half8(uchar8);
+half8 __ovld __cnfn convert_half8(short8);
+half8 __ovld __cnfn convert_half8(ushort8);
+half8 __ovld __cnfn convert_half8(int8);
+half8 __ovld __cnfn convert_half8(uint8);
+half8 __ovld __cnfn convert_half8(long8);
+half8 __ovld __cnfn convert_half8(ulong8);
+half8 __ovld __cnfn convert_half8(float8);
+half8 __ovld __cnfn convert_half8(half8);
+half8 __ovld __cnfn convert_half8_rte(char8);
+half8 __ovld __cnfn convert_half8_rte(uchar8);
+half8 __ovld __cnfn convert_half8_rte(short8);
+half8 __ovld __cnfn convert_half8_rte(ushort8);
+half8 __ovld __cnfn convert_half8_rte(int8);
+half8 __ovld __cnfn convert_half8_rte(uint8);
+half8 __ovld __cnfn convert_half8_rte(long8);
+half8 __ovld __cnfn convert_half8_rte(ulong8);
+half8 __ovld __cnfn convert_half8_rte(float8);
+half8 __ovld __cnfn convert_half8_rte(half8);
+half8 __ovld __cnfn convert_half8_rtp(char8);
+half8 __ovld __cnfn convert_half8_rtp(uchar8);
+half8 __ovld __cnfn convert_half8_rtp(short8);
+half8 __ovld __cnfn convert_half8_rtp(ushort8);
+half8 __ovld __cnfn convert_half8_rtp(int8);
+half8 __ovld __cnfn convert_half8_rtp(uint8);
+half8 __ovld __cnfn convert_half8_rtp(long8);
+half8 __ovld __cnfn convert_half8_rtp(ulong8);
+half8 __ovld __cnfn convert_half8_rtp(float8);
+half8 __ovld __cnfn convert_half8_rtp(half8);
+half8 __ovld __cnfn convert_half8_rtn(char8);
+half8 __ovld __cnfn convert_half8_rtn(uchar8);
+half8 __ovld __cnfn convert_half8_rtn(short8);
+half8 __ovld __cnfn convert_half8_rtn(ushort8);
+half8 __ovld __cnfn convert_half8_rtn(int8);
+half8 __ovld __cnfn convert_half8_rtn(uint8);
+half8 __ovld __cnfn convert_half8_rtn(long8);
+half8 __ovld __cnfn convert_half8_rtn(ulong8);
+half8 __ovld __cnfn convert_half8_rtn(float8);
+half8 __ovld __cnfn convert_half8_rtn(half8);
+half8 __ovld __cnfn convert_half8_rtz(char8);
+half8 __ovld __cnfn convert_half8_rtz(uchar8);
+half8 __ovld __cnfn convert_half8_rtz(short8);
+half8 __ovld __cnfn convert_half8_rtz(ushort8);
+half8 __ovld __cnfn convert_half8_rtz(int8);
+half8 __ovld __cnfn convert_half8_rtz(uint8);
+half8 __ovld __cnfn convert_half8_rtz(long8);
+half8 __ovld __cnfn convert_half8_rtz(ulong8);
+half8 __ovld __cnfn convert_half8_rtz(float8);
+half8 __ovld __cnfn convert_half8_rtz(half8);
+half16 __ovld __cnfn convert_half16(char16);
+half16 __ovld __cnfn convert_half16(uchar16);
+half16 __ovld __cnfn convert_half16(short16);
+half16 __ovld __cnfn convert_half16(ushort16);
+half16 __ovld __cnfn convert_half16(int16);
+half16 __ovld __cnfn convert_half16(uint16);
+half16 __ovld __cnfn convert_half16(long16);
+half16 __ovld __cnfn convert_half16(ulong16);
+half16 __ovld __cnfn convert_half16(float16);
+half16 __ovld __cnfn convert_half16(half16);
+half16 __ovld __cnfn convert_half16_rte(char16);
+half16 __ovld __cnfn convert_half16_rte(uchar16);
+half16 __ovld __cnfn convert_half16_rte(short16);
+half16 __ovld __cnfn convert_half16_rte(ushort16);
+half16 __ovld __cnfn convert_half16_rte(int16);
+half16 __ovld __cnfn convert_half16_rte(uint16);
+half16 __ovld __cnfn convert_half16_rte(long16);
+half16 __ovld __cnfn convert_half16_rte(ulong16);
+half16 __ovld __cnfn convert_half16_rte(float16);
+half16 __ovld __cnfn convert_half16_rte(half16);
+half16 __ovld __cnfn convert_half16_rtp(char16);
+half16 __ovld __cnfn convert_half16_rtp(uchar16);
+half16 __ovld __cnfn convert_half16_rtp(short16);
+half16 __ovld __cnfn convert_half16_rtp(ushort16);
+half16 __ovld __cnfn convert_half16_rtp(int16);
+half16 __ovld __cnfn convert_half16_rtp(uint16);
+half16 __ovld __cnfn convert_half16_rtp(long16);
+half16 __ovld __cnfn convert_half16_rtp(ulong16);
+half16 __ovld __cnfn convert_half16_rtp(float16);
+half16 __ovld __cnfn convert_half16_rtp(half16);
+half16 __ovld __cnfn convert_half16_rtn(char16);
+half16 __ovld __cnfn convert_half16_rtn(uchar16);
+half16 __ovld __cnfn convert_half16_rtn(short16);
+half16 __ovld __cnfn convert_half16_rtn(ushort16);
+half16 __ovld __cnfn convert_half16_rtn(int16);
+half16 __ovld __cnfn convert_half16_rtn(uint16);
+half16 __ovld __cnfn convert_half16_rtn(long16);
+half16 __ovld __cnfn convert_half16_rtn(ulong16);
+half16 __ovld __cnfn convert_half16_rtn(float16);
+half16 __ovld __cnfn convert_half16_rtn(half16);
+half16 __ovld __cnfn convert_half16_rtz(char16);
+half16 __ovld __cnfn convert_half16_rtz(uchar16);
+half16 __ovld __cnfn convert_half16_rtz(short16);
+half16 __ovld __cnfn convert_half16_rtz(ushort16);
+half16 __ovld __cnfn convert_half16_rtz(int16);
+half16 __ovld __cnfn convert_half16_rtz(uint16);
+half16 __ovld __cnfn convert_half16_rtz(long16);
+half16 __ovld __cnfn convert_half16_rtz(ulong16);
+half16 __ovld __cnfn convert_half16_rtz(float16);
+half16 __ovld __cnfn convert_half16_rtz(half16);
+
+// Convert half types to double types.
+#ifdef cl_khr_fp64
+double __ovld __cnfn convert_double(half);
+double __ovld __cnfn convert_double_rte(half);
+double __ovld __cnfn convert_double_rtp(half);
+double __ovld __cnfn convert_double_rtn(half);
+double __ovld __cnfn convert_double_rtz(half);
+double2 __ovld __cnfn convert_double2(half2);
+double2 __ovld __cnfn convert_double2_rte(half2);
+double2 __ovld __cnfn convert_double2_rtp(half2);
+double2 __ovld __cnfn convert_double2_rtn(half2);
+double2 __ovld __cnfn convert_double2_rtz(half2);
+double3 __ovld __cnfn convert_double3(half3);
+double3 __ovld __cnfn convert_double3_rte(half3);
+double3 __ovld __cnfn convert_double3_rtp(half3);
+double3 __ovld __cnfn convert_double3_rtn(half3);
+double3 __ovld __cnfn convert_double3_rtz(half3);
+double4 __ovld __cnfn convert_double4(half4);
+double4 __ovld __cnfn convert_double4_rte(half4);
+double4 __ovld __cnfn convert_double4_rtp(half4);
+double4 __ovld __cnfn convert_double4_rtn(half4);
+double4 __ovld __cnfn convert_double4_rtz(half4);
+double8 __ovld __cnfn convert_double8(half8);
+double8 __ovld __cnfn convert_double8_rte(half8);
+double8 __ovld __cnfn convert_double8_rtp(half8);
+double8 __ovld __cnfn convert_double8_rtn(half8);
+double8 __ovld __cnfn convert_double8_rtz(half8);
+double16 __ovld __cnfn convert_double16(half16);
+double16 __ovld __cnfn convert_double16_rte(half16);
+double16 __ovld __cnfn convert_double16_rtp(half16);
+double16 __ovld __cnfn convert_double16_rtn(half16);
+double16 __ovld __cnfn convert_double16_rtz(half16);
+
+// Convert double types to half types.
+half __ovld __cnfn convert_half(double);
+half __ovld __cnfn convert_half_rte(double);
+half __ovld __cnfn convert_half_rtp(double);
+half __ovld __cnfn convert_half_rtn(double);
+half __ovld __cnfn convert_half_rtz(double);
+half2 __ovld __cnfn convert_half2(double2);
+half2 __ovld __cnfn convert_half2_rte(double2);
+half2 __ovld __cnfn convert_half2_rtp(double2);
+half2 __ovld __cnfn convert_half2_rtn(double2);
+half2 __ovld __cnfn convert_half2_rtz(double2);
+half3 __ovld __cnfn convert_half3(double3);
+half3 __ovld __cnfn convert_half3_rte(double3);
+half3 __ovld __cnfn convert_half3_rtp(double3);
+half3 __ovld __cnfn convert_half3_rtn(double3);
+half3 __ovld __cnfn convert_half3_rtz(double3);
+half4 __ovld __cnfn convert_half4(double4);
+half4 __ovld __cnfn convert_half4_rte(double4);
+half4 __ovld __cnfn convert_half4_rtp(double4);
+half4 __ovld __cnfn convert_half4_rtn(double4);
+half4 __ovld __cnfn convert_half4_rtz(double4);
+half8 __ovld __cnfn convert_half8(double8);
+half8 __ovld __cnfn convert_half8_rte(double8);
+half8 __ovld __cnfn convert_half8_rtp(double8);
+half8 __ovld __cnfn convert_half8_rtn(double8);
+half8 __ovld __cnfn convert_half8_rtz(double8);
+half16 __ovld __cnfn convert_half16(double16);
+half16 __ovld __cnfn convert_half16_rte(double16);
+half16 __ovld __cnfn convert_half16_rtp(double16);
+half16 __ovld __cnfn convert_half16_rtn(double16);
+half16 __ovld __cnfn convert_half16_rtz(double16);
+#endif //cl_khr_fp64
+
+#endif // cl_khr_fp16
+
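+/*
+ * Illustrative note (editorial sketch, not spec text): the _rte/_rtp/_rtn/
+ * _rtz suffixes above select round-to-nearest-even, round toward +infinity,
+ * round toward -infinity, and round toward zero. A hypothetical use,
+ * assuming cl_khr_fp16:
+ *
+ *   float f  = 1.00048828125f;        // exactly 1 + 2^-11, halfway in half
+ *   half  lo = convert_half_rtz(f);   // 1.0h (truncated toward zero)
+ *   half  hi = convert_half_rtp(f);   // 1.0009765625h (next half above 1)
+ */
+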
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
+
+/**
+ * Returns the number of dimensions in use. This is the
+ * value given to the work_dim argument specified in
+ * clEnqueueNDRangeKernel.
+ * For clEnqueueTask, this returns 1.
+ */
+uint __ovld __cnfn get_work_dim(void);
+
+/**
+ * Returns the number of global work-items specified for the
+ * dimension identified by dimindx. This value is given by
+ * the global_work_size argument to
+ * clEnqueueNDRangeKernel. Valid values of dimindx
+ * are 0 to get_work_dim() - 1. For other values of
+ * dimindx, get_global_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_global_size(uint);
+
+/**
+ * Returns the unique global work-item ID value for the
+ * dimension identified by dimindx. The global work-item
+ * ID specifies the work-item ID based on the number of
+ * global work-items specified to execute the kernel. Valid
+ * values of dimindx are 0 to get_work_dim() - 1. For
+ * other values of dimindx, get_global_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_id(uint);
+
+/**
+ * Returns the number of local work-items specified in the
+ * dimension identified by dimindx. This value is given by
+ * the local_work_size argument to
+ * clEnqueueNDRangeKernel if local_work_size is not
+ * NULL; otherwise the OpenCL implementation chooses
+ * an appropriate local_work_size value which is returned
+ * by this function. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_local_size(uint);
+
+/**
+ * Returns the unique local work-item ID, i.e. the ID of a
+ * work-item within a specific work-group, for the dimension
+ * identified by dimindx. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_local_id(uint);
+
+/**
+ * Returns the number of work-groups that will execute a
+ * kernel for the dimension identified by dimindx.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values of dimindx, get_num_groups() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_num_groups(uint);
+
+/**
+ * get_group_id returns the work-group ID, which is a
+ * number from 0 to get_num_groups(dimindx) - 1.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_group_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_group_id(uint);
+
+/**
+ * get_global_offset returns the offset values specified in
+ * the global_work_offset argument to
+ * clEnqueueNDRangeKernel.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_global_offset() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_offset(uint);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+size_t __ovld get_enqueued_local_size(uint);
+size_t __ovld get_global_linear_id(void);
+size_t __ovld get_local_linear_id(void);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
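+/*
+ * Illustrative note (editorial sketch, not spec text): for an NDRange in
+ * dimension 0 the queries above are related by
+ *
+ *   get_global_id(0) == get_group_id(0) * get_local_size(0)
+ *                       + get_local_id(0) + get_global_offset(0)
+ *
+ * and the usual one-work-item-per-element pattern is:
+ *
+ *   kernel void scale(global float *buf, float k) {
+ *     size_t gid = get_global_id(0);
+ *     buf[gid] *= k;
+ *   }
+ */
+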
+// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
+
+/**
+ * Arc cosine function.
+ */
+float __ovld __cnfn acos(float);
+float2 __ovld __cnfn acos(float2);
+float3 __ovld __cnfn acos(float3);
+float4 __ovld __cnfn acos(float4);
+float8 __ovld __cnfn acos(float8);
+float16 __ovld __cnfn acos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acos(double);
+double2 __ovld __cnfn acos(double2);
+double3 __ovld __cnfn acos(double3);
+double4 __ovld __cnfn acos(double4);
+double8 __ovld __cnfn acos(double8);
+double16 __ovld __cnfn acos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acos(half);
+half2 __ovld __cnfn acos(half2);
+half3 __ovld __cnfn acos(half3);
+half4 __ovld __cnfn acos(half4);
+half8 __ovld __cnfn acos(half8);
+half16 __ovld __cnfn acos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic cosine.
+ */
+float __ovld __cnfn acosh(float);
+float2 __ovld __cnfn acosh(float2);
+float3 __ovld __cnfn acosh(float3);
+float4 __ovld __cnfn acosh(float4);
+float8 __ovld __cnfn acosh(float8);
+float16 __ovld __cnfn acosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acosh(double);
+double2 __ovld __cnfn acosh(double2);
+double3 __ovld __cnfn acosh(double3);
+double4 __ovld __cnfn acosh(double4);
+double8 __ovld __cnfn acosh(double8);
+double16 __ovld __cnfn acosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acosh(half);
+half2 __ovld __cnfn acosh(half2);
+half3 __ovld __cnfn acosh(half3);
+half4 __ovld __cnfn acosh(half4);
+half8 __ovld __cnfn acosh(half8);
+half16 __ovld __cnfn acosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute acos (x) / PI.
+ */
+float __ovld __cnfn acospi(float);
+float2 __ovld __cnfn acospi(float2);
+float3 __ovld __cnfn acospi(float3);
+float4 __ovld __cnfn acospi(float4);
+float8 __ovld __cnfn acospi(float8);
+float16 __ovld __cnfn acospi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acospi(double);
+double2 __ovld __cnfn acospi(double2);
+double3 __ovld __cnfn acospi(double3);
+double4 __ovld __cnfn acospi(double4);
+double8 __ovld __cnfn acospi(double8);
+double16 __ovld __cnfn acospi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acospi(half);
+half2 __ovld __cnfn acospi(half2);
+half3 __ovld __cnfn acospi(half3);
+half4 __ovld __cnfn acospi(half4);
+half8 __ovld __cnfn acospi(half8);
+half16 __ovld __cnfn acospi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Arc sine function.
+ */
+float __ovld __cnfn asin(float);
+float2 __ovld __cnfn asin(float2);
+float3 __ovld __cnfn asin(float3);
+float4 __ovld __cnfn asin(float4);
+float8 __ovld __cnfn asin(float8);
+float16 __ovld __cnfn asin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asin(double);
+double2 __ovld __cnfn asin(double2);
+double3 __ovld __cnfn asin(double3);
+double4 __ovld __cnfn asin(double4);
+double8 __ovld __cnfn asin(double8);
+double16 __ovld __cnfn asin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asin(half);
+half2 __ovld __cnfn asin(half2);
+half3 __ovld __cnfn asin(half3);
+half4 __ovld __cnfn asin(half4);
+half8 __ovld __cnfn asin(half8);
+half16 __ovld __cnfn asin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic sine.
+ */
+float __ovld __cnfn asinh(float);
+float2 __ovld __cnfn asinh(float2);
+float3 __ovld __cnfn asinh(float3);
+float4 __ovld __cnfn asinh(float4);
+float8 __ovld __cnfn asinh(float8);
+float16 __ovld __cnfn asinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinh(double);
+double2 __ovld __cnfn asinh(double2);
+double3 __ovld __cnfn asinh(double3);
+double4 __ovld __cnfn asinh(double4);
+double8 __ovld __cnfn asinh(double8);
+double16 __ovld __cnfn asinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinh(half);
+half2 __ovld __cnfn asinh(half2);
+half3 __ovld __cnfn asinh(half3);
+half4 __ovld __cnfn asinh(half4);
+half8 __ovld __cnfn asinh(half8);
+half16 __ovld __cnfn asinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute asin (x) / PI.
+ */
+float __ovld __cnfn asinpi(float);
+float2 __ovld __cnfn asinpi(float2);
+float3 __ovld __cnfn asinpi(float3);
+float4 __ovld __cnfn asinpi(float4);
+float8 __ovld __cnfn asinpi(float8);
+float16 __ovld __cnfn asinpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinpi(double);
+double2 __ovld __cnfn asinpi(double2);
+double3 __ovld __cnfn asinpi(double3);
+double4 __ovld __cnfn asinpi(double4);
+double8 __ovld __cnfn asinpi(double8);
+double16 __ovld __cnfn asinpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinpi(half);
+half2 __ovld __cnfn asinpi(half2);
+half3 __ovld __cnfn asinpi(half3);
+half4 __ovld __cnfn asinpi(half4);
+half8 __ovld __cnfn asinpi(half8);
+half16 __ovld __cnfn asinpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent function.
+ */
+float __ovld __cnfn atan(float);
+float2 __ovld __cnfn atan(float2);
+float3 __ovld __cnfn atan(float3);
+float4 __ovld __cnfn atan(float4);
+float8 __ovld __cnfn atan(float8);
+float16 __ovld __cnfn atan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan(double);
+double2 __ovld __cnfn atan(double2);
+double3 __ovld __cnfn atan(double3);
+double4 __ovld __cnfn atan(double4);
+double8 __ovld __cnfn atan(double8);
+double16 __ovld __cnfn atan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan(half);
+half2 __ovld __cnfn atan(half2);
+half3 __ovld __cnfn atan(half3);
+half4 __ovld __cnfn atan(half4);
+half8 __ovld __cnfn atan(half8);
+half16 __ovld __cnfn atan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent of y / x.
+ */
+float __ovld __cnfn atan2(float, float);
+float2 __ovld __cnfn atan2(float2, float2);
+float3 __ovld __cnfn atan2(float3, float3);
+float4 __ovld __cnfn atan2(float4, float4);
+float8 __ovld __cnfn atan2(float8, float8);
+float16 __ovld __cnfn atan2(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2(double, double);
+double2 __ovld __cnfn atan2(double2, double2);
+double3 __ovld __cnfn atan2(double3, double3);
+double4 __ovld __cnfn atan2(double4, double4);
+double8 __ovld __cnfn atan2(double8, double8);
+double16 __ovld __cnfn atan2(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2(half, half);
+half2 __ovld __cnfn atan2(half2, half2);
+half3 __ovld __cnfn atan2(half3, half3);
+half4 __ovld __cnfn atan2(half4, half4);
+half8 __ovld __cnfn atan2(half8, half8);
+half16 __ovld __cnfn atan2(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic tangent.
+ */
+float __ovld __cnfn atanh(float);
+float2 __ovld __cnfn atanh(float2);
+float3 __ovld __cnfn atanh(float3);
+float4 __ovld __cnfn atanh(float4);
+float8 __ovld __cnfn atanh(float8);
+float16 __ovld __cnfn atanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanh(double);
+double2 __ovld __cnfn atanh(double2);
+double3 __ovld __cnfn atanh(double3);
+double4 __ovld __cnfn atanh(double4);
+double8 __ovld __cnfn atanh(double8);
+double16 __ovld __cnfn atanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanh(half);
+half2 __ovld __cnfn atanh(half2);
+half3 __ovld __cnfn atanh(half3);
+half4 __ovld __cnfn atanh(half4);
+half8 __ovld __cnfn atanh(half8);
+half16 __ovld __cnfn atanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan (x) / PI.
+ */
+float __ovld __cnfn atanpi(float);
+float2 __ovld __cnfn atanpi(float2);
+float3 __ovld __cnfn atanpi(float3);
+float4 __ovld __cnfn atanpi(float4);
+float8 __ovld __cnfn atanpi(float8);
+float16 __ovld __cnfn atanpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanpi(double);
+double2 __ovld __cnfn atanpi(double2);
+double3 __ovld __cnfn atanpi(double3);
+double4 __ovld __cnfn atanpi(double4);
+double8 __ovld __cnfn atanpi(double8);
+double16 __ovld __cnfn atanpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanpi(half);
+half2 __ovld __cnfn atanpi(half2);
+half3 __ovld __cnfn atanpi(half3);
+half4 __ovld __cnfn atanpi(half4);
+half8 __ovld __cnfn atanpi(half8);
+half16 __ovld __cnfn atanpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan2 (y, x) / PI.
+ */
+float __ovld __cnfn atan2pi(float, float);
+float2 __ovld __cnfn atan2pi(float2, float2);
+float3 __ovld __cnfn atan2pi(float3, float3);
+float4 __ovld __cnfn atan2pi(float4, float4);
+float8 __ovld __cnfn atan2pi(float8, float8);
+float16 __ovld __cnfn atan2pi(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2pi(double, double);
+double2 __ovld __cnfn atan2pi(double2, double2);
+double3 __ovld __cnfn atan2pi(double3, double3);
+double4 __ovld __cnfn atan2pi(double4, double4);
+double8 __ovld __cnfn atan2pi(double8, double8);
+double16 __ovld __cnfn atan2pi(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2pi(half, half);
+half2 __ovld __cnfn atan2pi(half2, half2);
+half3 __ovld __cnfn atan2pi(half3, half3);
+half4 __ovld __cnfn atan2pi(half4, half4);
+half8 __ovld __cnfn atan2pi(half8, half8);
+half16 __ovld __cnfn atan2pi(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the cube root.
+ */
+float __ovld __cnfn cbrt(float);
+float2 __ovld __cnfn cbrt(float2);
+float3 __ovld __cnfn cbrt(float3);
+float4 __ovld __cnfn cbrt(float4);
+float8 __ovld __cnfn cbrt(float8);
+float16 __ovld __cnfn cbrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cbrt(double);
+double2 __ovld __cnfn cbrt(double2);
+double3 __ovld __cnfn cbrt(double3);
+double4 __ovld __cnfn cbrt(double4);
+double8 __ovld __cnfn cbrt(double8);
+double16 __ovld __cnfn cbrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cbrt(half);
+half2 __ovld __cnfn cbrt(half2);
+half3 __ovld __cnfn cbrt(half3);
+half4 __ovld __cnfn cbrt(half4);
+half8 __ovld __cnfn cbrt(half8);
+half16 __ovld __cnfn cbrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to positive
+ * infinity rounding mode.
+ */
+float __ovld __cnfn ceil(float);
+float2 __ovld __cnfn ceil(float2);
+float3 __ovld __cnfn ceil(float3);
+float4 __ovld __cnfn ceil(float4);
+float8 __ovld __cnfn ceil(float8);
+float16 __ovld __cnfn ceil(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ceil(double);
+double2 __ovld __cnfn ceil(double2);
+double3 __ovld __cnfn ceil(double3);
+double4 __ovld __cnfn ceil(double4);
+double8 __ovld __cnfn ceil(double8);
+double16 __ovld __cnfn ceil(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ceil(half);
+half2 __ovld __cnfn ceil(half2);
+half3 __ovld __cnfn ceil(half3);
+half4 __ovld __cnfn ceil(half4);
+half8 __ovld __cnfn ceil(half8);
+half16 __ovld __cnfn ceil(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x with its sign changed to match the sign of y.
+ */
+float __ovld __cnfn copysign(float, float);
+float2 __ovld __cnfn copysign(float2, float2);
+float3 __ovld __cnfn copysign(float3, float3);
+float4 __ovld __cnfn copysign(float4, float4);
+float8 __ovld __cnfn copysign(float8, float8);
+float16 __ovld __cnfn copysign(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn copysign(double, double);
+double2 __ovld __cnfn copysign(double2, double2);
+double3 __ovld __cnfn copysign(double3, double3);
+double4 __ovld __cnfn copysign(double4, double4);
+double8 __ovld __cnfn copysign(double8, double8);
+double16 __ovld __cnfn copysign(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn copysign(half, half);
+half2 __ovld __cnfn copysign(half2, half2);
+half3 __ovld __cnfn copysign(half3, half3);
+half4 __ovld __cnfn copysign(half4, half4);
+half8 __ovld __cnfn copysign(half8, half8);
+half16 __ovld __cnfn copysign(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine.
+ */
+float __ovld __cnfn cos(float);
+float2 __ovld __cnfn cos(float2);
+float3 __ovld __cnfn cos(float3);
+float4 __ovld __cnfn cos(float4);
+float8 __ovld __cnfn cos(float8);
+float16 __ovld __cnfn cos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cos(double);
+double2 __ovld __cnfn cos(double2);
+double3 __ovld __cnfn cos(double3);
+double4 __ovld __cnfn cos(double4);
+double8 __ovld __cnfn cos(double8);
+double16 __ovld __cnfn cos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cos(half);
+half2 __ovld __cnfn cos(half2);
+half3 __ovld __cnfn cos(half3);
+half4 __ovld __cnfn cos(half4);
+half8 __ovld __cnfn cos(half8);
+half16 __ovld __cnfn cos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic cosine.
+ */
+float __ovld __cnfn cosh(float);
+float2 __ovld __cnfn cosh(float2);
+float3 __ovld __cnfn cosh(float3);
+float4 __ovld __cnfn cosh(float4);
+float8 __ovld __cnfn cosh(float8);
+float16 __ovld __cnfn cosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cosh(double);
+double2 __ovld __cnfn cosh(double2);
+double3 __ovld __cnfn cosh(double3);
+double4 __ovld __cnfn cosh(double4);
+double8 __ovld __cnfn cosh(double8);
+double16 __ovld __cnfn cosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cosh(half);
+half2 __ovld __cnfn cosh(half2);
+half3 __ovld __cnfn cosh(half3);
+half4 __ovld __cnfn cosh(half4);
+half8 __ovld __cnfn cosh(half8);
+half16 __ovld __cnfn cosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cos (PI * x).
+ */
+float __ovld __cnfn cospi(float);
+float2 __ovld __cnfn cospi(float2);
+float3 __ovld __cnfn cospi(float3);
+float4 __ovld __cnfn cospi(float4);
+float8 __ovld __cnfn cospi(float8);
+float16 __ovld __cnfn cospi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cospi(double);
+double2 __ovld __cnfn cospi(double2);
+double3 __ovld __cnfn cospi(double3);
+double4 __ovld __cnfn cospi(double4);
+double8 __ovld __cnfn cospi(double8);
+double16 __ovld __cnfn cospi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cospi(half);
+half2 __ovld __cnfn cospi(half2);
+half3 __ovld __cnfn cospi(half3);
+half4 __ovld __cnfn cospi(half4);
+half8 __ovld __cnfn cospi(half8);
+half16 __ovld __cnfn cospi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Complementary error function.
+ */
+float __ovld __cnfn erfc(float);
+float2 __ovld __cnfn erfc(float2);
+float3 __ovld __cnfn erfc(float3);
+float4 __ovld __cnfn erfc(float4);
+float8 __ovld __cnfn erfc(float8);
+float16 __ovld __cnfn erfc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erfc(double);
+double2 __ovld __cnfn erfc(double2);
+double3 __ovld __cnfn erfc(double3);
+double4 __ovld __cnfn erfc(double4);
+double8 __ovld __cnfn erfc(double8);
+double16 __ovld __cnfn erfc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erfc(half);
+half2 __ovld __cnfn erfc(half2);
+half3 __ovld __cnfn erfc(half3);
+half4 __ovld __cnfn erfc(half4);
+half8 __ovld __cnfn erfc(half8);
+half16 __ovld __cnfn erfc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Error function encountered in integrating the
+ * normal distribution.
+ */
+float __ovld __cnfn erf(float);
+float2 __ovld __cnfn erf(float2);
+float3 __ovld __cnfn erf(float3);
+float4 __ovld __cnfn erf(float4);
+float8 __ovld __cnfn erf(float8);
+float16 __ovld __cnfn erf(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erf(double);
+double2 __ovld __cnfn erf(double2);
+double3 __ovld __cnfn erf(double3);
+double4 __ovld __cnfn erf(double4);
+double8 __ovld __cnfn erf(double8);
+double16 __ovld __cnfn erf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erf(half);
+half2 __ovld __cnfn erf(half2);
+half3 __ovld __cnfn erf(half3);
+half4 __ovld __cnfn erf(half4);
+half8 __ovld __cnfn erf(half8);
+half16 __ovld __cnfn erf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the base e exponential function of x.
+ */
+float __ovld __cnfn exp(float);
+float2 __ovld __cnfn exp(float2);
+float3 __ovld __cnfn exp(float3);
+float4 __ovld __cnfn exp(float4);
+float8 __ovld __cnfn exp(float8);
+float16 __ovld __cnfn exp(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp(double);
+double2 __ovld __cnfn exp(double2);
+double3 __ovld __cnfn exp(double3);
+double4 __ovld __cnfn exp(double4);
+double8 __ovld __cnfn exp(double8);
+double16 __ovld __cnfn exp(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp(half);
+half2 __ovld __cnfn exp(half2);
+half3 __ovld __cnfn exp(half3);
+half4 __ovld __cnfn exp(half4);
+half8 __ovld __cnfn exp(half8);
+half16 __ovld __cnfn exp(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 2 function.
+ */
+float __ovld __cnfn exp2(float);
+float2 __ovld __cnfn exp2(float2);
+float3 __ovld __cnfn exp2(float3);
+float4 __ovld __cnfn exp2(float4);
+float8 __ovld __cnfn exp2(float8);
+float16 __ovld __cnfn exp2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp2(double);
+double2 __ovld __cnfn exp2(double2);
+double3 __ovld __cnfn exp2(double3);
+double4 __ovld __cnfn exp2(double4);
+double8 __ovld __cnfn exp2(double8);
+double16 __ovld __cnfn exp2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp2(half);
+half2 __ovld __cnfn exp2(half2);
+half3 __ovld __cnfn exp2(half3);
+half4 __ovld __cnfn exp2(half4);
+half8 __ovld __cnfn exp2(half8);
+half16 __ovld __cnfn exp2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 10 function.
+ */
+float __ovld __cnfn exp10(float);
+float2 __ovld __cnfn exp10(float2);
+float3 __ovld __cnfn exp10(float3);
+float4 __ovld __cnfn exp10(float4);
+float8 __ovld __cnfn exp10(float8);
+float16 __ovld __cnfn exp10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp10(double);
+double2 __ovld __cnfn exp10(double2);
+double3 __ovld __cnfn exp10(double3);
+double4 __ovld __cnfn exp10(double4);
+double8 __ovld __cnfn exp10(double8);
+double16 __ovld __cnfn exp10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp10(half);
+half2 __ovld __cnfn exp10(half2);
+half3 __ovld __cnfn exp10(half3);
+half4 __ovld __cnfn exp10(half4);
+half8 __ovld __cnfn exp10(half8);
+half16 __ovld __cnfn exp10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute e^x - 1.0.
+ */
+float __ovld __cnfn expm1(float);
+float2 __ovld __cnfn expm1(float2);
+float3 __ovld __cnfn expm1(float3);
+float4 __ovld __cnfn expm1(float4);
+float8 __ovld __cnfn expm1(float8);
+float16 __ovld __cnfn expm1(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn expm1(double);
+double2 __ovld __cnfn expm1(double2);
+double3 __ovld __cnfn expm1(double3);
+double4 __ovld __cnfn expm1(double4);
+double8 __ovld __cnfn expm1(double8);
+double16 __ovld __cnfn expm1(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn expm1(half);
+half2 __ovld __cnfn expm1(half2);
+half3 __ovld __cnfn expm1(half3);
+half4 __ovld __cnfn expm1(half4);
+half8 __ovld __cnfn expm1(half8);
+half16 __ovld __cnfn expm1(half16);
+#endif //cl_khr_fp16
+
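+/*
+ * Illustrative note (editorial sketch, not spec text): expm1 exists for
+ * accuracy near zero, where computing exp(x) - 1.0 cancels:
+ *
+ *   float x    = 1.0e-10f;
+ *   float bad  = exp(x) - 1.0f;  // 0.0f: exp(x) rounds to 1.0f in float
+ *   float good = expm1(x);       // ~1.0e-10f, keeps the leading term
+ */
+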
+/**
+ * Compute absolute value of a floating-point number.
+ */
+float __ovld __cnfn fabs(float);
+float2 __ovld __cnfn fabs(float2);
+float3 __ovld __cnfn fabs(float3);
+float4 __ovld __cnfn fabs(float4);
+float8 __ovld __cnfn fabs(float8);
+float16 __ovld __cnfn fabs(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fabs(double);
+double2 __ovld __cnfn fabs(double2);
+double3 __ovld __cnfn fabs(double3);
+double4 __ovld __cnfn fabs(double4);
+double8 __ovld __cnfn fabs(double8);
+double16 __ovld __cnfn fabs(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fabs(half);
+half2 __ovld __cnfn fabs(half2);
+half3 __ovld __cnfn fabs(half3);
+half4 __ovld __cnfn fabs(half4);
+half8 __ovld __cnfn fabs(half8);
+half16 __ovld __cnfn fabs(half16);
+#endif //cl_khr_fp16
+
+/**
+ * x - y if x > y, +0 if x is less than or equal to y.
+ */
+float __ovld __cnfn fdim(float, float);
+float2 __ovld __cnfn fdim(float2, float2);
+float3 __ovld __cnfn fdim(float3, float3);
+float4 __ovld __cnfn fdim(float4, float4);
+float8 __ovld __cnfn fdim(float8, float8);
+float16 __ovld __cnfn fdim(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fdim(double, double);
+double2 __ovld __cnfn fdim(double2, double2);
+double3 __ovld __cnfn fdim(double3, double3);
+double4 __ovld __cnfn fdim(double4, double4);
+double8 __ovld __cnfn fdim(double8, double8);
+double16 __ovld __cnfn fdim(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fdim(half, half);
+half2 __ovld __cnfn fdim(half2, half2);
+half3 __ovld __cnfn fdim(half3, half3);
+half4 __ovld __cnfn fdim(half4, half4);
+half8 __ovld __cnfn fdim(half8, half8);
+half16 __ovld __cnfn fdim(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to negative
+ * infinity rounding mode.
+ */
+float __ovld __cnfn floor(float);
+float2 __ovld __cnfn floor(float2);
+float3 __ovld __cnfn floor(float3);
+float4 __ovld __cnfn floor(float4);
+float8 __ovld __cnfn floor(float8);
+float16 __ovld __cnfn floor(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn floor(double);
+double2 __ovld __cnfn floor(double2);
+double3 __ovld __cnfn floor(double3);
+double4 __ovld __cnfn floor(double4);
+double8 __ovld __cnfn floor(double8);
+double16 __ovld __cnfn floor(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn floor(half);
+half2 __ovld __cnfn floor(half2);
+half3 __ovld __cnfn floor(half3);
+half4 __ovld __cnfn floor(half4);
+half8 __ovld __cnfn floor(half8);
+half16 __ovld __cnfn floor(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the correctly rounded floating-point
+ * representation of the sum of c with the infinitely
+ * precise product of a and b. Rounding of
+ * intermediate products shall not occur. Edge case
+ * behavior is per the IEEE 754-2008 standard.
+ */
+float __ovld __cnfn fma(float, float, float);
+float2 __ovld __cnfn fma(float2, float2, float2);
+float3 __ovld __cnfn fma(float3, float3, float3);
+float4 __ovld __cnfn fma(float4, float4, float4);
+float8 __ovld __cnfn fma(float8, float8, float8);
+float16 __ovld __cnfn fma(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fma(double, double, double);
+double2 __ovld __cnfn fma(double2, double2, double2);
+double3 __ovld __cnfn fma(double3, double3, double3);
+double4 __ovld __cnfn fma(double4, double4, double4);
+double8 __ovld __cnfn fma(double8, double8, double8);
+double16 __ovld __cnfn fma(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fma(half, half, half);
+half2 __ovld __cnfn fma(half2, half2, half2);
+half3 __ovld __cnfn fma(half3, half3, half3);
+half4 __ovld __cnfn fma(half4, half4, half4);
+half8 __ovld __cnfn fma(half8, half8, half8);
+half16 __ovld __cnfn fma(half16, half16, half16);
+#endif //cl_khr_fp16
+
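+/*
+ * Illustrative note (editorial sketch, not spec text): because fma rounds
+ * only once, it can keep low-order bits that a separate multiply and add
+ * would lose. With a = 1.0e8f and b = 1.0f + 0x1.0p-20f (both exact in
+ * float), the exact product is 1.0e8 + 95.367..., so on a conforming
+ * implementation:
+ *
+ *   float d1 = a * b - 1.0e8f;      // 96.0f: a * b rounds to a multiple of 8
+ *   float d2 = fma(a, b, -1.0e8f);  // ~95.3674f: single rounding at the end
+ */
+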
+/**
+ * Returns y if x < y, otherwise it returns x. If one
+ * argument is a NaN, fmax() returns the other
+ * argument. If both arguments are NaNs, fmax()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmax(float, float);
+float2 __ovld __cnfn fmax(float2, float2);
+float3 __ovld __cnfn fmax(float3, float3);
+float4 __ovld __cnfn fmax(float4, float4);
+float8 __ovld __cnfn fmax(float8, float8);
+float16 __ovld __cnfn fmax(float16, float16);
+float2 __ovld __cnfn fmax(float2, float);
+float3 __ovld __cnfn fmax(float3, float);
+float4 __ovld __cnfn fmax(float4, float);
+float8 __ovld __cnfn fmax(float8, float);
+float16 __ovld __cnfn fmax(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmax(double, double);
+double2 __ovld __cnfn fmax(double2, double2);
+double3 __ovld __cnfn fmax(double3, double3);
+double4 __ovld __cnfn fmax(double4, double4);
+double8 __ovld __cnfn fmax(double8, double8);
+double16 __ovld __cnfn fmax(double16, double16);
+double2 __ovld __cnfn fmax(double2, double);
+double3 __ovld __cnfn fmax(double3, double);
+double4 __ovld __cnfn fmax(double4, double);
+double8 __ovld __cnfn fmax(double8, double);
+double16 __ovld __cnfn fmax(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmax(half, half);
+half2 __ovld __cnfn fmax(half2, half2);
+half3 __ovld __cnfn fmax(half3, half3);
+half4 __ovld __cnfn fmax(half4, half4);
+half8 __ovld __cnfn fmax(half8, half8);
+half16 __ovld __cnfn fmax(half16, half16);
+half2 __ovld __cnfn fmax(half2, half);
+half3 __ovld __cnfn fmax(half3, half);
+half4 __ovld __cnfn fmax(half4, half);
+half8 __ovld __cnfn fmax(half8, half);
+half16 __ovld __cnfn fmax(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If one
+ * argument is a NaN, fmin() returns the other
+ * argument. If both arguments are NaNs, fmin()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmin(float, float);
+float2 __ovld __cnfn fmin(float2, float2);
+float3 __ovld __cnfn fmin(float3, float3);
+float4 __ovld __cnfn fmin(float4, float4);
+float8 __ovld __cnfn fmin(float8, float8);
+float16 __ovld __cnfn fmin(float16, float16);
+float2 __ovld __cnfn fmin(float2, float);
+float3 __ovld __cnfn fmin(float3, float);
+float4 __ovld __cnfn fmin(float4, float);
+float8 __ovld __cnfn fmin(float8, float);
+float16 __ovld __cnfn fmin(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmin(double, double);
+double2 __ovld __cnfn fmin(double2, double2);
+double3 __ovld __cnfn fmin(double3, double3);
+double4 __ovld __cnfn fmin(double4, double4);
+double8 __ovld __cnfn fmin(double8, double8);
+double16 __ovld __cnfn fmin(double16, double16);
+double2 __ovld __cnfn fmin(double2, double);
+double3 __ovld __cnfn fmin(double3, double);
+double4 __ovld __cnfn fmin(double4, double);
+double8 __ovld __cnfn fmin(double8, double);
+double16 __ovld __cnfn fmin(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmin(half, half);
+half2 __ovld __cnfn fmin(half2, half2);
+half3 __ovld __cnfn fmin(half3, half3);
+half4 __ovld __cnfn fmin(half4, half4);
+half8 __ovld __cnfn fmin(half8, half8);
+half16 __ovld __cnfn fmin(half16, half16);
+half2 __ovld __cnfn fmin(half2, half);
+half3 __ovld __cnfn fmin(half3, half);
+half4 __ovld __cnfn fmin(half4, half);
+half8 __ovld __cnfn fmin(half8, half);
+half16 __ovld __cnfn fmin(half16, half);
+#endif //cl_khr_fp16
+
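+/*
+ * Illustrative note (editorial sketch, not spec text): the NaN rules above
+ * make fmax/fmin usable as NaN-suppressing clamps, and the vector-scalar
+ * overloads splat the scalar across every lane:
+ *
+ *   float4 v = (float4)(0.5f, NAN, 2.0f, -1.0f);
+ *   float4 c = fmin(fmax(v, 0.0f), 1.0f);  // (0.5, 0.0, 1.0, 0.0)
+ */
+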
+/**
+ * Modulus. Returns x - y * trunc (x/y).
+ */
+float __ovld __cnfn fmod(float, float);
+float2 __ovld __cnfn fmod(float2, float2);
+float3 __ovld __cnfn fmod(float3, float3);
+float4 __ovld __cnfn fmod(float4, float4);
+float8 __ovld __cnfn fmod(float8, float8);
+float16 __ovld __cnfn fmod(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmod(double, double);
+double2 __ovld __cnfn fmod(double2, double2);
+double3 __ovld __cnfn fmod(double3, double3);
+double4 __ovld __cnfn fmod(double4, double4);
+double8 __ovld __cnfn fmod(double8, double8);
+double16 __ovld __cnfn fmod(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmod(half, half);
+half2 __ovld __cnfn fmod(half2, half2);
+half3 __ovld __cnfn fmod(half3, half3);
+half4 __ovld __cnfn fmod(half4, half4);
+half8 __ovld __cnfn fmod(half8, half8);
+half16 __ovld __cnfn fmod(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns fmin(x - floor(x), 0x1.fffffep-1f).
+ * floor(x) is returned in iptr.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld fract(float, float *);
+float2 __ovld fract(float2, float2 *);
+float3 __ovld fract(float3, float3 *);
+float4 __ovld fract(float4, float4 *);
+float8 __ovld fract(float8, float8 *);
+float16 __ovld fract(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld fract(double, double *);
+double2 __ovld fract(double2, double2 *);
+double3 __ovld fract(double3, double3 *);
+double4 __ovld fract(double4, double4 *);
+double8 __ovld fract(double8, double8 *);
+double16 __ovld fract(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half, half *);
+half2 __ovld fract(half2, half2 *);
+half3 __ovld fract(half3, half3 *);
+half4 __ovld fract(half4, half4 *);
+half8 __ovld fract(half8, half8 *);
+half16 __ovld fract(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld fract(float, __global float *);
+float2 __ovld fract(float2, __global float2 *);
+float3 __ovld fract(float3, __global float3 *);
+float4 __ovld fract(float4, __global float4 *);
+float8 __ovld fract(float8, __global float8 *);
+float16 __ovld fract(float16, __global float16 *);
+float __ovld fract(float, __local float *);
+float2 __ovld fract(float2, __local float2 *);
+float3 __ovld fract(float3, __local float3 *);
+float4 __ovld fract(float4, __local float4 *);
+float8 __ovld fract(float8, __local float8 *);
+float16 __ovld fract(float16, __local float16 *);
+float __ovld fract(float, __private float *);
+float2 __ovld fract(float2, __private float2 *);
+float3 __ovld fract(float3, __private float3 *);
+float4 __ovld fract(float4, __private float4 *);
+float8 __ovld fract(float8, __private float8 *);
+float16 __ovld fract(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld fract(double, __global double *);
+double2 __ovld fract(double2, __global double2 *);
+double3 __ovld fract(double3, __global double3 *);
+double4 __ovld fract(double4, __global double4 *);
+double8 __ovld fract(double8, __global double8 *);
+double16 __ovld fract(double16, __global double16 *);
+double __ovld fract(double, __local double *);
+double2 __ovld fract(double2, __local double2 *);
+double3 __ovld fract(double3, __local double3 *);
+double4 __ovld fract(double4, __local double4 *);
+double8 __ovld fract(double8, __local double8 *);
+double16 __ovld fract(double16, __local double16 *);
+double __ovld fract(double, __private double *);
+double2 __ovld fract(double2, __private double2 *);
+double3 __ovld fract(double3, __private double3 *);
+double4 __ovld fract(double4, __private double4 *);
+double8 __ovld fract(double8, __private double8 *);
+double16 __ovld fract(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half, __global half *);
+half2 __ovld fract(half2, __global half2 *);
+half3 __ovld fract(half3, __global half3 *);
+half4 __ovld fract(half4, __global half4 *);
+half8 __ovld fract(half8, __global half8 *);
+half16 __ovld fract(half16, __global half16 *);
+half __ovld fract(half, __local half *);
+half2 __ovld fract(half2, __local half2 *);
+half3 __ovld fract(half3, __local half3 *);
+half4 __ovld fract(half4, __local half4 *);
+half8 __ovld fract(half8, __local half8 *);
+half16 __ovld fract(half16, __local half16 *);
+half __ovld fract(half, __private half *);
+half2 __ovld fract(half2, __private half2 *);
+half3 __ovld fract(half3, __private half3 *);
+half4 __ovld fract(half4, __private half4 *);
+half8 __ovld fract(half8, __private half8 *);
+half16 __ovld fract(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
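+/*
+ * Illustrative note (editorial sketch, not spec text): one fract call yields
+ * both the fractional part and floor(x); the clamp to 0x1.fffffep-1f only
+ * matters for tiny negative x just below an integer. Assuming generic
+ * address space support:
+ *
+ *   float ip;
+ *   float fp = fract(-1.25f, &ip);  // fp == 0.75f, ip == -2.0f
+ */
+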
+/**
+ * Extract mantissa and exponent from x. For each
+ * component the mantissa returned is a float with
+ * magnitude in the interval [1/2, 1) or 0. Each
+ * component of x equals the returned mantissa * 2^exp.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld frexp(float, int *);
+float2 __ovld frexp(float2, int2 *);
+float3 __ovld frexp(float3, int3 *);
+float4 __ovld frexp(float4, int4 *);
+float8 __ovld frexp(float8, int8 *);
+float16 __ovld frexp(float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld frexp(double, int *);
+double2 __ovld frexp(double2, int2 *);
+double3 __ovld frexp(double3, int3 *);
+double4 __ovld frexp(double4, int4 *);
+double8 __ovld frexp(double8, int8 *);
+double16 __ovld frexp(double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half, int *);
+half2 __ovld frexp(half2, int2 *);
+half3 __ovld frexp(half3, int3 *);
+half4 __ovld frexp(half4, int4 *);
+half8 __ovld frexp(half8, int8 *);
+half16 __ovld frexp(half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld frexp(float, __global int *);
+float2 __ovld frexp(float2, __global int2 *);
+float3 __ovld frexp(float3, __global int3 *);
+float4 __ovld frexp(float4, __global int4 *);
+float8 __ovld frexp(float8, __global int8 *);
+float16 __ovld frexp(float16, __global int16 *);
+float __ovld frexp(float, __local int *);
+float2 __ovld frexp(float2, __local int2 *);
+float3 __ovld frexp(float3, __local int3 *);
+float4 __ovld frexp(float4, __local int4 *);
+float8 __ovld frexp(float8, __local int8 *);
+float16 __ovld frexp(float16, __local int16 *);
+float __ovld frexp(float, __private int *);
+float2 __ovld frexp(float2, __private int2 *);
+float3 __ovld frexp(float3, __private int3 *);
+float4 __ovld frexp(float4, __private int4 *);
+float8 __ovld frexp(float8, __private int8 *);
+float16 __ovld frexp(float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld frexp(double, __global int *);
+double2 __ovld frexp(double2, __global int2 *);
+double3 __ovld frexp(double3, __global int3 *);
+double4 __ovld frexp(double4, __global int4 *);
+double8 __ovld frexp(double8, __global int8 *);
+double16 __ovld frexp(double16, __global int16 *);
+double __ovld frexp(double, __local int *);
+double2 __ovld frexp(double2, __local int2 *);
+double3 __ovld frexp(double3, __local int3 *);
+double4 __ovld frexp(double4, __local int4 *);
+double8 __ovld frexp(double8, __local int8 *);
+double16 __ovld frexp(double16, __local int16 *);
+double __ovld frexp(double, __private int *);
+double2 __ovld frexp(double2, __private int2 *);
+double3 __ovld frexp(double3, __private int3 *);
+double4 __ovld frexp(double4, __private int4 *);
+double8 __ovld frexp(double8, __private int8 *);
+double16 __ovld frexp(double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half, __global int *);
+half2 __ovld frexp(half2, __global int2 *);
+half3 __ovld frexp(half3, __global int3 *);
+half4 __ovld frexp(half4, __global int4 *);
+half8 __ovld frexp(half8, __global int8 *);
+half16 __ovld frexp(half16, __global int16 *);
+half __ovld frexp(half, __local int *);
+half2 __ovld frexp(half2, __local int2 *);
+half3 __ovld frexp(half3, __local int3 *);
+half4 __ovld frexp(half4, __local int4 *);
+half8 __ovld frexp(half8, __local int8 *);
+half16 __ovld frexp(half16, __local int16 *);
+half __ovld frexp(half, __private int *);
+half2 __ovld frexp(half2, __private int2 *);
+half3 __ovld frexp(half3, __private int3 *);
+half4 __ovld frexp(half4, __private int4 *);
+half8 __ovld frexp(half8, __private int8 *);
+half16 __ovld frexp(half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
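+/*
+ * Illustrative note (editorial sketch, not spec text): frexp factors x as
+ * mantissa * 2^exp with the mantissa's magnitude in [0.5, 1):
+ *
+ *   int e;
+ *   float m = frexp(48.0f, &e);  // m == 0.75f, e == 6 (0.75 * 2^6 == 48)
+ */
+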
+/**
+ * Compute the value of the square root of x^2 + y^2
+ * without undue overflow or underflow.
+ */
+float __ovld __cnfn hypot(float, float);
+float2 __ovld __cnfn hypot(float2, float2);
+float3 __ovld __cnfn hypot(float3, float3);
+float4 __ovld __cnfn hypot(float4, float4);
+float8 __ovld __cnfn hypot(float8, float8);
+float16 __ovld __cnfn hypot(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn hypot(double, double);
+double2 __ovld __cnfn hypot(double2, double2);
+double3 __ovld __cnfn hypot(double3, double3);
+double4 __ovld __cnfn hypot(double4, double4);
+double8 __ovld __cnfn hypot(double8, double8);
+double16 __ovld __cnfn hypot(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn hypot(half, half);
+half2 __ovld __cnfn hypot(half2, half2);
+half3 __ovld __cnfn hypot(half3, half3);
+half4 __ovld __cnfn hypot(half4, half4);
+half8 __ovld __cnfn hypot(half8, half8);
+half16 __ovld __cnfn hypot(half16, half16);
+#endif //cl_khr_fp16
+
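+/*
+ * Illustrative note (editorial sketch, not spec text): hypot protects the
+ * intermediate squares, so it stays finite where the naive formula
+ * overflows:
+ *
+ *   float x = 2.0e38f;
+ *   float naive = sqrt(x * x + x * x);  // INFINITY: x * x overflows
+ *   float safe  = hypot(x, x);          // ~2.83e38f, still finite
+ */
+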
+/**
+ * Return the exponent as an integer value.
+ */
+int __ovld __cnfn ilogb(float);
+int2 __ovld __cnfn ilogb(float2);
+int3 __ovld __cnfn ilogb(float3);
+int4 __ovld __cnfn ilogb(float4);
+int8 __ovld __cnfn ilogb(float8);
+int16 __ovld __cnfn ilogb(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn ilogb(double);
+int2 __ovld __cnfn ilogb(double2);
+int3 __ovld __cnfn ilogb(double3);
+int4 __ovld __cnfn ilogb(double4);
+int8 __ovld __cnfn ilogb(double8);
+int16 __ovld __cnfn ilogb(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn ilogb(half);
+int2 __ovld __cnfn ilogb(half2);
+int3 __ovld __cnfn ilogb(half3);
+int4 __ovld __cnfn ilogb(half4);
+int8 __ovld __cnfn ilogb(half8);
+int16 __ovld __cnfn ilogb(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Multiply x by 2 to the power n.
+ */
+float __ovld __cnfn ldexp(float, int);
+float2 __ovld __cnfn ldexp(float2, int2);
+float3 __ovld __cnfn ldexp(float3, int3);
+float4 __ovld __cnfn ldexp(float4, int4);
+float8 __ovld __cnfn ldexp(float8, int8);
+float16 __ovld __cnfn ldexp(float16, int16);
+float2 __ovld __cnfn ldexp(float2, int);
+float3 __ovld __cnfn ldexp(float3, int);
+float4 __ovld __cnfn ldexp(float4, int);
+float8 __ovld __cnfn ldexp(float8, int);
+float16 __ovld __cnfn ldexp(float16, int);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ldexp(double, int);
+double2 __ovld __cnfn ldexp(double2, int2);
+double3 __ovld __cnfn ldexp(double3, int3);
+double4 __ovld __cnfn ldexp(double4, int4);
+double8 __ovld __cnfn ldexp(double8, int8);
+double16 __ovld __cnfn ldexp(double16, int16);
+double2 __ovld __cnfn ldexp(double2, int);
+double3 __ovld __cnfn ldexp(double3, int);
+double4 __ovld __cnfn ldexp(double4, int);
+double8 __ovld __cnfn ldexp(double8, int);
+double16 __ovld __cnfn ldexp(double16, int);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ldexp(half, int);
+half2 __ovld __cnfn ldexp(half2, int2);
+half3 __ovld __cnfn ldexp(half3, int3);
+half4 __ovld __cnfn ldexp(half4, int4);
+half8 __ovld __cnfn ldexp(half8, int8);
+half16 __ovld __cnfn ldexp(half16, int16);
+half2 __ovld __cnfn ldexp(half2, int);
+half3 __ovld __cnfn ldexp(half3, int);
+half4 __ovld __cnfn ldexp(half4, int);
+half8 __ovld __cnfn ldexp(half8, int);
+half16 __ovld __cnfn ldexp(half16, int);
+#endif //cl_khr_fp16
+
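+/*
+ * Illustrative note (editorial sketch, not spec text): ilogb and ldexp
+ * compose into an exact scaling round-trip for finite nonzero x:
+ *
+ *   float x = 40.0f;
+ *   int   e = ilogb(x);      // 5, since 2^5 <= 40 < 2^6
+ *   float s = ldexp(x, -e);  // 1.25f; ldexp(s, e) recovers 40.0f
+ */
+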
+/**
+ * Log gamma function. Returns the natural
+ * logarithm of the absolute value of the gamma
+ * function. The sign of the gamma function is
+ * returned in the signp argument of lgamma_r.
+ */
+float __ovld __cnfn lgamma(float);
+float2 __ovld __cnfn lgamma(float2);
+float3 __ovld __cnfn lgamma(float3);
+float4 __ovld __cnfn lgamma(float4);
+float8 __ovld __cnfn lgamma(float8);
+float16 __ovld __cnfn lgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn lgamma(double);
+double2 __ovld __cnfn lgamma(double2);
+double3 __ovld __cnfn lgamma(double3);
+double4 __ovld __cnfn lgamma(double4);
+double8 __ovld __cnfn lgamma(double8);
+double16 __ovld __cnfn lgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn lgamma(half);
+half2 __ovld __cnfn lgamma(half2);
+half3 __ovld __cnfn lgamma(half3);
+half4 __ovld __cnfn lgamma(half4);
+half8 __ovld __cnfn lgamma(half8);
+half16 __ovld __cnfn lgamma(half16);
+#endif //cl_khr_fp16
+
+#if defined(__opencl_c_generic_address_space)
+float __ovld lgamma_r(float, int *);
+float2 __ovld lgamma_r(float2, int2 *);
+float3 __ovld lgamma_r(float3, int3 *);
+float4 __ovld lgamma_r(float4, int4 *);
+float8 __ovld lgamma_r(float8, int8 *);
+float16 __ovld lgamma_r(float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double, int *);
+double2 __ovld lgamma_r(double2, int2 *);
+double3 __ovld lgamma_r(double3, int3 *);
+double4 __ovld lgamma_r(double4, int4 *);
+double8 __ovld lgamma_r(double8, int8 *);
+double16 __ovld lgamma_r(double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half, int *);
+half2 __ovld lgamma_r(half2, int2 *);
+half3 __ovld lgamma_r(half3, int3 *);
+half4 __ovld lgamma_r(half4, int4 *);
+half8 __ovld lgamma_r(half8, int8 *);
+half16 __ovld lgamma_r(half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld lgamma_r(float, __global int *);
+float2 __ovld lgamma_r(float2, __global int2 *);
+float3 __ovld lgamma_r(float3, __global int3 *);
+float4 __ovld lgamma_r(float4, __global int4 *);
+float8 __ovld lgamma_r(float8, __global int8 *);
+float16 __ovld lgamma_r(float16, __global int16 *);
+float __ovld lgamma_r(float, __local int *);
+float2 __ovld lgamma_r(float2, __local int2 *);
+float3 __ovld lgamma_r(float3, __local int3 *);
+float4 __ovld lgamma_r(float4, __local int4 *);
+float8 __ovld lgamma_r(float8, __local int8 *);
+float16 __ovld lgamma_r(float16, __local int16 *);
+float __ovld lgamma_r(float, __private int *);
+float2 __ovld lgamma_r(float2, __private int2 *);
+float3 __ovld lgamma_r(float3, __private int3 *);
+float4 __ovld lgamma_r(float4, __private int4 *);
+float8 __ovld lgamma_r(float8, __private int8 *);
+float16 __ovld lgamma_r(float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double, __global int *);
+double2 __ovld lgamma_r(double2, __global int2 *);
+double3 __ovld lgamma_r(double3, __global int3 *);
+double4 __ovld lgamma_r(double4, __global int4 *);
+double8 __ovld lgamma_r(double8, __global int8 *);
+double16 __ovld lgamma_r(double16, __global int16 *);
+double __ovld lgamma_r(double, __local int *);
+double2 __ovld lgamma_r(double2, __local int2 *);
+double3 __ovld lgamma_r(double3, __local int3 *);
+double4 __ovld lgamma_r(double4, __local int4 *);
+double8 __ovld lgamma_r(double8, __local int8 *);
+double16 __ovld lgamma_r(double16, __local int16 *);
+double __ovld lgamma_r(double, __private int *);
+double2 __ovld lgamma_r(double2, __private int2 *);
+double3 __ovld lgamma_r(double3, __private int3 *);
+double4 __ovld lgamma_r(double4, __private int4 *);
+double8 __ovld lgamma_r(double8, __private int8 *);
+double16 __ovld lgamma_r(double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half, __global int *);
+half2 __ovld lgamma_r(half2, __global int2 *);
+half3 __ovld lgamma_r(half3, __global int3 *);
+half4 __ovld lgamma_r(half4, __global int4 *);
+half8 __ovld lgamma_r(half8, __global int8 *);
+half16 __ovld lgamma_r(half16, __global int16 *);
+half __ovld lgamma_r(half, __local int *);
+half2 __ovld lgamma_r(half2, __local int2 *);
+half3 __ovld lgamma_r(half3, __local int3 *);
+half4 __ovld lgamma_r(half4, __local int4 *);
+half8 __ovld lgamma_r(half8, __local int8 *);
+half16 __ovld lgamma_r(half16, __local int16 *);
+half __ovld lgamma_r(half, __private int *);
+half2 __ovld lgamma_r(half2, __private int2 *);
+half3 __ovld lgamma_r(half3, __private int3 *);
+half4 __ovld lgamma_r(half4, __private int4 *);
+half8 __ovld lgamma_r(half8, __private int8 *);
+half16 __ovld lgamma_r(half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
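+/*
+ * Illustrative note (editorial sketch, not spec text): lgamma alone drops
+ * the sign of gamma(x); lgamma_r returns it through signp:
+ *
+ *   int s;
+ *   float lg = lgamma_r(-2.5f, &s);  // gamma(-2.5) < 0, so s == -1
+ */
+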
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn log(float);
+float2 __ovld __cnfn log(float2);
+float3 __ovld __cnfn log(float3);
+float4 __ovld __cnfn log(float4);
+float8 __ovld __cnfn log(float8);
+float16 __ovld __cnfn log(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log(double);
+double2 __ovld __cnfn log(double2);
+double3 __ovld __cnfn log(double3);
+double4 __ovld __cnfn log(double4);
+double8 __ovld __cnfn log(double8);
+double16 __ovld __cnfn log(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log(half);
+half2 __ovld __cnfn log(half2);
+half3 __ovld __cnfn log(half3);
+half4 __ovld __cnfn log(half4);
+half8 __ovld __cnfn log(half8);
+half16 __ovld __cnfn log(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn log2(float);
+float2 __ovld __cnfn log2(float2);
+float3 __ovld __cnfn log2(float3);
+float4 __ovld __cnfn log2(float4);
+float8 __ovld __cnfn log2(float8);
+float16 __ovld __cnfn log2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log2(double);
+double2 __ovld __cnfn log2(double2);
+double3 __ovld __cnfn log2(double3);
+double4 __ovld __cnfn log2(double4);
+double8 __ovld __cnfn log2(double8);
+double16 __ovld __cnfn log2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log2(half);
+half2 __ovld __cnfn log2(half2);
+half3 __ovld __cnfn log2(half3);
+half4 __ovld __cnfn log2(half4);
+half8 __ovld __cnfn log2(half8);
+half16 __ovld __cnfn log2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn log10(float);
+float2 __ovld __cnfn log10(float2);
+float3 __ovld __cnfn log10(float3);
+float4 __ovld __cnfn log10(float4);
+float8 __ovld __cnfn log10(float8);
+float16 __ovld __cnfn log10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log10(double);
+double2 __ovld __cnfn log10(double2);
+double3 __ovld __cnfn log10(double3);
+double4 __ovld __cnfn log10(double4);
+double8 __ovld __cnfn log10(double8);
+double16 __ovld __cnfn log10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log10(half);
+half2 __ovld __cnfn log10(half2);
+half3 __ovld __cnfn log10(half3);
+half4 __ovld __cnfn log10(half4);
+half8 __ovld __cnfn log10(half8);
+half16 __ovld __cnfn log10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base e logarithm of (1.0 + x).
+ */
+float __ovld __cnfn log1p(float);
+float2 __ovld __cnfn log1p(float2);
+float3 __ovld __cnfn log1p(float3);
+float4 __ovld __cnfn log1p(float4);
+float8 __ovld __cnfn log1p(float8);
+float16 __ovld __cnfn log1p(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log1p(double);
+double2 __ovld __cnfn log1p(double2);
+double3 __ovld __cnfn log1p(double3);
+double4 __ovld __cnfn log1p(double4);
+double8 __ovld __cnfn log1p(double8);
+double16 __ovld __cnfn log1p(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log1p(half);
+half2 __ovld __cnfn log1p(half2);
+half3 __ovld __cnfn log1p(half3);
+half4 __ovld __cnfn log1p(half4);
+half8 __ovld __cnfn log1p(half8);
+half16 __ovld __cnfn log1p(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the exponent of x, which is the integral
+ * part of log_r(|x|), where r is the radix of the
+ * floating-point format (2 for these types).
+ */
+float __ovld __cnfn logb(float);
+float2 __ovld __cnfn logb(float2);
+float3 __ovld __cnfn logb(float3);
+float4 __ovld __cnfn logb(float4);
+float8 __ovld __cnfn logb(float8);
+float16 __ovld __cnfn logb(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn logb(double);
+double2 __ovld __cnfn logb(double2);
+double3 __ovld __cnfn logb(double3);
+double4 __ovld __cnfn logb(double4);
+double8 __ovld __cnfn logb(double8);
+double16 __ovld __cnfn logb(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn logb(half);
+half2 __ovld __cnfn logb(half2);
+half3 __ovld __cnfn logb(half3);
+half4 __ovld __cnfn logb(half4);
+half8 __ovld __cnfn logb(half8);
+half16 __ovld __cnfn logb(half16);
+#endif //cl_khr_fp16
+
+/**
+ * mad approximates a * b + c. Whether or how the
+ * product a * b is rounded, and how supernormal or
+ * subnormal intermediate products are handled, is not
+ * defined. mad is intended to be used where speed is
+ * preferred over accuracy.
+ */
+float __ovld __cnfn mad(float, float, float);
+float2 __ovld __cnfn mad(float2, float2, float2);
+float3 __ovld __cnfn mad(float3, float3, float3);
+float4 __ovld __cnfn mad(float4, float4, float4);
+float8 __ovld __cnfn mad(float8, float8, float8);
+float16 __ovld __cnfn mad(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mad(double, double, double);
+double2 __ovld __cnfn mad(double2, double2, double2);
+double3 __ovld __cnfn mad(double3, double3, double3);
+double4 __ovld __cnfn mad(double4, double4, double4);
+double8 __ovld __cnfn mad(double8, double8, double8);
+double16 __ovld __cnfn mad(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mad(half, half, half);
+half2 __ovld __cnfn mad(half2, half2, half2);
+half3 __ovld __cnfn mad(half3, half3, half3);
+half4 __ovld __cnfn mad(half4, half4, half4);
+half8 __ovld __cnfn mad(half8, half8, half8);
+half16 __ovld __cnfn mad(half16, half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x if | x | > | y |, y if | y | > | x |, otherwise
+ * fmax(x, y).
+ */
+float __ovld __cnfn maxmag(float, float);
+float2 __ovld __cnfn maxmag(float2, float2);
+float3 __ovld __cnfn maxmag(float3, float3);
+float4 __ovld __cnfn maxmag(float4, float4);
+float8 __ovld __cnfn maxmag(float8, float8);
+float16 __ovld __cnfn maxmag(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn maxmag(double, double);
+double2 __ovld __cnfn maxmag(double2, double2);
+double3 __ovld __cnfn maxmag(double3, double3);
+double4 __ovld __cnfn maxmag(double4, double4);
+double8 __ovld __cnfn maxmag(double8, double8);
+double16 __ovld __cnfn maxmag(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn maxmag(half, half);
+half2 __ovld __cnfn maxmag(half2, half2);
+half3 __ovld __cnfn maxmag(half3, half3);
+half4 __ovld __cnfn maxmag(half4, half4);
+half8 __ovld __cnfn maxmag(half8, half8);
+half16 __ovld __cnfn maxmag(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x if | x | < | y |, y if | y | < | x |, otherwise
+ * fmin(x, y).
+ */
+float __ovld __cnfn minmag(float, float);
+float2 __ovld __cnfn minmag(float2, float2);
+float3 __ovld __cnfn minmag(float3, float3);
+float4 __ovld __cnfn minmag(float4, float4);
+float8 __ovld __cnfn minmag(float8, float8);
+float16 __ovld __cnfn minmag(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn minmag(double, double);
+double2 __ovld __cnfn minmag(double2, double2);
+double3 __ovld __cnfn minmag(double3, double3);
+double4 __ovld __cnfn minmag(double4, double4);
+double8 __ovld __cnfn minmag(double8, double8);
+double16 __ovld __cnfn minmag(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn minmag(half, half);
+half2 __ovld __cnfn minmag(half2, half2);
+half3 __ovld __cnfn minmag(half3, half3);
+half4 __ovld __cnfn minmag(half4, half4);
+half8 __ovld __cnfn minmag(half8, half8);
+half16 __ovld __cnfn minmag(half16, half16);
+#endif //cl_khr_fp16
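+
+// Illustrative values for the magnitude comparisons above:
+//   maxmag(-3.0f, 2.0f) == -3.0f  // |x| > |y|, so x is returned, sign and all
+//   minmag(-3.0f, 2.0f) ==  2.0f  // |y| < |x|, so y is returned
+//   maxmag(-2.0f, 2.0f) ==  2.0f  // equal magnitudes fall back to fmax(x, y)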
+
+/**
+ * Decompose a floating-point number. The modf
+ * function breaks the argument x into integral and
+ * fractional parts, each of which has the same sign as
+ * the argument. It stores the integral part in the object
+ * pointed to by iptr.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld modf(float, float *);
+float2 __ovld modf(float2, float2 *);
+float3 __ovld modf(float3, float3 *);
+float4 __ovld modf(float4, float4 *);
+float8 __ovld modf(float8, float8 *);
+float16 __ovld modf(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld modf(double, double *);
+double2 __ovld modf(double2, double2 *);
+double3 __ovld modf(double3, double3 *);
+double4 __ovld modf(double4, double4 *);
+double8 __ovld modf(double8, double8 *);
+double16 __ovld modf(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half, half *);
+half2 __ovld modf(half2, half2 *);
+half3 __ovld modf(half3, half3 *);
+half4 __ovld modf(half4, half4 *);
+half8 __ovld modf(half8, half8 *);
+half16 __ovld modf(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld modf(float, __global float *);
+float2 __ovld modf(float2, __global float2 *);
+float3 __ovld modf(float3, __global float3 *);
+float4 __ovld modf(float4, __global float4 *);
+float8 __ovld modf(float8, __global float8 *);
+float16 __ovld modf(float16, __global float16 *);
+float __ovld modf(float, __local float *);
+float2 __ovld modf(float2, __local float2 *);
+float3 __ovld modf(float3, __local float3 *);
+float4 __ovld modf(float4, __local float4 *);
+float8 __ovld modf(float8, __local float8 *);
+float16 __ovld modf(float16, __local float16 *);
+float __ovld modf(float, __private float *);
+float2 __ovld modf(float2, __private float2 *);
+float3 __ovld modf(float3, __private float3 *);
+float4 __ovld modf(float4, __private float4 *);
+float8 __ovld modf(float8, __private float8 *);
+float16 __ovld modf(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld modf(double, __global double *);
+double2 __ovld modf(double2, __global double2 *);
+double3 __ovld modf(double3, __global double3 *);
+double4 __ovld modf(double4, __global double4 *);
+double8 __ovld modf(double8, __global double8 *);
+double16 __ovld modf(double16, __global double16 *);
+double __ovld modf(double, __local double *);
+double2 __ovld modf(double2, __local double2 *);
+double3 __ovld modf(double3, __local double3 *);
+double4 __ovld modf(double4, __local double4 *);
+double8 __ovld modf(double8, __local double8 *);
+double16 __ovld modf(double16, __local double16 *);
+double __ovld modf(double, __private double *);
+double2 __ovld modf(double2, __private double2 *);
+double3 __ovld modf(double3, __private double3 *);
+double4 __ovld modf(double4, __private double4 *);
+double8 __ovld modf(double8, __private double8 *);
+double16 __ovld modf(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half, __global half *);
+half2 __ovld modf(half2, __global half2 *);
+half3 __ovld modf(half3, __global half3 *);
+half4 __ovld modf(half4, __global half4 *);
+half8 __ovld modf(half8, __global half8 *);
+half16 __ovld modf(half16, __global half16 *);
+half __ovld modf(half, __local half *);
+half2 __ovld modf(half2, __local half2 *);
+half3 __ovld modf(half3, __local half3 *);
+half4 __ovld modf(half4, __local half4 *);
+half8 __ovld modf(half8, __local half8 *);
+half16 __ovld modf(half16, __local half16 *);
+half __ovld modf(half, __private half *);
+half2 __ovld modf(half2, __private half2 *);
+half3 __ovld modf(half3, __private half3 *);
+half4 __ovld modf(half4, __private half4 *);
+half8 __ovld modf(half8, __private half8 *);
+half16 __ovld modf(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
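+
+// Usage sketch (illustrative):
+//   float ipart;
+//   float frac = modf(-3.25f, &ipart); // frac == -0.25f, ipart == -3.0f;
+//                                      // both parts take the sign of x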
+
+/**
+ * Returns a quiet NaN. The nancode may be placed
+ * in the significand of the resulting NaN.
+ */
+float __ovld __cnfn nan(uint);
+float2 __ovld __cnfn nan(uint2);
+float3 __ovld __cnfn nan(uint3);
+float4 __ovld __cnfn nan(uint4);
+float8 __ovld __cnfn nan(uint8);
+float16 __ovld __cnfn nan(uint16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nan(ulong);
+double2 __ovld __cnfn nan(ulong2);
+double3 __ovld __cnfn nan(ulong3);
+double4 __ovld __cnfn nan(ulong4);
+double8 __ovld __cnfn nan(ulong8);
+double16 __ovld __cnfn nan(ulong16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nan(ushort);
+half2 __ovld __cnfn nan(ushort2);
+half3 __ovld __cnfn nan(ushort3);
+half4 __ovld __cnfn nan(ushort4);
+half8 __ovld __cnfn nan(ushort8);
+half16 __ovld __cnfn nan(ushort16);
+#endif //cl_khr_fp16
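+
+// Usage sketch (illustrative): tag uninitialized data with a payload code:
+//   float poison = nan(0xBADu); // quiet NaN; the nancode may be stored in
+//                               // the significand for later inspection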
+
+/**
+ * Computes the next representable floating-point value
+ * (of the argument type) following x in the direction of
+ * y. Thus, if y is less than x, nextafter() returns the
+ * largest representable floating-point number less
+ * than x.
+ */
+float __ovld __cnfn nextafter(float, float);
+float2 __ovld __cnfn nextafter(float2, float2);
+float3 __ovld __cnfn nextafter(float3, float3);
+float4 __ovld __cnfn nextafter(float4, float4);
+float8 __ovld __cnfn nextafter(float8, float8);
+float16 __ovld __cnfn nextafter(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nextafter(double, double);
+double2 __ovld __cnfn nextafter(double2, double2);
+double3 __ovld __cnfn nextafter(double3, double3);
+double4 __ovld __cnfn nextafter(double4, double4);
+double8 __ovld __cnfn nextafter(double8, double8);
+double16 __ovld __cnfn nextafter(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nextafter(half, half);
+half2 __ovld __cnfn nextafter(half2, half2);
+half3 __ovld __cnfn nextafter(half3, half3);
+half4 __ovld __cnfn nextafter(half4, half4);
+half8 __ovld __cnfn nextafter(half8, half8);
+half16 __ovld __cnfn nextafter(half16, half16);
+#endif //cl_khr_fp16
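+
+// Illustrative: step by exactly one ULP toward a target:
+//   nextafter(1.0f, 2.0f) // smallest representable float greater than 1.0f
+//   nextafter(1.0f, 0.0f) // largest representable float less than 1.0f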
+
+/**
+ * Compute x to the power y.
+ */
+float __ovld __cnfn pow(float, float);
+float2 __ovld __cnfn pow(float2, float2);
+float3 __ovld __cnfn pow(float3, float3);
+float4 __ovld __cnfn pow(float4, float4);
+float8 __ovld __cnfn pow(float8, float8);
+float16 __ovld __cnfn pow(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pow(double, double);
+double2 __ovld __cnfn pow(double2, double2);
+double3 __ovld __cnfn pow(double3, double3);
+double4 __ovld __cnfn pow(double4, double4);
+double8 __ovld __cnfn pow(double8, double8);
+double16 __ovld __cnfn pow(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pow(half, half);
+half2 __ovld __cnfn pow(half2, half2);
+half3 __ovld __cnfn pow(half3, half3);
+half4 __ovld __cnfn pow(half4, half4);
+half8 __ovld __cnfn pow(half8, half8);
+half16 __ovld __cnfn pow(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where y is an integer.
+ */
+float __ovld __cnfn pown(float, int);
+float2 __ovld __cnfn pown(float2, int2);
+float3 __ovld __cnfn pown(float3, int3);
+float4 __ovld __cnfn pown(float4, int4);
+float8 __ovld __cnfn pown(float8, int8);
+float16 __ovld __cnfn pown(float16, int16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pown(double, int);
+double2 __ovld __cnfn pown(double2, int2);
+double3 __ovld __cnfn pown(double3, int3);
+double4 __ovld __cnfn pown(double4, int4);
+double8 __ovld __cnfn pown(double8, int8);
+double16 __ovld __cnfn pown(double16, int16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pown(half, int);
+half2 __ovld __cnfn pown(half2, int2);
+half3 __ovld __cnfn pown(half3, int3);
+half4 __ovld __cnfn pown(half4, int4);
+half8 __ovld __cnfn pown(half8, int8);
+half16 __ovld __cnfn pown(half16, int16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn powr(float, float);
+float2 __ovld __cnfn powr(float2, float2);
+float3 __ovld __cnfn powr(float3, float3);
+float4 __ovld __cnfn powr(float4, float4);
+float8 __ovld __cnfn powr(float8, float8);
+float16 __ovld __cnfn powr(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn powr(double, double);
+double2 __ovld __cnfn powr(double2, double2);
+double3 __ovld __cnfn powr(double3, double3);
+double4 __ovld __cnfn powr(double4, double4);
+double8 __ovld __cnfn powr(double8, double8);
+double16 __ovld __cnfn powr(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn powr(half, half);
+half2 __ovld __cnfn powr(half2, half2);
+half3 __ovld __cnfn powr(half3, half3);
+half4 __ovld __cnfn powr(half4, half4);
+half8 __ovld __cnfn powr(half8, half8);
+half16 __ovld __cnfn powr(half16, half16);
+#endif //cl_khr_fp16
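+
+// Choosing among the three power functions (illustrative):
+//   pow(x, y)   // general case; allows negative x when y is integral
+//   pown(x, 3)  // integer exponent known at the call site; often cheaper
+//   powr(x, y)  // requires x >= 0, e.g. powr(-1.0f, 2.0f) returns NaN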
+
+/**
+ * Compute the value r such that r = x - n*y, where n
+ * is the integer nearest the exact value of x/y. If there
+ * are two integers closest to x/y, n shall be the even
+ * one. If r is zero, it is given the same sign as x.
+ */
+float __ovld __cnfn remainder(float, float);
+float2 __ovld __cnfn remainder(float2, float2);
+float3 __ovld __cnfn remainder(float3, float3);
+float4 __ovld __cnfn remainder(float4, float4);
+float8 __ovld __cnfn remainder(float8, float8);
+float16 __ovld __cnfn remainder(float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn remainder(double, double);
+double2 __ovld __cnfn remainder(double2, double2);
+double3 __ovld __cnfn remainder(double3, double3);
+double4 __ovld __cnfn remainder(double4, double4);
+double8 __ovld __cnfn remainder(double8, double8);
+double16 __ovld __cnfn remainder(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn remainder(half, half);
+half2 __ovld __cnfn remainder(half2, half2);
+half3 __ovld __cnfn remainder(half3, half3);
+half4 __ovld __cnfn remainder(half4, half4);
+half8 __ovld __cnfn remainder(half8, half8);
+half16 __ovld __cnfn remainder(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * The remquo function computes the value r such
+ * that r = x - n*y, where n is the integer nearest the
+ * exact value of x/y. If there are two integers closest
+ * to x/y, n shall be the even one. If r is zero, it is
+ * given the same sign as x. This is the same value
+ * that is returned by the remainder function.
+ * remquo also calculates the lower seven bits of the
+ * integral quotient x/y, and gives that value the same
+ * sign as x/y. It stores this signed value in the object
+ * pointed to by quo.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld remquo(float, float, int *);
+float2 __ovld remquo(float2, float2, int2 *);
+float3 __ovld remquo(float3, float3, int3 *);
+float4 __ovld remquo(float4, float4, int4 *);
+float8 __ovld remquo(float8, float8, int8 *);
+float16 __ovld remquo(float16, float16, int16 *);
+#ifdef cl_khr_fp64
+double __ovld remquo(double, double, int *);
+double2 __ovld remquo(double2, double2, int2 *);
+double3 __ovld remquo(double3, double3, int3 *);
+double4 __ovld remquo(double4, double4, int4 *);
+double8 __ovld remquo(double8, double8, int8 *);
+double16 __ovld remquo(double16, double16, int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half, half, int *);
+half2 __ovld remquo(half2, half2, int2 *);
+half3 __ovld remquo(half3, half3, int3 *);
+half4 __ovld remquo(half4, half4, int4 *);
+half8 __ovld remquo(half8, half8, int8 *);
+half16 __ovld remquo(half16, half16, int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld remquo(float, float, __global int *);
+float2 __ovld remquo(float2, float2, __global int2 *);
+float3 __ovld remquo(float3, float3, __global int3 *);
+float4 __ovld remquo(float4, float4, __global int4 *);
+float8 __ovld remquo(float8, float8, __global int8 *);
+float16 __ovld remquo(float16, float16, __global int16 *);
+float __ovld remquo(float, float, __local int *);
+float2 __ovld remquo(float2, float2, __local int2 *);
+float3 __ovld remquo(float3, float3, __local int3 *);
+float4 __ovld remquo(float4, float4, __local int4 *);
+float8 __ovld remquo(float8, float8, __local int8 *);
+float16 __ovld remquo(float16, float16, __local int16 *);
+float __ovld remquo(float, float, __private int *);
+float2 __ovld remquo(float2, float2, __private int2 *);
+float3 __ovld remquo(float3, float3, __private int3 *);
+float4 __ovld remquo(float4, float4, __private int4 *);
+float8 __ovld remquo(float8, float8, __private int8 *);
+float16 __ovld remquo(float16, float16, __private int16 *);
+#ifdef cl_khr_fp64
+double __ovld remquo(double, double, __global int *);
+double2 __ovld remquo(double2, double2, __global int2 *);
+double3 __ovld remquo(double3, double3, __global int3 *);
+double4 __ovld remquo(double4, double4, __global int4 *);
+double8 __ovld remquo(double8, double8, __global int8 *);
+double16 __ovld remquo(double16, double16, __global int16 *);
+double __ovld remquo(double, double, __local int *);
+double2 __ovld remquo(double2, double2, __local int2 *);
+double3 __ovld remquo(double3, double3, __local int3 *);
+double4 __ovld remquo(double4, double4, __local int4 *);
+double8 __ovld remquo(double8, double8, __local int8 *);
+double16 __ovld remquo(double16, double16, __local int16 *);
+double __ovld remquo(double, double, __private int *);
+double2 __ovld remquo(double2, double2, __private int2 *);
+double3 __ovld remquo(double3, double3, __private int3 *);
+double4 __ovld remquo(double4, double4, __private int4 *);
+double8 __ovld remquo(double8, double8, __private int8 *);
+double16 __ovld remquo(double16, double16, __private int16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half, half, __global int *);
+half2 __ovld remquo(half2, half2, __global int2 *);
+half3 __ovld remquo(half3, half3, __global int3 *);
+half4 __ovld remquo(half4, half4, __global int4 *);
+half8 __ovld remquo(half8, half8, __global int8 *);
+half16 __ovld remquo(half16, half16, __global int16 *);
+half __ovld remquo(half, half, __local int *);
+half2 __ovld remquo(half2, half2, __local int2 *);
+half3 __ovld remquo(half3, half3, __local int3 *);
+half4 __ovld remquo(half4, half4, __local int4 *);
+half8 __ovld remquo(half8, half8, __local int8 *);
+half16 __ovld remquo(half16, half16, __local int16 *);
+half __ovld remquo(half, half, __private int *);
+half2 __ovld remquo(half2, half2, __private int2 *);
+half3 __ovld remquo(half3, half3, __private int3 *);
+half4 __ovld remquo(half4, half4, __private int4 *);
+half8 __ovld remquo(half8, half8, __private int8 *);
+half16 __ovld remquo(half16, half16, __private int16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
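+
+// Usage sketch (illustrative; assumes a float x in scope): the quotient bits
+// are useful for argument reduction, e.g. mapping an angle onto an octant:
+//   int quo;
+//   float r = remquo(x, M_PI_2_F, &quo); // r = x - n*(pi/2), n nearest int
+//   // the low bits of n (with the sign of x/y) are available in quo
+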
+/**
+ * Round to integral value (using round to nearest
+ * even rounding mode) in floating-point format.
+ * Refer to section 7.1 for a description of rounding
+ * modes.
+ */
+float __ovld __cnfn rint(float);
+float2 __ovld __cnfn rint(float2);
+float3 __ovld __cnfn rint(float3);
+float4 __ovld __cnfn rint(float4);
+float8 __ovld __cnfn rint(float8);
+float16 __ovld __cnfn rint(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rint(double);
+double2 __ovld __cnfn rint(double2);
+double3 __ovld __cnfn rint(double3);
+double4 __ovld __cnfn rint(double4);
+double8 __ovld __cnfn rint(double8);
+double16 __ovld __cnfn rint(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rint(half);
+half2 __ovld __cnfn rint(half2);
+half3 __ovld __cnfn rint(half3);
+half4 __ovld __cnfn rint(half4);
+half8 __ovld __cnfn rint(half8);
+half16 __ovld __cnfn rint(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power 1/y.
+ */
+float __ovld __cnfn rootn(float, int);
+float2 __ovld __cnfn rootn(float2, int2);
+float3 __ovld __cnfn rootn(float3, int3);
+float4 __ovld __cnfn rootn(float4, int4);
+float8 __ovld __cnfn rootn(float8, int8);
+float16 __ovld __cnfn rootn(float16, int16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rootn(double, int);
+double2 __ovld __cnfn rootn(double2, int2);
+double3 __ovld __cnfn rootn(double3, int3);
+double4 __ovld __cnfn rootn(double4, int4);
+double8 __ovld __cnfn rootn(double8, int8);
+double16 __ovld __cnfn rootn(double16, int16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rootn(half, int);
+half2 __ovld __cnfn rootn(half2, int2);
+half3 __ovld __cnfn rootn(half3, int3);
+half4 __ovld __cnfn rootn(half4, int4);
+half8 __ovld __cnfn rootn(half8, int8);
+half16 __ovld __cnfn rootn(half16, int16);
+#endif //cl_khr_fp16
+
+/**
+ * Return the integral value nearest to x rounding
+ * halfway cases away from zero, regardless of the
+ * current rounding direction.
+ */
+float __ovld __cnfn round(float);
+float2 __ovld __cnfn round(float2);
+float3 __ovld __cnfn round(float3);
+float4 __ovld __cnfn round(float4);
+float8 __ovld __cnfn round(float8);
+float16 __ovld __cnfn round(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn round(double);
+double2 __ovld __cnfn round(double2);
+double3 __ovld __cnfn round(double3);
+double4 __ovld __cnfn round(double4);
+double8 __ovld __cnfn round(double8);
+double16 __ovld __cnfn round(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn round(half);
+half2 __ovld __cnfn round(half2);
+half3 __ovld __cnfn round(half3);
+half4 __ovld __cnfn round(half4);
+half8 __ovld __cnfn round(half8);
+half16 __ovld __cnfn round(half16);
+#endif //cl_khr_fp16
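+
+// Tie-breaking contrast between the two rounding functions (illustrative):
+//   rint(2.5f)   ==  2.0f // ties go to the even neighbor
+//   rint(3.5f)   ==  4.0f
+//   round(2.5f)  ==  3.0f // ties go away from zero
+//   round(-2.5f) == -3.0f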
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn rsqrt(float);
+float2 __ovld __cnfn rsqrt(float2);
+float3 __ovld __cnfn rsqrt(float3);
+float4 __ovld __cnfn rsqrt(float4);
+float8 __ovld __cnfn rsqrt(float8);
+float16 __ovld __cnfn rsqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rsqrt(double);
+double2 __ovld __cnfn rsqrt(double2);
+double3 __ovld __cnfn rsqrt(double3);
+double4 __ovld __cnfn rsqrt(double4);
+double8 __ovld __cnfn rsqrt(double8);
+double16 __ovld __cnfn rsqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rsqrt(half);
+half2 __ovld __cnfn rsqrt(half2);
+half3 __ovld __cnfn rsqrt(half3);
+half4 __ovld __cnfn rsqrt(half4);
+half8 __ovld __cnfn rsqrt(half8);
+half16 __ovld __cnfn rsqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine.
+ */
+float __ovld __cnfn sin(float);
+float2 __ovld __cnfn sin(float2);
+float3 __ovld __cnfn sin(float3);
+float4 __ovld __cnfn sin(float4);
+float8 __ovld __cnfn sin(float8);
+float16 __ovld __cnfn sin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sin(double);
+double2 __ovld __cnfn sin(double2);
+double3 __ovld __cnfn sin(double3);
+double4 __ovld __cnfn sin(double4);
+double8 __ovld __cnfn sin(double8);
+double16 __ovld __cnfn sin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sin(half);
+half2 __ovld __cnfn sin(half2);
+half3 __ovld __cnfn sin(half3);
+half4 __ovld __cnfn sin(half4);
+half8 __ovld __cnfn sin(half8);
+half16 __ovld __cnfn sin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine and cosine of x. The computed sine
+ * is the return value and the computed cosine is returned
+ * in cosval.
+ */
+#if defined(__opencl_c_generic_address_space)
+float __ovld sincos(float, float *);
+float2 __ovld sincos(float2, float2 *);
+float3 __ovld sincos(float3, float3 *);
+float4 __ovld sincos(float4, float4 *);
+float8 __ovld sincos(float8, float8 *);
+float16 __ovld sincos(float16, float16 *);
+#ifdef cl_khr_fp64
+double __ovld sincos(double, double *);
+double2 __ovld sincos(double2, double2 *);
+double3 __ovld sincos(double3, double3 *);
+double4 __ovld sincos(double4, double4 *);
+double8 __ovld sincos(double8, double8 *);
+double16 __ovld sincos(double16, double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half, half *);
+half2 __ovld sincos(half2, half2 *);
+half3 __ovld sincos(half3, half3 *);
+half4 __ovld sincos(half4, half4 *);
+half8 __ovld sincos(half8, half8 *);
+half16 __ovld sincos(half16, half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld sincos(float, __global float *);
+float2 __ovld sincos(float2, __global float2 *);
+float3 __ovld sincos(float3, __global float3 *);
+float4 __ovld sincos(float4, __global float4 *);
+float8 __ovld sincos(float8, __global float8 *);
+float16 __ovld sincos(float16, __global float16 *);
+float __ovld sincos(float, __local float *);
+float2 __ovld sincos(float2, __local float2 *);
+float3 __ovld sincos(float3, __local float3 *);
+float4 __ovld sincos(float4, __local float4 *);
+float8 __ovld sincos(float8, __local float8 *);
+float16 __ovld sincos(float16, __local float16 *);
+float __ovld sincos(float, __private float *);
+float2 __ovld sincos(float2, __private float2 *);
+float3 __ovld sincos(float3, __private float3 *);
+float4 __ovld sincos(float4, __private float4 *);
+float8 __ovld sincos(float8, __private float8 *);
+float16 __ovld sincos(float16, __private float16 *);
+#ifdef cl_khr_fp64
+double __ovld sincos(double, __global double *);
+double2 __ovld sincos(double2, __global double2 *);
+double3 __ovld sincos(double3, __global double3 *);
+double4 __ovld sincos(double4, __global double4 *);
+double8 __ovld sincos(double8, __global double8 *);
+double16 __ovld sincos(double16, __global double16 *);
+double __ovld sincos(double, __local double *);
+double2 __ovld sincos(double2, __local double2 *);
+double3 __ovld sincos(double3, __local double3 *);
+double4 __ovld sincos(double4, __local double4 *);
+double8 __ovld sincos(double8, __local double8 *);
+double16 __ovld sincos(double16, __local double16 *);
+double __ovld sincos(double, __private double *);
+double2 __ovld sincos(double2, __private double2 *);
+double3 __ovld sincos(double3, __private double3 *);
+double4 __ovld sincos(double4, __private double4 *);
+double8 __ovld sincos(double8, __private double8 *);
+double16 __ovld sincos(double16, __private double16 *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half, __global half *);
+half2 __ovld sincos(half2, __global half2 *);
+half3 __ovld sincos(half3, __global half3 *);
+half4 __ovld sincos(half4, __global half4 *);
+half8 __ovld sincos(half8, __global half8 *);
+half16 __ovld sincos(half16, __global half16 *);
+half __ovld sincos(half, __local half *);
+half2 __ovld sincos(half2, __local half2 *);
+half3 __ovld sincos(half3, __local half3 *);
+half4 __ovld sincos(half4, __local half4 *);
+half8 __ovld sincos(half8, __local half8 *);
+half16 __ovld sincos(half16, __local half16 *);
+half __ovld sincos(half, __private half *);
+half2 __ovld sincos(half2, __private half2 *);
+half3 __ovld sincos(half3, __private half3 *);
+half4 __ovld sincos(half4, __private half4 *);
+half8 __ovld sincos(half8, __private half8 *);
+half16 __ovld sincos(half16, __private half16 *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
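+
+// Usage sketch (illustrative): one call instead of separate sin() and cos():
+//   float c;
+//   float s = sincos(theta, &c); // s == sin(theta), c == cos(theta)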
+
+/**
+ * Compute hyperbolic sine.
+ */
+float __ovld __cnfn sinh(float);
+float2 __ovld __cnfn sinh(float2);
+float3 __ovld __cnfn sinh(float3);
+float4 __ovld __cnfn sinh(float4);
+float8 __ovld __cnfn sinh(float8);
+float16 __ovld __cnfn sinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinh(double);
+double2 __ovld __cnfn sinh(double2);
+double3 __ovld __cnfn sinh(double3);
+double4 __ovld __cnfn sinh(double4);
+double8 __ovld __cnfn sinh(double8);
+double16 __ovld __cnfn sinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinh(half);
+half2 __ovld __cnfn sinh(half2);
+half3 __ovld __cnfn sinh(half3);
+half4 __ovld __cnfn sinh(half4);
+half8 __ovld __cnfn sinh(half8);
+half16 __ovld __cnfn sinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sin (PI * x).
+ */
+float __ovld __cnfn sinpi(float);
+float2 __ovld __cnfn sinpi(float2);
+float3 __ovld __cnfn sinpi(float3);
+float4 __ovld __cnfn sinpi(float4);
+float8 __ovld __cnfn sinpi(float8);
+float16 __ovld __cnfn sinpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinpi(double);
+double2 __ovld __cnfn sinpi(double2);
+double3 __ovld __cnfn sinpi(double3);
+double4 __ovld __cnfn sinpi(double4);
+double8 __ovld __cnfn sinpi(double8);
+double16 __ovld __cnfn sinpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinpi(half);
+half2 __ovld __cnfn sinpi(half2);
+half3 __ovld __cnfn sinpi(half3);
+half4 __ovld __cnfn sinpi(half4);
+half8 __ovld __cnfn sinpi(half8);
+half16 __ovld __cnfn sinpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn sqrt(float);
+float2 __ovld __cnfn sqrt(float2);
+float3 __ovld __cnfn sqrt(float3);
+float4 __ovld __cnfn sqrt(float4);
+float8 __ovld __cnfn sqrt(float8);
+float16 __ovld __cnfn sqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sqrt(double);
+double2 __ovld __cnfn sqrt(double2);
+double3 __ovld __cnfn sqrt(double3);
+double4 __ovld __cnfn sqrt(double4);
+double8 __ovld __cnfn sqrt(double8);
+double16 __ovld __cnfn sqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sqrt(half);
+half2 __ovld __cnfn sqrt(half2);
+half3 __ovld __cnfn sqrt(half3);
+half4 __ovld __cnfn sqrt(half4);
+half8 __ovld __cnfn sqrt(half8);
+half16 __ovld __cnfn sqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tangent.
+ */
+float __ovld __cnfn tan(float);
+float2 __ovld __cnfn tan(float2);
+float3 __ovld __cnfn tan(float3);
+float4 __ovld __cnfn tan(float4);
+float8 __ovld __cnfn tan(float8);
+float16 __ovld __cnfn tan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tan(double);
+double2 __ovld __cnfn tan(double2);
+double3 __ovld __cnfn tan(double3);
+double4 __ovld __cnfn tan(double4);
+double8 __ovld __cnfn tan(double8);
+double16 __ovld __cnfn tan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tan(half);
+half2 __ovld __cnfn tan(half2);
+half3 __ovld __cnfn tan(half3);
+half4 __ovld __cnfn tan(half4);
+half8 __ovld __cnfn tan(half8);
+half16 __ovld __cnfn tan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic tangent.
+ */
+float __ovld __cnfn tanh(float);
+float2 __ovld __cnfn tanh(float2);
+float3 __ovld __cnfn tanh(float3);
+float4 __ovld __cnfn tanh(float4);
+float8 __ovld __cnfn tanh(float8);
+float16 __ovld __cnfn tanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanh(double);
+double2 __ovld __cnfn tanh(double2);
+double3 __ovld __cnfn tanh(double3);
+double4 __ovld __cnfn tanh(double4);
+double8 __ovld __cnfn tanh(double8);
+double16 __ovld __cnfn tanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanh(half);
+half2 __ovld __cnfn tanh(half2);
+half3 __ovld __cnfn tanh(half3);
+half4 __ovld __cnfn tanh(half4);
+half8 __ovld __cnfn tanh(half8);
+half16 __ovld __cnfn tanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tan (PI * x).
+ */
+float __ovld __cnfn tanpi(float);
+float2 __ovld __cnfn tanpi(float2);
+float3 __ovld __cnfn tanpi(float3);
+float4 __ovld __cnfn tanpi(float4);
+float8 __ovld __cnfn tanpi(float8);
+float16 __ovld __cnfn tanpi(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanpi(double);
+double2 __ovld __cnfn tanpi(double2);
+double3 __ovld __cnfn tanpi(double3);
+double4 __ovld __cnfn tanpi(double4);
+double8 __ovld __cnfn tanpi(double8);
+double16 __ovld __cnfn tanpi(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanpi(half);
+half2 __ovld __cnfn tanpi(half2);
+half3 __ovld __cnfn tanpi(half3);
+half4 __ovld __cnfn tanpi(half4);
+half8 __ovld __cnfn tanpi(half8);
+half16 __ovld __cnfn tanpi(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the gamma function.
+ */
+float __ovld __cnfn tgamma(float);
+float2 __ovld __cnfn tgamma(float2);
+float3 __ovld __cnfn tgamma(float3);
+float4 __ovld __cnfn tgamma(float4);
+float8 __ovld __cnfn tgamma(float8);
+float16 __ovld __cnfn tgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tgamma(double);
+double2 __ovld __cnfn tgamma(double2);
+double3 __ovld __cnfn tgamma(double3);
+double4 __ovld __cnfn tgamma(double4);
+double8 __ovld __cnfn tgamma(double8);
+double16 __ovld __cnfn tgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tgamma(half);
+half2 __ovld __cnfn tgamma(half2);
+half3 __ovld __cnfn tgamma(half3);
+half4 __ovld __cnfn tgamma(half4);
+half8 __ovld __cnfn tgamma(half8);
+half16 __ovld __cnfn tgamma(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to zero
+ * rounding mode.
+ */
+float __ovld __cnfn trunc(float);
+float2 __ovld __cnfn trunc(float2);
+float3 __ovld __cnfn trunc(float3);
+float4 __ovld __cnfn trunc(float4);
+float8 __ovld __cnfn trunc(float8);
+float16 __ovld __cnfn trunc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn trunc(double);
+double2 __ovld __cnfn trunc(double2);
+double3 __ovld __cnfn trunc(double3);
+double4 __ovld __cnfn trunc(double4);
+double8 __ovld __cnfn trunc(double8);
+double16 __ovld __cnfn trunc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn trunc(half);
+half2 __ovld __cnfn trunc(half2);
+half3 __ovld __cnfn trunc(half3);
+half4 __ovld __cnfn trunc(half4);
+half8 __ovld __cnfn trunc(half8);
+half16 __ovld __cnfn trunc(half16);
+#endif //cl_khr_fp16
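+
+// Illustrative: trunc always rounds toward zero:
+//   trunc(2.7f)  ==  2.0f
+//   trunc(-2.7f) == -2.0f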
+
+/**
+ * Compute cosine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_cos(float);
+float2 __ovld __cnfn half_cos(float2);
+float3 __ovld __cnfn half_cos(float3);
+float4 __ovld __cnfn half_cos(float4);
+float8 __ovld __cnfn half_cos(float8);
+float16 __ovld __cnfn half_cos(float16);
+
+/**
+ * Compute x / y.
+ */
+float __ovld __cnfn half_divide(float, float);
+float2 __ovld __cnfn half_divide(float2, float2);
+float3 __ovld __cnfn half_divide(float3, float3);
+float4 __ovld __cnfn half_divide(float4, float4);
+float8 __ovld __cnfn half_divide(float8, float8);
+float16 __ovld __cnfn half_divide(float16, float16);
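+
+// Tradeoff sketch (illustrative): half_* variants trade accuracy for speed:
+//   float q1 = x / y;             // standard division, tight error bound
+//   float q2 = half_divide(x, y); // faster on some devices, much looser
+//                                 // error bound (see the spec's ULP tables)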
+
+/**
+ * Compute the base-e exponential of x.
+ */
+float __ovld __cnfn half_exp(float);
+float2 __ovld __cnfn half_exp(float2);
+float3 __ovld __cnfn half_exp(float3);
+float4 __ovld __cnfn half_exp(float4);
+float8 __ovld __cnfn half_exp(float8);
+float16 __ovld __cnfn half_exp(float16);
+
+/**
+ * Compute the base-2 exponential of x.
+ */
+float __ovld __cnfn half_exp2(float);
+float2 __ovld __cnfn half_exp2(float2);
+float3 __ovld __cnfn half_exp2(float3);
+float4 __ovld __cnfn half_exp2(float4);
+float8 __ovld __cnfn half_exp2(float8);
+float16 __ovld __cnfn half_exp2(float16);
+
+/**
+ * Compute the base-10 exponential of x.
+ */
+float __ovld __cnfn half_exp10(float);
+float2 __ovld __cnfn half_exp10(float2);
+float3 __ovld __cnfn half_exp10(float3);
+float4 __ovld __cnfn half_exp10(float4);
+float8 __ovld __cnfn half_exp10(float8);
+float16 __ovld __cnfn half_exp10(float16);
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn half_log(float);
+float2 __ovld __cnfn half_log(float2);
+float3 __ovld __cnfn half_log(float3);
+float4 __ovld __cnfn half_log(float4);
+float8 __ovld __cnfn half_log(float8);
+float16 __ovld __cnfn half_log(float16);
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn half_log2(float);
+float2 __ovld __cnfn half_log2(float2);
+float3 __ovld __cnfn half_log2(float3);
+float4 __ovld __cnfn half_log2(float4);
+float8 __ovld __cnfn half_log2(float8);
+float16 __ovld __cnfn half_log2(float16);
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn half_log10(float);
+float2 __ovld __cnfn half_log10(float2);
+float3 __ovld __cnfn half_log10(float3);
+float4 __ovld __cnfn half_log10(float4);
+float8 __ovld __cnfn half_log10(float8);
+float16 __ovld __cnfn half_log10(float16);
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn half_powr(float, float);
+float2 __ovld __cnfn half_powr(float2, float2);
+float3 __ovld __cnfn half_powr(float3, float3);
+float4 __ovld __cnfn half_powr(float4, float4);
+float8 __ovld __cnfn half_powr(float8, float8);
+float16 __ovld __cnfn half_powr(float16, float16);
+
+/**
+ * Compute reciprocal.
+ */
+float __ovld __cnfn half_recip(float);
+float2 __ovld __cnfn half_recip(float2);
+float3 __ovld __cnfn half_recip(float3);
+float4 __ovld __cnfn half_recip(float4);
+float8 __ovld __cnfn half_recip(float8);
+float16 __ovld __cnfn half_recip(float16);
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn half_rsqrt(float);
+float2 __ovld __cnfn half_rsqrt(float2);
+float3 __ovld __cnfn half_rsqrt(float3);
+float4 __ovld __cnfn half_rsqrt(float4);
+float8 __ovld __cnfn half_rsqrt(float8);
+float16 __ovld __cnfn half_rsqrt(float16);
+
+/**
+ * Compute sine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_sin(float);
+float2 __ovld __cnfn half_sin(float2);
+float3 __ovld __cnfn half_sin(float3);
+float4 __ovld __cnfn half_sin(float4);
+float8 __ovld __cnfn half_sin(float8);
+float16 __ovld __cnfn half_sin(float16);
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn half_sqrt(float);
+float2 __ovld __cnfn half_sqrt(float2);
+float3 __ovld __cnfn half_sqrt(float3);
+float4 __ovld __cnfn half_sqrt(float4);
+float8 __ovld __cnfn half_sqrt(float8);
+float16 __ovld __cnfn half_sqrt(float16);
+
+/**
+ * Compute tangent. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_tan(float);
+float2 __ovld __cnfn half_tan(float2);
+float3 __ovld __cnfn half_tan(float3);
+float4 __ovld __cnfn half_tan(float4);
+float8 __ovld __cnfn half_tan(float8);
+float16 __ovld __cnfn half_tan(float16);
+
+/**
+ * Compute cosine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_cos(float);
+float2 __ovld __cnfn native_cos(float2);
+float3 __ovld __cnfn native_cos(float3);
+float4 __ovld __cnfn native_cos(float4);
+float8 __ovld __cnfn native_cos(float8);
+float16 __ovld __cnfn native_cos(float16);
+
+/**
+ * Compute x / y over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_divide(float, float);
+float2 __ovld __cnfn native_divide(float2, float2);
+float3 __ovld __cnfn native_divide(float3, float3);
+float4 __ovld __cnfn native_divide(float4, float4);
+float8 __ovld __cnfn native_divide(float8, float8);
+float16 __ovld __cnfn native_divide(float16, float16);
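+
+// Illustrative: native_* variants typically map straight to hardware
+// instructions; both the valid input range and the error are up to the
+// implementation:
+//   float q = native_divide(x, y); // fastest, least portable accuracy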
+
+/**
+ * Compute the base-e exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp(float);
+float2 __ovld __cnfn native_exp(float2);
+float3 __ovld __cnfn native_exp(float3);
+float4 __ovld __cnfn native_exp(float4);
+float8 __ovld __cnfn native_exp(float8);
+float16 __ovld __cnfn native_exp(float16);
+
+/**
+ * Compute the base-2 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp2(float);
+float2 __ovld __cnfn native_exp2(float2);
+float3 __ovld __cnfn native_exp2(float3);
+float4 __ovld __cnfn native_exp2(float4);
+float8 __ovld __cnfn native_exp2(float8);
+float16 __ovld __cnfn native_exp2(float16);
+
+/**
+ * Compute the base-10 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp10(float);
+float2 __ovld __cnfn native_exp10(float2);
+float3 __ovld __cnfn native_exp10(float3);
+float4 __ovld __cnfn native_exp10(float4);
+float8 __ovld __cnfn native_exp10(float8);
+float16 __ovld __cnfn native_exp10(float16);
+
+/**
+ * Compute natural logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log(float);
+float2 __ovld __cnfn native_log(float2);
+float3 __ovld __cnfn native_log(float3);
+float4 __ovld __cnfn native_log(float4);
+float8 __ovld __cnfn native_log(float8);
+float16 __ovld __cnfn native_log(float16);
+
+/**
+ * Compute a base 2 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log2(float);
+float2 __ovld __cnfn native_log2(float2);
+float3 __ovld __cnfn native_log2(float3);
+float4 __ovld __cnfn native_log2(float4);
+float8 __ovld __cnfn native_log2(float8);
+float16 __ovld __cnfn native_log2(float16);
+
+/**
+ * Compute a base 10 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log10(float);
+float2 __ovld __cnfn native_log10(float2);
+float3 __ovld __cnfn native_log10(float3);
+float4 __ovld __cnfn native_log10(float4);
+float8 __ovld __cnfn native_log10(float8);
+float16 __ovld __cnfn native_log10(float16);
+
+/**
+ * Compute x to the power y, where x is >= 0. The ranges of
+ * x and y are implementation-defined. The maximum error
+ * is implementation-defined.
+ */
+float __ovld __cnfn native_powr(float, float);
+float2 __ovld __cnfn native_powr(float2, float2);
+float3 __ovld __cnfn native_powr(float3, float3);
+float4 __ovld __cnfn native_powr(float4, float4);
+float8 __ovld __cnfn native_powr(float8, float8);
+float16 __ovld __cnfn native_powr(float16, float16);
+
+/**
+ * Compute reciprocal over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_recip(float);
+float2 __ovld __cnfn native_recip(float2);
+float3 __ovld __cnfn native_recip(float3);
+float4 __ovld __cnfn native_recip(float4);
+float8 __ovld __cnfn native_recip(float8);
+float16 __ovld __cnfn native_recip(float16);
+
+/**
+ * Compute inverse square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_rsqrt(float);
+float2 __ovld __cnfn native_rsqrt(float2);
+float3 __ovld __cnfn native_rsqrt(float3);
+float4 __ovld __cnfn native_rsqrt(float4);
+float8 __ovld __cnfn native_rsqrt(float8);
+float16 __ovld __cnfn native_rsqrt(float16);
+
+/**
+ * Compute sine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sin(float);
+float2 __ovld __cnfn native_sin(float2);
+float3 __ovld __cnfn native_sin(float3);
+float4 __ovld __cnfn native_sin(float4);
+float8 __ovld __cnfn native_sin(float8);
+float16 __ovld __cnfn native_sin(float16);
+
+/**
+ * Compute square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sqrt(float);
+float2 __ovld __cnfn native_sqrt(float2);
+float3 __ovld __cnfn native_sqrt(float3);
+float4 __ovld __cnfn native_sqrt(float4);
+float8 __ovld __cnfn native_sqrt(float8);
+float16 __ovld __cnfn native_sqrt(float16);
+
+/**
+ * Compute tangent over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_tan(float);
+float2 __ovld __cnfn native_tan(float2);
+float3 __ovld __cnfn native_tan(float3);
+float4 __ovld __cnfn native_tan(float4);
+float8 __ovld __cnfn native_tan(float8);
+float16 __ovld __cnfn native_tan(float16);
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
+
+/**
+ * Returns | x |.
+ */
+uchar __ovld __cnfn abs(char);
+uchar __ovld __cnfn abs(uchar);
+uchar2 __ovld __cnfn abs(char2);
+uchar2 __ovld __cnfn abs(uchar2);
+uchar3 __ovld __cnfn abs(char3);
+uchar3 __ovld __cnfn abs(uchar3);
+uchar4 __ovld __cnfn abs(char4);
+uchar4 __ovld __cnfn abs(uchar4);
+uchar8 __ovld __cnfn abs(char8);
+uchar8 __ovld __cnfn abs(uchar8);
+uchar16 __ovld __cnfn abs(char16);
+uchar16 __ovld __cnfn abs(uchar16);
+ushort __ovld __cnfn abs(short);
+ushort __ovld __cnfn abs(ushort);
+ushort2 __ovld __cnfn abs(short2);
+ushort2 __ovld __cnfn abs(ushort2);
+ushort3 __ovld __cnfn abs(short3);
+ushort3 __ovld __cnfn abs(ushort3);
+ushort4 __ovld __cnfn abs(short4);
+ushort4 __ovld __cnfn abs(ushort4);
+ushort8 __ovld __cnfn abs(short8);
+ushort8 __ovld __cnfn abs(ushort8);
+ushort16 __ovld __cnfn abs(short16);
+ushort16 __ovld __cnfn abs(ushort16);
+uint __ovld __cnfn abs(int);
+uint __ovld __cnfn abs(uint);
+uint2 __ovld __cnfn abs(int2);
+uint2 __ovld __cnfn abs(uint2);
+uint3 __ovld __cnfn abs(int3);
+uint3 __ovld __cnfn abs(uint3);
+uint4 __ovld __cnfn abs(int4);
+uint4 __ovld __cnfn abs(uint4);
+uint8 __ovld __cnfn abs(int8);
+uint8 __ovld __cnfn abs(uint8);
+uint16 __ovld __cnfn abs(int16);
+uint16 __ovld __cnfn abs(uint16);
+ulong __ovld __cnfn abs(long);
+ulong __ovld __cnfn abs(ulong);
+ulong2 __ovld __cnfn abs(long2);
+ulong2 __ovld __cnfn abs(ulong2);
+ulong3 __ovld __cnfn abs(long3);
+ulong3 __ovld __cnfn abs(ulong3);
+ulong4 __ovld __cnfn abs(long4);
+ulong4 __ovld __cnfn abs(ulong4);
+ulong8 __ovld __cnfn abs(long8);
+ulong8 __ovld __cnfn abs(ulong8);
+ulong16 __ovld __cnfn abs(long16);
+ulong16 __ovld __cnfn abs(ulong16);
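+
+// The unsigned return type lets the most negative input be represented
+// exactly (illustrative):
+//   uchar m = abs((char)(-128)); // 128, which does not fit in a char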
+
+/**
+ * Returns | x - y | without modulo overflow.
+ */
+uchar __ovld __cnfn abs_diff(char, char);
+uchar __ovld __cnfn abs_diff(uchar, uchar);
+uchar2 __ovld __cnfn abs_diff(char2, char2);
+uchar2 __ovld __cnfn abs_diff(uchar2, uchar2);
+uchar3 __ovld __cnfn abs_diff(char3, char3);
+uchar3 __ovld __cnfn abs_diff(uchar3, uchar3);
+uchar4 __ovld __cnfn abs_diff(char4, char4);
+uchar4 __ovld __cnfn abs_diff(uchar4, uchar4);
+uchar8 __ovld __cnfn abs_diff(char8, char8);
+uchar8 __ovld __cnfn abs_diff(uchar8, uchar8);
+uchar16 __ovld __cnfn abs_diff(char16, char16);
+uchar16 __ovld __cnfn abs_diff(uchar16, uchar16);
+ushort __ovld __cnfn abs_diff(short, short);
+ushort __ovld __cnfn abs_diff(ushort, ushort);
+ushort2 __ovld __cnfn abs_diff(short2, short2);
+ushort2 __ovld __cnfn abs_diff(ushort2, ushort2);
+ushort3 __ovld __cnfn abs_diff(short3, short3);
+ushort3 __ovld __cnfn abs_diff(ushort3, ushort3);
+ushort4 __ovld __cnfn abs_diff(short4, short4);
+ushort4 __ovld __cnfn abs_diff(ushort4, ushort4);
+ushort8 __ovld __cnfn abs_diff(short8, short8);
+ushort8 __ovld __cnfn abs_diff(ushort8, ushort8);
+ushort16 __ovld __cnfn abs_diff(short16, short16);
+ushort16 __ovld __cnfn abs_diff(ushort16, ushort16);
+uint __ovld __cnfn abs_diff(int, int);
+uint __ovld __cnfn abs_diff(uint, uint);
+uint2 __ovld __cnfn abs_diff(int2, int2);
+uint2 __ovld __cnfn abs_diff(uint2, uint2);
+uint3 __ovld __cnfn abs_diff(int3, int3);
+uint3 __ovld __cnfn abs_diff(uint3, uint3);
+uint4 __ovld __cnfn abs_diff(int4, int4);
+uint4 __ovld __cnfn abs_diff(uint4, uint4);
+uint8 __ovld __cnfn abs_diff(int8, int8);
+uint8 __ovld __cnfn abs_diff(uint8, uint8);
+uint16 __ovld __cnfn abs_diff(int16, int16);
+uint16 __ovld __cnfn abs_diff(uint16, uint16);
+ulong __ovld __cnfn abs_diff(long, long);
+ulong __ovld __cnfn abs_diff(ulong, ulong);
+ulong2 __ovld __cnfn abs_diff(long2, long2);
+ulong2 __ovld __cnfn abs_diff(ulong2, ulong2);
+ulong3 __ovld __cnfn abs_diff(long3, long3);
+ulong3 __ovld __cnfn abs_diff(ulong3, ulong3);
+ulong4 __ovld __cnfn abs_diff(long4, long4);
+ulong4 __ovld __cnfn abs_diff(ulong4, ulong4);
+ulong8 __ovld __cnfn abs_diff(long8, long8);
+ulong8 __ovld __cnfn abs_diff(ulong8, ulong8);
+ulong16 __ovld __cnfn abs_diff(long16, long16);
+ulong16 __ovld __cnfn abs_diff(ulong16, ulong16);
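+
+// Illustrative: abs_diff avoids the wraparound of a plain subtraction:
+//   uint d = abs_diff(10u, 4000000000u); // 3999999990, exact
+//   // whereas 10u - 4000000000u would wrap modulo 2^32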
+
+/**
+ * Returns x + y and saturates the result.
+ */
+char __ovld __cnfn add_sat(char, char);
+uchar __ovld __cnfn add_sat(uchar, uchar);
+char2 __ovld __cnfn add_sat(char2, char2);
+uchar2 __ovld __cnfn add_sat(uchar2, uchar2);
+char3 __ovld __cnfn add_sat(char3, char3);
+uchar3 __ovld __cnfn add_sat(uchar3, uchar3);
+char4 __ovld __cnfn add_sat(char4, char4);
+uchar4 __ovld __cnfn add_sat(uchar4, uchar4);
+char8 __ovld __cnfn add_sat(char8, char8);
+uchar8 __ovld __cnfn add_sat(uchar8, uchar8);
+char16 __ovld __cnfn add_sat(char16, char16);
+uchar16 __ovld __cnfn add_sat(uchar16, uchar16);
+short __ovld __cnfn add_sat(short, short);
+ushort __ovld __cnfn add_sat(ushort, ushort);
+short2 __ovld __cnfn add_sat(short2, short2);
+ushort2 __ovld __cnfn add_sat(ushort2, ushort2);
+short3 __ovld __cnfn add_sat(short3, short3);
+ushort3 __ovld __cnfn add_sat(ushort3, ushort3);
+short4 __ovld __cnfn add_sat(short4, short4);
+ushort4 __ovld __cnfn add_sat(ushort4, ushort4);
+short8 __ovld __cnfn add_sat(short8, short8);
+ushort8 __ovld __cnfn add_sat(ushort8, ushort8);
+short16 __ovld __cnfn add_sat(short16, short16);
+ushort16 __ovld __cnfn add_sat(ushort16, ushort16);
+int __ovld __cnfn add_sat(int, int);
+uint __ovld __cnfn add_sat(uint, uint);
+int2 __ovld __cnfn add_sat(int2, int2);
+uint2 __ovld __cnfn add_sat(uint2, uint2);
+int3 __ovld __cnfn add_sat(int3, int3);
+uint3 __ovld __cnfn add_sat(uint3, uint3);
+int4 __ovld __cnfn add_sat(int4, int4);
+uint4 __ovld __cnfn add_sat(uint4, uint4);
+int8 __ovld __cnfn add_sat(int8, int8);
+uint8 __ovld __cnfn add_sat(uint8, uint8);
+int16 __ovld __cnfn add_sat(int16, int16);
+uint16 __ovld __cnfn add_sat(uint16, uint16);
+long __ovld __cnfn add_sat(long, long);
+ulong __ovld __cnfn add_sat(ulong, ulong);
+long2 __ovld __cnfn add_sat(long2, long2);
+ulong2 __ovld __cnfn add_sat(ulong2, ulong2);
+long3 __ovld __cnfn add_sat(long3, long3);
+ulong3 __ovld __cnfn add_sat(ulong3, ulong3);
+long4 __ovld __cnfn add_sat(long4, long4);
+ulong4 __ovld __cnfn add_sat(ulong4, ulong4);
+long8 __ovld __cnfn add_sat(long8, long8);
+ulong8 __ovld __cnfn add_sat(ulong8, ulong8);
+long16 __ovld __cnfn add_sat(long16, long16);
+ulong16 __ovld __cnfn add_sat(ulong16, ulong16);
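+
+// Illustrative: saturation clamps at the type limits instead of wrapping:
+//   add_sat((uchar)200, (uchar)100) // 255, whereas 200 + 100 wraps to 44
+//   add_sat((char)100, (char)100)   // 127, whereas 100 + 100 wraps to -56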
+
+/**
+ * Returns (x + y) >> 1. The intermediate sum does
+ * not modulo overflow.
+ */
+char __ovld __cnfn hadd(char, char);
+uchar __ovld __cnfn hadd(uchar, uchar);
+char2 __ovld __cnfn hadd(char2, char2);
+uchar2 __ovld __cnfn hadd(uchar2, uchar2);
+char3 __ovld __cnfn hadd(char3, char3);
+uchar3 __ovld __cnfn hadd(uchar3, uchar3);
+char4 __ovld __cnfn hadd(char4, char4);
+uchar4 __ovld __cnfn hadd(uchar4, uchar4);
+char8 __ovld __cnfn hadd(char8, char8);
+uchar8 __ovld __cnfn hadd(uchar8, uchar8);
+char16 __ovld __cnfn hadd(char16, char16);
+uchar16 __ovld __cnfn hadd(uchar16, uchar16);
+short __ovld __cnfn hadd(short, short);
+ushort __ovld __cnfn hadd(ushort, ushort);
+short2 __ovld __cnfn hadd(short2, short2);
+ushort2 __ovld __cnfn hadd(ushort2, ushort2);
+short3 __ovld __cnfn hadd(short3, short3);
+ushort3 __ovld __cnfn hadd(ushort3, ushort3);
+short4 __ovld __cnfn hadd(short4, short4);
+ushort4 __ovld __cnfn hadd(ushort4, ushort4);
+short8 __ovld __cnfn hadd(short8, short8);
+ushort8 __ovld __cnfn hadd(ushort8, ushort8);
+short16 __ovld __cnfn hadd(short16, short16);
+ushort16 __ovld __cnfn hadd(ushort16, ushort16);
+int __ovld __cnfn hadd(int, int);
+uint __ovld __cnfn hadd(uint, uint);
+int2 __ovld __cnfn hadd(int2, int2);
+uint2 __ovld __cnfn hadd(uint2, uint2);
+int3 __ovld __cnfn hadd(int3, int3);
+uint3 __ovld __cnfn hadd(uint3, uint3);
+int4 __ovld __cnfn hadd(int4, int4);
+uint4 __ovld __cnfn hadd(uint4, uint4);
+int8 __ovld __cnfn hadd(int8, int8);
+uint8 __ovld __cnfn hadd(uint8, uint8);
+int16 __ovld __cnfn hadd(int16, int16);
+uint16 __ovld __cnfn hadd(uint16, uint16);
+long __ovld __cnfn hadd(long, long);
+ulong __ovld __cnfn hadd(ulong, ulong);
+long2 __ovld __cnfn hadd(long2, long2);
+ulong2 __ovld __cnfn hadd(ulong2, ulong2);
+long3 __ovld __cnfn hadd(long3, long3);
+ulong3 __ovld __cnfn hadd(ulong3, ulong3);
+long4 __ovld __cnfn hadd(long4, long4);
+ulong4 __ovld __cnfn hadd(ulong4, ulong4);
+long8 __ovld __cnfn hadd(long8, long8);
+ulong8 __ovld __cnfn hadd(ulong8, ulong8);
+long16 __ovld __cnfn hadd(long16, long16);
+ulong16 __ovld __cnfn hadd(ulong16, ulong16);
+
+/**
+ * Returns (x + y + 1) >> 1. The intermediate sum
+ * does not modulo overflow.
+ */
+char __ovld __cnfn rhadd(char, char);
+uchar __ovld __cnfn rhadd(uchar, uchar);
+char2 __ovld __cnfn rhadd(char2, char2);
+uchar2 __ovld __cnfn rhadd(uchar2, uchar2);
+char3 __ovld __cnfn rhadd(char3, char3);
+uchar3 __ovld __cnfn rhadd(uchar3, uchar3);
+char4 __ovld __cnfn rhadd(char4, char4);
+uchar4 __ovld __cnfn rhadd(uchar4, uchar4);
+char8 __ovld __cnfn rhadd(char8, char8);
+uchar8 __ovld __cnfn rhadd(uchar8, uchar8);
+char16 __ovld __cnfn rhadd(char16, char16);
+uchar16 __ovld __cnfn rhadd(uchar16, uchar16);
+short __ovld __cnfn rhadd(short, short);
+ushort __ovld __cnfn rhadd(ushort, ushort);
+short2 __ovld __cnfn rhadd(short2, short2);
+ushort2 __ovld __cnfn rhadd(ushort2, ushort2);
+short3 __ovld __cnfn rhadd(short3, short3);
+ushort3 __ovld __cnfn rhadd(ushort3, ushort3);
+short4 __ovld __cnfn rhadd(short4, short4);
+ushort4 __ovld __cnfn rhadd(ushort4, ushort4);
+short8 __ovld __cnfn rhadd(short8, short8);
+ushort8 __ovld __cnfn rhadd(ushort8, ushort8);
+short16 __ovld __cnfn rhadd(short16, short16);
+ushort16 __ovld __cnfn rhadd(ushort16, ushort16);
+int __ovld __cnfn rhadd(int, int);
+uint __ovld __cnfn rhadd(uint, uint);
+int2 __ovld __cnfn rhadd(int2, int2);
+uint2 __ovld __cnfn rhadd(uint2, uint2);
+int3 __ovld __cnfn rhadd(int3, int3);
+uint3 __ovld __cnfn rhadd(uint3, uint3);
+int4 __ovld __cnfn rhadd(int4, int4);
+uint4 __ovld __cnfn rhadd(uint4, uint4);
+int8 __ovld __cnfn rhadd(int8, int8);
+uint8 __ovld __cnfn rhadd(uint8, uint8);
+int16 __ovld __cnfn rhadd(int16, int16);
+uint16 __ovld __cnfn rhadd(uint16, uint16);
+long __ovld __cnfn rhadd(long, long);
+ulong __ovld __cnfn rhadd(ulong, ulong);
+long2 __ovld __cnfn rhadd(long2, long2);
+ulong2 __ovld __cnfn rhadd(ulong2, ulong2);
+long3 __ovld __cnfn rhadd(long3, long3);
+ulong3 __ovld __cnfn rhadd(ulong3, ulong3);
+long4 __ovld __cnfn rhadd(long4, long4);
+ulong4 __ovld __cnfn rhadd(ulong4, ulong4);
+long8 __ovld __cnfn rhadd(long8, long8);
+ulong8 __ovld __cnfn rhadd(ulong8, ulong8);
+long16 __ovld __cnfn rhadd(long16, long16);
+ulong16 __ovld __cnfn rhadd(ulong16, ulong16);
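+
+// Illustrative: overflow-safe averaging with the wide intermediate sum:
+//   hadd(1u, 2u)  == 1u // (x + y) >> 1 rounds the average down
+//   rhadd(1u, 2u) == 2u // (x + y + 1) >> 1 rounds the average up
+//   hadd(UINT_MAX, UINT_MAX) == UINT_MAX // no wraparound in the sum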
+
+/**
+ * Returns min(max(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+char __ovld __cnfn clamp(char, char, char);
+uchar __ovld __cnfn clamp(uchar, uchar, uchar);
+char2 __ovld __cnfn clamp(char2, char2, char2);
+uchar2 __ovld __cnfn clamp(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn clamp(char3, char3, char3);
+uchar3 __ovld __cnfn clamp(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn clamp(char4, char4, char4);
+uchar4 __ovld __cnfn clamp(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn clamp(char8, char8, char8);
+uchar8 __ovld __cnfn clamp(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn clamp(char16, char16, char16);
+uchar16 __ovld __cnfn clamp(uchar16, uchar16, uchar16);
+short __ovld __cnfn clamp(short, short, short);
+ushort __ovld __cnfn clamp(ushort, ushort, ushort);
+short2 __ovld __cnfn clamp(short2, short2, short2);
+ushort2 __ovld __cnfn clamp(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn clamp(short3, short3, short3);
+ushort3 __ovld __cnfn clamp(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn clamp(short4, short4, short4);
+ushort4 __ovld __cnfn clamp(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn clamp(short8, short8, short8);
+ushort8 __ovld __cnfn clamp(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn clamp(short16, short16, short16);
+ushort16 __ovld __cnfn clamp(ushort16, ushort16, ushort16);
+int __ovld __cnfn clamp(int, int, int);
+uint __ovld __cnfn clamp(uint, uint, uint);
+int2 __ovld __cnfn clamp(int2, int2, int2);
+uint2 __ovld __cnfn clamp(uint2, uint2, uint2);
+int3 __ovld __cnfn clamp(int3, int3, int3);
+uint3 __ovld __cnfn clamp(uint3, uint3, uint3);
+int4 __ovld __cnfn clamp(int4, int4, int4);
+uint4 __ovld __cnfn clamp(uint4, uint4, uint4);
+int8 __ovld __cnfn clamp(int8, int8, int8);
+uint8 __ovld __cnfn clamp(uint8, uint8, uint8);
+int16 __ovld __cnfn clamp(int16, int16, int16);
+uint16 __ovld __cnfn clamp(uint16, uint16, uint16);
+long __ovld __cnfn clamp(long, long, long);
+ulong __ovld __cnfn clamp(ulong, ulong, ulong);
+long2 __ovld __cnfn clamp(long2, long2, long2);
+ulong2 __ovld __cnfn clamp(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn clamp(long3, long3, long3);
+ulong3 __ovld __cnfn clamp(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn clamp(long4, long4, long4);
+ulong4 __ovld __cnfn clamp(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn clamp(long8, long8, long8);
+ulong8 __ovld __cnfn clamp(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn clamp(long16, long16, long16);
+ulong16 __ovld __cnfn clamp(ulong16, ulong16, ulong16);
+char2 __ovld __cnfn clamp(char2, char, char);
+uchar2 __ovld __cnfn clamp(uchar2, uchar, uchar);
+char3 __ovld __cnfn clamp(char3, char, char);
+uchar3 __ovld __cnfn clamp(uchar3, uchar, uchar);
+char4 __ovld __cnfn clamp(char4, char, char);
+uchar4 __ovld __cnfn clamp(uchar4, uchar, uchar);
+char8 __ovld __cnfn clamp(char8, char, char);
+uchar8 __ovld __cnfn clamp(uchar8, uchar, uchar);
+char16 __ovld __cnfn clamp(char16, char, char);
+uchar16 __ovld __cnfn clamp(uchar16, uchar, uchar);
+short2 __ovld __cnfn clamp(short2, short, short);
+ushort2 __ovld __cnfn clamp(ushort2, ushort, ushort);
+short3 __ovld __cnfn clamp(short3, short, short);
+ushort3 __ovld __cnfn clamp(ushort3, ushort, ushort);
+short4 __ovld __cnfn clamp(short4, short, short);
+ushort4 __ovld __cnfn clamp(ushort4, ushort, ushort);
+short8 __ovld __cnfn clamp(short8, short, short);
+ushort8 __ovld __cnfn clamp(ushort8, ushort, ushort);
+short16 __ovld __cnfn clamp(short16, short, short);
+ushort16 __ovld __cnfn clamp(ushort16, ushort, ushort);
+int2 __ovld __cnfn clamp(int2, int, int);
+uint2 __ovld __cnfn clamp(uint2, uint, uint);
+int3 __ovld __cnfn clamp(int3, int, int);
+uint3 __ovld __cnfn clamp(uint3, uint, uint);
+int4 __ovld __cnfn clamp(int4, int, int);
+uint4 __ovld __cnfn clamp(uint4, uint, uint);
+int8 __ovld __cnfn clamp(int8, int, int);
+uint8 __ovld __cnfn clamp(uint8, uint, uint);
+int16 __ovld __cnfn clamp(int16, int, int);
+uint16 __ovld __cnfn clamp(uint16, uint, uint);
+long2 __ovld __cnfn clamp(long2, long, long);
+ulong2 __ovld __cnfn clamp(ulong2, ulong, ulong);
+long3 __ovld __cnfn clamp(long3, long, long);
+ulong3 __ovld __cnfn clamp(ulong3, ulong, ulong);
+long4 __ovld __cnfn clamp(long4, long, long);
+ulong4 __ovld __cnfn clamp(ulong4, ulong, ulong);
+long8 __ovld __cnfn clamp(long8, long, long);
+ulong8 __ovld __cnfn clamp(ulong8, ulong, ulong);
+long16 __ovld __cnfn clamp(long16, long, long);
+ulong16 __ovld __cnfn clamp(ulong16, ulong, ulong);
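+
+// Usage sketch (illustrative): the scalar min/max overloads broadcast across
+// all vector lanes:
+//   int4 v = (int4)(-5, 0, 7, 300);
+//   int4 c = clamp(v, 0, 255); // (0, 0, 7, 255)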
+
+/**
+ * Returns the number of leading 0-bits in x, starting
+ * at the most significant bit position.
+ */
+char __ovld __cnfn clz(char);
+uchar __ovld __cnfn clz(uchar);
+char2 __ovld __cnfn clz(char2);
+uchar2 __ovld __cnfn clz(uchar2);
+char3 __ovld __cnfn clz(char3);
+uchar3 __ovld __cnfn clz(uchar3);
+char4 __ovld __cnfn clz(char4);
+uchar4 __ovld __cnfn clz(uchar4);
+char8 __ovld __cnfn clz(char8);
+uchar8 __ovld __cnfn clz(uchar8);
+char16 __ovld __cnfn clz(char16);
+uchar16 __ovld __cnfn clz(uchar16);
+short __ovld __cnfn clz(short);
+ushort __ovld __cnfn clz(ushort);
+short2 __ovld __cnfn clz(short2);
+ushort2 __ovld __cnfn clz(ushort2);
+short3 __ovld __cnfn clz(short3);
+ushort3 __ovld __cnfn clz(ushort3);
+short4 __ovld __cnfn clz(short4);
+ushort4 __ovld __cnfn clz(ushort4);
+short8 __ovld __cnfn clz(short8);
+ushort8 __ovld __cnfn clz(ushort8);
+short16 __ovld __cnfn clz(short16);
+ushort16 __ovld __cnfn clz(ushort16);
+int __ovld __cnfn clz(int);
+uint __ovld __cnfn clz(uint);
+int2 __ovld __cnfn clz(int2);
+uint2 __ovld __cnfn clz(uint2);
+int3 __ovld __cnfn clz(int3);
+uint3 __ovld __cnfn clz(uint3);
+int4 __ovld __cnfn clz(int4);
+uint4 __ovld __cnfn clz(uint4);
+int8 __ovld __cnfn clz(int8);
+uint8 __ovld __cnfn clz(uint8);
+int16 __ovld __cnfn clz(int16);
+uint16 __ovld __cnfn clz(uint16);
+long __ovld __cnfn clz(long);
+ulong __ovld __cnfn clz(ulong);
+long2 __ovld __cnfn clz(long2);
+ulong2 __ovld __cnfn clz(ulong2);
+long3 __ovld __cnfn clz(long3);
+ulong3 __ovld __cnfn clz(ulong3);
+long4 __ovld __cnfn clz(long4);
+ulong4 __ovld __cnfn clz(ulong4);
+long8 __ovld __cnfn clz(long8);
+ulong8 __ovld __cnfn clz(ulong8);
+long16 __ovld __cnfn clz(long16);
+ulong16 __ovld __cnfn clz(ulong16);
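+
+/*
+ * Illustrative sketch (not part of the header itself): clz counts from
+ * the most significant bit of the component type's width.
+ *
+ *   uint a = clz(1u);   // 31: only bit 0 is set in a 32-bit type
+ *   uchar4 b = clz((uchar4)(0x80, 0x40, 0x01, 0x00));
+ *   // b == (uchar4)(0, 1, 7, 8)
+ */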
+
+/**
+ * Returns the count of trailing 0-bits in x. If x is 0,
+ * returns the size in bits of the type of x, or of the
+ * component type of x if x is a vector.
+ */
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+char __ovld __cnfn ctz(char);
+uchar __ovld __cnfn ctz(uchar);
+char2 __ovld __cnfn ctz(char2);
+uchar2 __ovld __cnfn ctz(uchar2);
+char3 __ovld __cnfn ctz(char3);
+uchar3 __ovld __cnfn ctz(uchar3);
+char4 __ovld __cnfn ctz(char4);
+uchar4 __ovld __cnfn ctz(uchar4);
+char8 __ovld __cnfn ctz(char8);
+uchar8 __ovld __cnfn ctz(uchar8);
+char16 __ovld __cnfn ctz(char16);
+uchar16 __ovld __cnfn ctz(uchar16);
+short __ovld __cnfn ctz(short);
+ushort __ovld __cnfn ctz(ushort);
+short2 __ovld __cnfn ctz(short2);
+ushort2 __ovld __cnfn ctz(ushort2);
+short3 __ovld __cnfn ctz(short3);
+ushort3 __ovld __cnfn ctz(ushort3);
+short4 __ovld __cnfn ctz(short4);
+ushort4 __ovld __cnfn ctz(ushort4);
+short8 __ovld __cnfn ctz(short8);
+ushort8 __ovld __cnfn ctz(ushort8);
+short16 __ovld __cnfn ctz(short16);
+ushort16 __ovld __cnfn ctz(ushort16);
+int __ovld __cnfn ctz(int);
+uint __ovld __cnfn ctz(uint);
+int2 __ovld __cnfn ctz(int2);
+uint2 __ovld __cnfn ctz(uint2);
+int3 __ovld __cnfn ctz(int3);
+uint3 __ovld __cnfn ctz(uint3);
+int4 __ovld __cnfn ctz(int4);
+uint4 __ovld __cnfn ctz(uint4);
+int8 __ovld __cnfn ctz(int8);
+uint8 __ovld __cnfn ctz(uint8);
+int16 __ovld __cnfn ctz(int16);
+uint16 __ovld __cnfn ctz(uint16);
+long __ovld __cnfn ctz(long);
+ulong __ovld __cnfn ctz(ulong);
+long2 __ovld __cnfn ctz(long2);
+ulong2 __ovld __cnfn ctz(ulong2);
+long3 __ovld __cnfn ctz(long3);
+ulong3 __ovld __cnfn ctz(ulong3);
+long4 __ovld __cnfn ctz(long4);
+ulong4 __ovld __cnfn ctz(ulong4);
+long8 __ovld __cnfn ctz(long8);
+ulong8 __ovld __cnfn ctz(ulong8);
+long16 __ovld __cnfn ctz(long16);
+ulong16 __ovld __cnfn ctz(ulong16);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
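+
+/*
+ * Illustrative sketch (not part of the header itself; ctz requires
+ * OpenCL C 2.0 or C++ for OpenCL): counting starts at the least
+ * significant bit, and a zero input yields the component bit width.
+ *
+ *   uint a = ctz(8u);            // 3: lowest set bit is bit 3
+ *   ushort b = ctz((ushort)0);   // 16: bit width of ushort
+ */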
+
+/**
+ * Returns mul_hi(a, b) + c.
+ */
+char __ovld __cnfn mad_hi(char, char, char);
+uchar __ovld __cnfn mad_hi(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_hi(char2, char2, char2);
+uchar2 __ovld __cnfn mad_hi(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_hi(char3, char3, char3);
+uchar3 __ovld __cnfn mad_hi(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_hi(char4, char4, char4);
+uchar4 __ovld __cnfn mad_hi(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_hi(char8, char8, char8);
+uchar8 __ovld __cnfn mad_hi(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_hi(char16, char16, char16);
+uchar16 __ovld __cnfn mad_hi(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_hi(short, short, short);
+ushort __ovld __cnfn mad_hi(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_hi(short2, short2, short2);
+ushort2 __ovld __cnfn mad_hi(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_hi(short3, short3, short3);
+ushort3 __ovld __cnfn mad_hi(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_hi(short4, short4, short4);
+ushort4 __ovld __cnfn mad_hi(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_hi(short8, short8, short8);
+ushort8 __ovld __cnfn mad_hi(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_hi(short16, short16, short16);
+ushort16 __ovld __cnfn mad_hi(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_hi(int, int, int);
+uint __ovld __cnfn mad_hi(uint, uint, uint);
+int2 __ovld __cnfn mad_hi(int2, int2, int2);
+uint2 __ovld __cnfn mad_hi(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_hi(int3, int3, int3);
+uint3 __ovld __cnfn mad_hi(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_hi(int4, int4, int4);
+uint4 __ovld __cnfn mad_hi(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_hi(int8, int8, int8);
+uint8 __ovld __cnfn mad_hi(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_hi(int16, int16, int16);
+uint16 __ovld __cnfn mad_hi(uint16, uint16, uint16);
+long __ovld __cnfn mad_hi(long, long, long);
+ulong __ovld __cnfn mad_hi(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_hi(long2, long2, long2);
+ulong2 __ovld __cnfn mad_hi(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_hi(long3, long3, long3);
+ulong3 __ovld __cnfn mad_hi(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_hi(long4, long4, long4);
+ulong4 __ovld __cnfn mad_hi(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_hi(long8, long8, long8);
+ulong8 __ovld __cnfn mad_hi(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_hi(long16, long16, long16);
+ulong16 __ovld __cnfn mad_hi(ulong16, ulong16, ulong16);
+
+/**
+ * Returns a * b + c and saturates the result.
+ */
+char __ovld __cnfn mad_sat(char, char, char);
+uchar __ovld __cnfn mad_sat(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_sat(char2, char2, char2);
+uchar2 __ovld __cnfn mad_sat(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_sat(char3, char3, char3);
+uchar3 __ovld __cnfn mad_sat(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_sat(char4, char4, char4);
+uchar4 __ovld __cnfn mad_sat(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_sat(char8, char8, char8);
+uchar8 __ovld __cnfn mad_sat(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_sat(char16, char16, char16);
+uchar16 __ovld __cnfn mad_sat(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_sat(short, short, short);
+ushort __ovld __cnfn mad_sat(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_sat(short2, short2, short2);
+ushort2 __ovld __cnfn mad_sat(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_sat(short3, short3, short3);
+ushort3 __ovld __cnfn mad_sat(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_sat(short4, short4, short4);
+ushort4 __ovld __cnfn mad_sat(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_sat(short8, short8, short8);
+ushort8 __ovld __cnfn mad_sat(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_sat(short16, short16, short16);
+ushort16 __ovld __cnfn mad_sat(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_sat(int, int, int);
+uint __ovld __cnfn mad_sat(uint, uint, uint);
+int2 __ovld __cnfn mad_sat(int2, int2, int2);
+uint2 __ovld __cnfn mad_sat(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_sat(int3, int3, int3);
+uint3 __ovld __cnfn mad_sat(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_sat(int4, int4, int4);
+uint4 __ovld __cnfn mad_sat(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_sat(int8, int8, int8);
+uint8 __ovld __cnfn mad_sat(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_sat(int16, int16, int16);
+uint16 __ovld __cnfn mad_sat(uint16, uint16, uint16);
+long __ovld __cnfn mad_sat(long, long, long);
+ulong __ovld __cnfn mad_sat(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_sat(long2, long2, long2);
+ulong2 __ovld __cnfn mad_sat(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_sat(long3, long3, long3);
+ulong3 __ovld __cnfn mad_sat(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_sat(long4, long4, long4);
+ulong4 __ovld __cnfn mad_sat(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_sat(long8, long8, long8);
+ulong8 __ovld __cnfn mad_sat(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_sat(long16, long16, long16);
+ulong16 __ovld __cnfn mad_sat(ulong16, ulong16, ulong16);
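+
+/*
+ * Illustrative sketch (not part of the header itself): mad_sat forms
+ * the full-precision product before adding and saturating, so no
+ * intermediate overflow is lost.
+ *
+ *   uchar r = mad_sat((uchar)200, (uchar)2, (uchar)10);
+ *   // 200 * 2 + 10 == 410 exceeds UCHAR_MAX, so r == 255
+ */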
+
+/**
+ * Returns y if x < y, otherwise it returns x.
+ */
+char __ovld __cnfn max(char, char);
+uchar __ovld __cnfn max(uchar, uchar);
+char2 __ovld __cnfn max(char2, char2);
+uchar2 __ovld __cnfn max(uchar2, uchar2);
+char3 __ovld __cnfn max(char3, char3);
+uchar3 __ovld __cnfn max(uchar3, uchar3);
+char4 __ovld __cnfn max(char4, char4);
+uchar4 __ovld __cnfn max(uchar4, uchar4);
+char8 __ovld __cnfn max(char8, char8);
+uchar8 __ovld __cnfn max(uchar8, uchar8);
+char16 __ovld __cnfn max(char16, char16);
+uchar16 __ovld __cnfn max(uchar16, uchar16);
+short __ovld __cnfn max(short, short);
+ushort __ovld __cnfn max(ushort, ushort);
+short2 __ovld __cnfn max(short2, short2);
+ushort2 __ovld __cnfn max(ushort2, ushort2);
+short3 __ovld __cnfn max(short3, short3);
+ushort3 __ovld __cnfn max(ushort3, ushort3);
+short4 __ovld __cnfn max(short4, short4);
+ushort4 __ovld __cnfn max(ushort4, ushort4);
+short8 __ovld __cnfn max(short8, short8);
+ushort8 __ovld __cnfn max(ushort8, ushort8);
+short16 __ovld __cnfn max(short16, short16);
+ushort16 __ovld __cnfn max(ushort16, ushort16);
+int __ovld __cnfn max(int, int);
+uint __ovld __cnfn max(uint, uint);
+int2 __ovld __cnfn max(int2, int2);
+uint2 __ovld __cnfn max(uint2, uint2);
+int3 __ovld __cnfn max(int3, int3);
+uint3 __ovld __cnfn max(uint3, uint3);
+int4 __ovld __cnfn max(int4, int4);
+uint4 __ovld __cnfn max(uint4, uint4);
+int8 __ovld __cnfn max(int8, int8);
+uint8 __ovld __cnfn max(uint8, uint8);
+int16 __ovld __cnfn max(int16, int16);
+uint16 __ovld __cnfn max(uint16, uint16);
+long __ovld __cnfn max(long, long);
+ulong __ovld __cnfn max(ulong, ulong);
+long2 __ovld __cnfn max(long2, long2);
+ulong2 __ovld __cnfn max(ulong2, ulong2);
+long3 __ovld __cnfn max(long3, long3);
+ulong3 __ovld __cnfn max(ulong3, ulong3);
+long4 __ovld __cnfn max(long4, long4);
+ulong4 __ovld __cnfn max(ulong4, ulong4);
+long8 __ovld __cnfn max(long8, long8);
+ulong8 __ovld __cnfn max(ulong8, ulong8);
+long16 __ovld __cnfn max(long16, long16);
+ulong16 __ovld __cnfn max(ulong16, ulong16);
+char2 __ovld __cnfn max(char2, char);
+uchar2 __ovld __cnfn max(uchar2, uchar);
+char3 __ovld __cnfn max(char3, char);
+uchar3 __ovld __cnfn max(uchar3, uchar);
+char4 __ovld __cnfn max(char4, char);
+uchar4 __ovld __cnfn max(uchar4, uchar);
+char8 __ovld __cnfn max(char8, char);
+uchar8 __ovld __cnfn max(uchar8, uchar);
+char16 __ovld __cnfn max(char16, char);
+uchar16 __ovld __cnfn max(uchar16, uchar);
+short2 __ovld __cnfn max(short2, short);
+ushort2 __ovld __cnfn max(ushort2, ushort);
+short3 __ovld __cnfn max(short3, short);
+ushort3 __ovld __cnfn max(ushort3, ushort);
+short4 __ovld __cnfn max(short4, short);
+ushort4 __ovld __cnfn max(ushort4, ushort);
+short8 __ovld __cnfn max(short8, short);
+ushort8 __ovld __cnfn max(ushort8, ushort);
+short16 __ovld __cnfn max(short16, short);
+ushort16 __ovld __cnfn max(ushort16, ushort);
+int2 __ovld __cnfn max(int2, int);
+uint2 __ovld __cnfn max(uint2, uint);
+int3 __ovld __cnfn max(int3, int);
+uint3 __ovld __cnfn max(uint3, uint);
+int4 __ovld __cnfn max(int4, int);
+uint4 __ovld __cnfn max(uint4, uint);
+int8 __ovld __cnfn max(int8, int);
+uint8 __ovld __cnfn max(uint8, uint);
+int16 __ovld __cnfn max(int16, int);
+uint16 __ovld __cnfn max(uint16, uint);
+long2 __ovld __cnfn max(long2, long);
+ulong2 __ovld __cnfn max(ulong2, ulong);
+long3 __ovld __cnfn max(long3, long);
+ulong3 __ovld __cnfn max(ulong3, ulong);
+long4 __ovld __cnfn max(long4, long);
+ulong4 __ovld __cnfn max(ulong4, ulong);
+long8 __ovld __cnfn max(long8, long);
+ulong8 __ovld __cnfn max(ulong8, ulong);
+long16 __ovld __cnfn max(long16, long);
+ulong16 __ovld __cnfn max(ulong16, ulong);
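+
+/*
+ * Illustrative sketch (not part of the header itself): the mixed
+ * vector/scalar overloads compare every component against the same
+ * scalar.
+ *
+ *   int4 r = max((int4)(-1, 0, 7, 3), 2);   // r == (int4)(2, 2, 7, 3)
+ */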
+
+/**
+ * Returns y if y < x, otherwise it returns x.
+ */
+char __ovld __cnfn min(char, char);
+uchar __ovld __cnfn min(uchar, uchar);
+char2 __ovld __cnfn min(char2, char2);
+uchar2 __ovld __cnfn min(uchar2, uchar2);
+char3 __ovld __cnfn min(char3, char3);
+uchar3 __ovld __cnfn min(uchar3, uchar3);
+char4 __ovld __cnfn min(char4, char4);
+uchar4 __ovld __cnfn min(uchar4, uchar4);
+char8 __ovld __cnfn min(char8, char8);
+uchar8 __ovld __cnfn min(uchar8, uchar8);
+char16 __ovld __cnfn min(char16, char16);
+uchar16 __ovld __cnfn min(uchar16, uchar16);
+short __ovld __cnfn min(short, short);
+ushort __ovld __cnfn min(ushort, ushort);
+short2 __ovld __cnfn min(short2, short2);
+ushort2 __ovld __cnfn min(ushort2, ushort2);
+short3 __ovld __cnfn min(short3, short3);
+ushort3 __ovld __cnfn min(ushort3, ushort3);
+short4 __ovld __cnfn min(short4, short4);
+ushort4 __ovld __cnfn min(ushort4, ushort4);
+short8 __ovld __cnfn min(short8, short8);
+ushort8 __ovld __cnfn min(ushort8, ushort8);
+short16 __ovld __cnfn min(short16, short16);
+ushort16 __ovld __cnfn min(ushort16, ushort16);
+int __ovld __cnfn min(int, int);
+uint __ovld __cnfn min(uint, uint);
+int2 __ovld __cnfn min(int2, int2);
+uint2 __ovld __cnfn min(uint2, uint2);
+int3 __ovld __cnfn min(int3, int3);
+uint3 __ovld __cnfn min(uint3, uint3);
+int4 __ovld __cnfn min(int4, int4);
+uint4 __ovld __cnfn min(uint4, uint4);
+int8 __ovld __cnfn min(int8, int8);
+uint8 __ovld __cnfn min(uint8, uint8);
+int16 __ovld __cnfn min(int16, int16);
+uint16 __ovld __cnfn min(uint16, uint16);
+long __ovld __cnfn min(long, long);
+ulong __ovld __cnfn min(ulong, ulong);
+long2 __ovld __cnfn min(long2, long2);
+ulong2 __ovld __cnfn min(ulong2, ulong2);
+long3 __ovld __cnfn min(long3, long3);
+ulong3 __ovld __cnfn min(ulong3, ulong3);
+long4 __ovld __cnfn min(long4, long4);
+ulong4 __ovld __cnfn min(ulong4, ulong4);
+long8 __ovld __cnfn min(long8, long8);
+ulong8 __ovld __cnfn min(ulong8, ulong8);
+long16 __ovld __cnfn min(long16, long16);
+ulong16 __ovld __cnfn min(ulong16, ulong16);
+char2 __ovld __cnfn min(char2, char);
+uchar2 __ovld __cnfn min(uchar2, uchar);
+char3 __ovld __cnfn min(char3, char);
+uchar3 __ovld __cnfn min(uchar3, uchar);
+char4 __ovld __cnfn min(char4, char);
+uchar4 __ovld __cnfn min(uchar4, uchar);
+char8 __ovld __cnfn min(char8, char);
+uchar8 __ovld __cnfn min(uchar8, uchar);
+char16 __ovld __cnfn min(char16, char);
+uchar16 __ovld __cnfn min(uchar16, uchar);
+short2 __ovld __cnfn min(short2, short);
+ushort2 __ovld __cnfn min(ushort2, ushort);
+short3 __ovld __cnfn min(short3, short);
+ushort3 __ovld __cnfn min(ushort3, ushort);
+short4 __ovld __cnfn min(short4, short);
+ushort4 __ovld __cnfn min(ushort4, ushort);
+short8 __ovld __cnfn min(short8, short);
+ushort8 __ovld __cnfn min(ushort8, ushort);
+short16 __ovld __cnfn min(short16, short);
+ushort16 __ovld __cnfn min(ushort16, ushort);
+int2 __ovld __cnfn min(int2, int);
+uint2 __ovld __cnfn min(uint2, uint);
+int3 __ovld __cnfn min(int3, int);
+uint3 __ovld __cnfn min(uint3, uint);
+int4 __ovld __cnfn min(int4, int);
+uint4 __ovld __cnfn min(uint4, uint);
+int8 __ovld __cnfn min(int8, int);
+uint8 __ovld __cnfn min(uint8, uint);
+int16 __ovld __cnfn min(int16, int);
+uint16 __ovld __cnfn min(uint16, uint);
+long2 __ovld __cnfn min(long2, long);
+ulong2 __ovld __cnfn min(ulong2, ulong);
+long3 __ovld __cnfn min(long3, long);
+ulong3 __ovld __cnfn min(ulong3, ulong);
+long4 __ovld __cnfn min(long4, long);
+ulong4 __ovld __cnfn min(ulong4, ulong);
+long8 __ovld __cnfn min(long8, long);
+ulong8 __ovld __cnfn min(ulong8, ulong);
+long16 __ovld __cnfn min(long16, long);
+ulong16 __ovld __cnfn min(ulong16, ulong);
+
+/**
+ * Computes x * y and returns the high half of the product.
+ */
+char __ovld __cnfn mul_hi(char, char);
+uchar __ovld __cnfn mul_hi(uchar, uchar);
+char2 __ovld __cnfn mul_hi(char2, char2);
+uchar2 __ovld __cnfn mul_hi(uchar2, uchar2);
+char3 __ovld __cnfn mul_hi(char3, char3);
+uchar3 __ovld __cnfn mul_hi(uchar3, uchar3);
+char4 __ovld __cnfn mul_hi(char4, char4);
+uchar4 __ovld __cnfn mul_hi(uchar4, uchar4);
+char8 __ovld __cnfn mul_hi(char8, char8);
+uchar8 __ovld __cnfn mul_hi(uchar8, uchar8);
+char16 __ovld __cnfn mul_hi(char16, char16);
+uchar16 __ovld __cnfn mul_hi(uchar16, uchar16);
+short __ovld __cnfn mul_hi(short, short);
+ushort __ovld __cnfn mul_hi(ushort, ushort);
+short2 __ovld __cnfn mul_hi(short2, short2);
+ushort2 __ovld __cnfn mul_hi(ushort2, ushort2);
+short3 __ovld __cnfn mul_hi(short3, short3);
+ushort3 __ovld __cnfn mul_hi(ushort3, ushort3);
+short4 __ovld __cnfn mul_hi(short4, short4);
+ushort4 __ovld __cnfn mul_hi(ushort4, ushort4);
+short8 __ovld __cnfn mul_hi(short8, short8);
+ushort8 __ovld __cnfn mul_hi(ushort8, ushort8);
+short16 __ovld __cnfn mul_hi(short16, short16);
+ushort16 __ovld __cnfn mul_hi(ushort16, ushort16);
+int __ovld __cnfn mul_hi(int, int);
+uint __ovld __cnfn mul_hi(uint, uint);
+int2 __ovld __cnfn mul_hi(int2, int2);
+uint2 __ovld __cnfn mul_hi(uint2, uint2);
+int3 __ovld __cnfn mul_hi(int3, int3);
+uint3 __ovld __cnfn mul_hi(uint3, uint3);
+int4 __ovld __cnfn mul_hi(int4, int4);
+uint4 __ovld __cnfn mul_hi(uint4, uint4);
+int8 __ovld __cnfn mul_hi(int8, int8);
+uint8 __ovld __cnfn mul_hi(uint8, uint8);
+int16 __ovld __cnfn mul_hi(int16, int16);
+uint16 __ovld __cnfn mul_hi(uint16, uint16);
+long __ovld __cnfn mul_hi(long, long);
+ulong __ovld __cnfn mul_hi(ulong, ulong);
+long2 __ovld __cnfn mul_hi(long2, long2);
+ulong2 __ovld __cnfn mul_hi(ulong2, ulong2);
+long3 __ovld __cnfn mul_hi(long3, long3);
+ulong3 __ovld __cnfn mul_hi(ulong3, ulong3);
+long4 __ovld __cnfn mul_hi(long4, long4);
+ulong4 __ovld __cnfn mul_hi(ulong4, ulong4);
+long8 __ovld __cnfn mul_hi(long8, long8);
+ulong8 __ovld __cnfn mul_hi(ulong8, ulong8);
+long16 __ovld __cnfn mul_hi(long16, long16);
+ulong16 __ovld __cnfn mul_hi(ulong16, ulong16);
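+
+/*
+ * Illustrative sketch (not part of the header itself): mul_hi keeps
+ * the bits that the ordinary * operator discards.
+ *
+ *   uint hi = mul_hi(0x80000000u, 4u);
+ *   // full 64-bit product is 0x200000000, so hi == 2
+ *   // while 0x80000000u * 4u wraps to 0
+ */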
+
+/**
+ * For each element in v, the bits are shifted left by
+ * the number of bits given by the corresponding
+ * element in i (subject to usual shift modulo rules
+ * described in section 6.3). Bits shifted off the left
+ * side of the element are shifted back in from the
+ * right.
+ */
+char __ovld __cnfn rotate(char, char);
+uchar __ovld __cnfn rotate(uchar, uchar);
+char2 __ovld __cnfn rotate(char2, char2);
+uchar2 __ovld __cnfn rotate(uchar2, uchar2);
+char3 __ovld __cnfn rotate(char3, char3);
+uchar3 __ovld __cnfn rotate(uchar3, uchar3);
+char4 __ovld __cnfn rotate(char4, char4);
+uchar4 __ovld __cnfn rotate(uchar4, uchar4);
+char8 __ovld __cnfn rotate(char8, char8);
+uchar8 __ovld __cnfn rotate(uchar8, uchar8);
+char16 __ovld __cnfn rotate(char16, char16);
+uchar16 __ovld __cnfn rotate(uchar16, uchar16);
+short __ovld __cnfn rotate(short, short);
+ushort __ovld __cnfn rotate(ushort, ushort);
+short2 __ovld __cnfn rotate(short2, short2);
+ushort2 __ovld __cnfn rotate(ushort2, ushort2);
+short3 __ovld __cnfn rotate(short3, short3);
+ushort3 __ovld __cnfn rotate(ushort3, ushort3);
+short4 __ovld __cnfn rotate(short4, short4);
+ushort4 __ovld __cnfn rotate(ushort4, ushort4);
+short8 __ovld __cnfn rotate(short8, short8);
+ushort8 __ovld __cnfn rotate(ushort8, ushort8);
+short16 __ovld __cnfn rotate(short16, short16);
+ushort16 __ovld __cnfn rotate(ushort16, ushort16);
+int __ovld __cnfn rotate(int, int);
+uint __ovld __cnfn rotate(uint, uint);
+int2 __ovld __cnfn rotate(int2, int2);
+uint2 __ovld __cnfn rotate(uint2, uint2);
+int3 __ovld __cnfn rotate(int3, int3);
+uint3 __ovld __cnfn rotate(uint3, uint3);
+int4 __ovld __cnfn rotate(int4, int4);
+uint4 __ovld __cnfn rotate(uint4, uint4);
+int8 __ovld __cnfn rotate(int8, int8);
+uint8 __ovld __cnfn rotate(uint8, uint8);
+int16 __ovld __cnfn rotate(int16, int16);
+uint16 __ovld __cnfn rotate(uint16, uint16);
+long __ovld __cnfn rotate(long, long);
+ulong __ovld __cnfn rotate(ulong, ulong);
+long2 __ovld __cnfn rotate(long2, long2);
+ulong2 __ovld __cnfn rotate(ulong2, ulong2);
+long3 __ovld __cnfn rotate(long3, long3);
+ulong3 __ovld __cnfn rotate(ulong3, ulong3);
+long4 __ovld __cnfn rotate(long4, long4);
+ulong4 __ovld __cnfn rotate(ulong4, ulong4);
+long8 __ovld __cnfn rotate(long8, long8);
+ulong8 __ovld __cnfn rotate(ulong8, ulong8);
+long16 __ovld __cnfn rotate(long16, long16);
+ulong16 __ovld __cnfn rotate(ulong16, ulong16);
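+
+/*
+ * Illustrative sketch (not part of the header itself): bits shifted
+ * off the left re-enter on the right, and the shift count is taken
+ * modulo the component bit width.
+ *
+ *   uchar r = rotate((uchar)0x81, (uchar)1);   // r == 0x03
+ */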
+
+/**
+ * Returns x - y and saturates the result.
+ */
+char __ovld __cnfn sub_sat(char, char);
+uchar __ovld __cnfn sub_sat(uchar, uchar);
+char2 __ovld __cnfn sub_sat(char2, char2);
+uchar2 __ovld __cnfn sub_sat(uchar2, uchar2);
+char3 __ovld __cnfn sub_sat(char3, char3);
+uchar3 __ovld __cnfn sub_sat(uchar3, uchar3);
+char4 __ovld __cnfn sub_sat(char4, char4);
+uchar4 __ovld __cnfn sub_sat(uchar4, uchar4);
+char8 __ovld __cnfn sub_sat(char8, char8);
+uchar8 __ovld __cnfn sub_sat(uchar8, uchar8);
+char16 __ovld __cnfn sub_sat(char16, char16);
+uchar16 __ovld __cnfn sub_sat(uchar16, uchar16);
+short __ovld __cnfn sub_sat(short, short);
+ushort __ovld __cnfn sub_sat(ushort, ushort);
+short2 __ovld __cnfn sub_sat(short2, short2);
+ushort2 __ovld __cnfn sub_sat(ushort2, ushort2);
+short3 __ovld __cnfn sub_sat(short3, short3);
+ushort3 __ovld __cnfn sub_sat(ushort3, ushort3);
+short4 __ovld __cnfn sub_sat(short4, short4);
+ushort4 __ovld __cnfn sub_sat(ushort4, ushort4);
+short8 __ovld __cnfn sub_sat(short8, short8);
+ushort8 __ovld __cnfn sub_sat(ushort8, ushort8);
+short16 __ovld __cnfn sub_sat(short16, short16);
+ushort16 __ovld __cnfn sub_sat(ushort16, ushort16);
+int __ovld __cnfn sub_sat(int, int);
+uint __ovld __cnfn sub_sat(uint, uint);
+int2 __ovld __cnfn sub_sat(int2, int2);
+uint2 __ovld __cnfn sub_sat(uint2, uint2);
+int3 __ovld __cnfn sub_sat(int3, int3);
+uint3 __ovld __cnfn sub_sat(uint3, uint3);
+int4 __ovld __cnfn sub_sat(int4, int4);
+uint4 __ovld __cnfn sub_sat(uint4, uint4);
+int8 __ovld __cnfn sub_sat(int8, int8);
+uint8 __ovld __cnfn sub_sat(uint8, uint8);
+int16 __ovld __cnfn sub_sat(int16, int16);
+uint16 __ovld __cnfn sub_sat(uint16, uint16);
+long __ovld __cnfn sub_sat(long, long);
+ulong __ovld __cnfn sub_sat(ulong, ulong);
+long2 __ovld __cnfn sub_sat(long2, long2);
+ulong2 __ovld __cnfn sub_sat(ulong2, ulong2);
+long3 __ovld __cnfn sub_sat(long3, long3);
+ulong3 __ovld __cnfn sub_sat(ulong3, ulong3);
+long4 __ovld __cnfn sub_sat(long4, long4);
+ulong4 __ovld __cnfn sub_sat(ulong4, ulong4);
+long8 __ovld __cnfn sub_sat(long8, long8);
+ulong8 __ovld __cnfn sub_sat(ulong8, ulong8);
+long16 __ovld __cnfn sub_sat(long16, long16);
+ulong16 __ovld __cnfn sub_sat(ulong16, ulong16);
+
+/**
+ * result[i] = ((short)hi[i] << 8) | lo[i]
+ * result[i] = ((ushort)hi[i] << 8) | lo[i]
+ */
+short __ovld __cnfn upsample(char, uchar);
+ushort __ovld __cnfn upsample(uchar, uchar);
+short2 __ovld __cnfn upsample(char2, uchar2);
+short3 __ovld __cnfn upsample(char3, uchar3);
+short4 __ovld __cnfn upsample(char4, uchar4);
+short8 __ovld __cnfn upsample(char8, uchar8);
+short16 __ovld __cnfn upsample(char16, uchar16);
+ushort2 __ovld __cnfn upsample(uchar2, uchar2);
+ushort3 __ovld __cnfn upsample(uchar3, uchar3);
+ushort4 __ovld __cnfn upsample(uchar4, uchar4);
+ushort8 __ovld __cnfn upsample(uchar8, uchar8);
+ushort16 __ovld __cnfn upsample(uchar16, uchar16);
+
+/**
+ * result[i] = ((int)hi[i] << 16) | lo[i]
+ * result[i] = ((uint)hi[i] << 16) | lo[i]
+ */
+int __ovld __cnfn upsample(short, ushort);
+uint __ovld __cnfn upsample(ushort, ushort);
+int2 __ovld __cnfn upsample(short2, ushort2);
+int3 __ovld __cnfn upsample(short3, ushort3);
+int4 __ovld __cnfn upsample(short4, ushort4);
+int8 __ovld __cnfn upsample(short8, ushort8);
+int16 __ovld __cnfn upsample(short16, ushort16);
+uint2 __ovld __cnfn upsample(ushort2, ushort2);
+uint3 __ovld __cnfn upsample(ushort3, ushort3);
+uint4 __ovld __cnfn upsample(ushort4, ushort4);
+uint8 __ovld __cnfn upsample(ushort8, ushort8);
+uint16 __ovld __cnfn upsample(ushort16, ushort16);
+
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int, uint);
+ulong __ovld __cnfn upsample(uint, uint);
+long2 __ovld __cnfn upsample(int2, uint2);
+long3 __ovld __cnfn upsample(int3, uint3);
+long4 __ovld __cnfn upsample(int4, uint4);
+long8 __ovld __cnfn upsample(int8, uint8);
+long16 __ovld __cnfn upsample(int16, uint16);
+ulong2 __ovld __cnfn upsample(uint2, uint2);
+ulong3 __ovld __cnfn upsample(uint3, uint3);
+ulong4 __ovld __cnfn upsample(uint4, uint4);
+ulong8 __ovld __cnfn upsample(uint8, uint8);
+ulong16 __ovld __cnfn upsample(uint16, uint16);
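+
+/*
+ * Illustrative sketch (not part of the header itself): hi supplies the
+ * high half and lo the zero-extended low half of the wider result.
+ *
+ *   ushort r = upsample((uchar)0x12, (uchar)0x34);   // r == 0x1234
+ *   int s = upsample((short)-1, (ushort)0);          // s == 0xFFFF0000
+ */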
+
+/**
+ * popcount(x): returns the number of set bits in x.
+ */
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+char __ovld __cnfn popcount(char);
+uchar __ovld __cnfn popcount(uchar);
+char2 __ovld __cnfn popcount(char2);
+uchar2 __ovld __cnfn popcount(uchar2);
+char3 __ovld __cnfn popcount(char3);
+uchar3 __ovld __cnfn popcount(uchar3);
+char4 __ovld __cnfn popcount(char4);
+uchar4 __ovld __cnfn popcount(uchar4);
+char8 __ovld __cnfn popcount(char8);
+uchar8 __ovld __cnfn popcount(uchar8);
+char16 __ovld __cnfn popcount(char16);
+uchar16 __ovld __cnfn popcount(uchar16);
+short __ovld __cnfn popcount(short);
+ushort __ovld __cnfn popcount(ushort);
+short2 __ovld __cnfn popcount(short2);
+ushort2 __ovld __cnfn popcount(ushort2);
+short3 __ovld __cnfn popcount(short3);
+ushort3 __ovld __cnfn popcount(ushort3);
+short4 __ovld __cnfn popcount(short4);
+ushort4 __ovld __cnfn popcount(ushort4);
+short8 __ovld __cnfn popcount(short8);
+ushort8 __ovld __cnfn popcount(ushort8);
+short16 __ovld __cnfn popcount(short16);
+ushort16 __ovld __cnfn popcount(ushort16);
+int __ovld __cnfn popcount(int);
+uint __ovld __cnfn popcount(uint);
+int2 __ovld __cnfn popcount(int2);
+uint2 __ovld __cnfn popcount(uint2);
+int3 __ovld __cnfn popcount(int3);
+uint3 __ovld __cnfn popcount(uint3);
+int4 __ovld __cnfn popcount(int4);
+uint4 __ovld __cnfn popcount(uint4);
+int8 __ovld __cnfn popcount(int8);
+uint8 __ovld __cnfn popcount(uint8);
+int16 __ovld __cnfn popcount(int16);
+uint16 __ovld __cnfn popcount(uint16);
+long __ovld __cnfn popcount(long);
+ulong __ovld __cnfn popcount(ulong);
+long2 __ovld __cnfn popcount(long2);
+ulong2 __ovld __cnfn popcount(ulong2);
+long3 __ovld __cnfn popcount(long3);
+ulong3 __ovld __cnfn popcount(ulong3);
+long4 __ovld __cnfn popcount(long4);
+ulong4 __ovld __cnfn popcount(ulong4);
+long8 __ovld __cnfn popcount(long8);
+ulong8 __ovld __cnfn popcount(ulong8);
+long16 __ovld __cnfn popcount(long16);
+ulong16 __ovld __cnfn popcount(ulong16);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
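+
+/*
+ * Illustrative sketch (not part of the header itself; popcount
+ * requires OpenCL C 1.2 or later):
+ *
+ *   uint n = popcount(0xF0F0u);    // n == 8
+ *   char c = popcount((char)-1);   // all 8 bits set, so c == 8
+ */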
+
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to definition of mul24 to see how the 24-bit
+ * integer multiplication is performed.
+ */
+int __ovld __cnfn mad24(int, int, int);
+uint __ovld __cnfn mad24(uint, uint, uint);
+int2 __ovld __cnfn mad24(int2, int2, int2);
+uint2 __ovld __cnfn mad24(uint2, uint2, uint2);
+int3 __ovld __cnfn mad24(int3, int3, int3);
+uint3 __ovld __cnfn mad24(uint3, uint3, uint3);
+int4 __ovld __cnfn mad24(int4, int4, int4);
+uint4 __ovld __cnfn mad24(uint4, uint4, uint4);
+int8 __ovld __cnfn mad24(int8, int8, int8);
+uint8 __ovld __cnfn mad24(uint8, uint8, uint8);
+int16 __ovld __cnfn mad24(int16, int16, int16);
+uint16 __ovld __cnfn mad24(uint16, uint16, uint16);
+
+/**
+ * Multiply two 24-bit integer values x and y. x and y
+ * are 32-bit integers but only the low 24 bits are used
+ * to perform the multiplication. mul24 should only be
+ * used when values in x and y are in the range
+ * [-2^23, 2^23-1] if x and y are signed integers and in
+ * the range [0, 2^24-1] if x and y are unsigned integers.
+ * If x and y are not in this range, the multiplication
+ * result is implementation-defined.
+ */
+int __ovld __cnfn mul24(int, int);
+uint __ovld __cnfn mul24(uint, uint);
+int2 __ovld __cnfn mul24(int2, int2);
+uint2 __ovld __cnfn mul24(uint2, uint2);
+int3 __ovld __cnfn mul24(int3, int3);
+uint3 __ovld __cnfn mul24(uint3, uint3);
+int4 __ovld __cnfn mul24(int4, int4);
+uint4 __ovld __cnfn mul24(uint4, uint4);
+int8 __ovld __cnfn mul24(int8, int8);
+uint8 __ovld __cnfn mul24(uint8, uint8);
+int16 __ovld __cnfn mul24(int16, int16);
+uint16 __ovld __cnfn mul24(uint16, uint16);
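+
+/*
+ * Illustrative sketch (not part of the header itself): mul24/mad24
+ * trade range for speed; a common use is flat index computation when
+ * the group id is known to fit in 24 bits.
+ *
+ *   int idx = mad24((int)get_group_id(0), 256, (int)get_local_id(0));
+ *   // same value as group_id * 256 + local_id within the valid range
+ */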
+
+// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
+
+/**
+ * Returns fmin(fmax(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+float __ovld __cnfn clamp(float, float, float);
+float2 __ovld __cnfn clamp(float2, float2, float2);
+float3 __ovld __cnfn clamp(float3, float3, float3);
+float4 __ovld __cnfn clamp(float4, float4, float4);
+float8 __ovld __cnfn clamp(float8, float8, float8);
+float16 __ovld __cnfn clamp(float16, float16, float16);
+float2 __ovld __cnfn clamp(float2, float, float);
+float3 __ovld __cnfn clamp(float3, float, float);
+float4 __ovld __cnfn clamp(float4, float, float);
+float8 __ovld __cnfn clamp(float8, float, float);
+float16 __ovld __cnfn clamp(float16, float, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn clamp(double, double, double);
+double2 __ovld __cnfn clamp(double2, double2, double2);
+double3 __ovld __cnfn clamp(double3, double3, double3);
+double4 __ovld __cnfn clamp(double4, double4, double4);
+double8 __ovld __cnfn clamp(double8, double8, double8);
+double16 __ovld __cnfn clamp(double16, double16, double16);
+double2 __ovld __cnfn clamp(double2, double, double);
+double3 __ovld __cnfn clamp(double3, double, double);
+double4 __ovld __cnfn clamp(double4, double, double);
+double8 __ovld __cnfn clamp(double8, double, double);
+double16 __ovld __cnfn clamp(double16, double, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn clamp(half, half, half);
+half2 __ovld __cnfn clamp(half2, half2, half2);
+half3 __ovld __cnfn clamp(half3, half3, half3);
+half4 __ovld __cnfn clamp(half4, half4, half4);
+half8 __ovld __cnfn clamp(half8, half8, half8);
+half16 __ovld __cnfn clamp(half16, half16, half16);
+half2 __ovld __cnfn clamp(half2, half, half);
+half3 __ovld __cnfn clamp(half3, half, half);
+half4 __ovld __cnfn clamp(half4, half, half);
+half8 __ovld __cnfn clamp(half8, half, half);
+half16 __ovld __cnfn clamp(half16, half, half);
+#endif //cl_khr_fp16
+
+/**
+ * Converts radians to degrees, i.e. (180 / PI) *
+ * radians.
+ */
+float __ovld __cnfn degrees(float);
+float2 __ovld __cnfn degrees(float2);
+float3 __ovld __cnfn degrees(float3);
+float4 __ovld __cnfn degrees(float4);
+float8 __ovld __cnfn degrees(float8);
+float16 __ovld __cnfn degrees(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn degrees(double);
+double2 __ovld __cnfn degrees(double2);
+double3 __ovld __cnfn degrees(double3);
+double4 __ovld __cnfn degrees(double4);
+double8 __ovld __cnfn degrees(double8);
+double16 __ovld __cnfn degrees(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn degrees(half);
+half2 __ovld __cnfn degrees(half2);
+half3 __ovld __cnfn degrees(half3);
+half4 __ovld __cnfn degrees(half4);
+half8 __ovld __cnfn degrees(half8);
+half16 __ovld __cnfn degrees(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if x < y, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn max(float, float);
+float2 __ovld __cnfn max(float2, float2);
+float3 __ovld __cnfn max(float3, float3);
+float4 __ovld __cnfn max(float4, float4);
+float8 __ovld __cnfn max(float8, float8);
+float16 __ovld __cnfn max(float16, float16);
+float2 __ovld __cnfn max(float2, float);
+float3 __ovld __cnfn max(float3, float);
+float4 __ovld __cnfn max(float4, float);
+float8 __ovld __cnfn max(float8, float);
+float16 __ovld __cnfn max(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn max(double, double);
+double2 __ovld __cnfn max(double2, double2);
+double3 __ovld __cnfn max(double3, double3);
+double4 __ovld __cnfn max(double4, double4);
+double8 __ovld __cnfn max(double8, double8);
+double16 __ovld __cnfn max(double16, double16);
+double2 __ovld __cnfn max(double2, double);
+double3 __ovld __cnfn max(double3, double);
+double4 __ovld __cnfn max(double4, double);
+double8 __ovld __cnfn max(double8, double);
+double16 __ovld __cnfn max(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn max(half, half);
+half2 __ovld __cnfn max(half2, half2);
+half3 __ovld __cnfn max(half3, half3);
+half4 __ovld __cnfn max(half4, half4);
+half8 __ovld __cnfn max(half8, half8);
+half16 __ovld __cnfn max(half16, half16);
+half2 __ovld __cnfn max(half2, half);
+half3 __ovld __cnfn max(half3, half);
+half4 __ovld __cnfn max(half4, half);
+half8 __ovld __cnfn max(half8, half);
+half16 __ovld __cnfn max(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn min(float, float);
+float2 __ovld __cnfn min(float2, float2);
+float3 __ovld __cnfn min(float3, float3);
+float4 __ovld __cnfn min(float4, float4);
+float8 __ovld __cnfn min(float8, float8);
+float16 __ovld __cnfn min(float16, float16);
+float2 __ovld __cnfn min(float2, float);
+float3 __ovld __cnfn min(float3, float);
+float4 __ovld __cnfn min(float4, float);
+float8 __ovld __cnfn min(float8, float);
+float16 __ovld __cnfn min(float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn min(double, double);
+double2 __ovld __cnfn min(double2, double2);
+double3 __ovld __cnfn min(double3, double3);
+double4 __ovld __cnfn min(double4, double4);
+double8 __ovld __cnfn min(double8, double8);
+double16 __ovld __cnfn min(double16, double16);
+double2 __ovld __cnfn min(double2, double);
+double3 __ovld __cnfn min(double3, double);
+double4 __ovld __cnfn min(double4, double);
+double8 __ovld __cnfn min(double8, double);
+double16 __ovld __cnfn min(double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn min(half, half);
+half2 __ovld __cnfn min(half2, half2);
+half3 __ovld __cnfn min(half3, half3);
+half4 __ovld __cnfn min(half4, half4);
+half8 __ovld __cnfn min(half8, half8);
+half16 __ovld __cnfn min(half16, half16);
+half2 __ovld __cnfn min(half2, half);
+half3 __ovld __cnfn min(half3, half);
+half4 __ovld __cnfn min(half4, half);
+half8 __ovld __cnfn min(half8, half);
+half16 __ovld __cnfn min(half16, half);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the linear blend of x and y implemented as:
+ * x + (y - x) * a
+ * a must be a value in the range 0.0 ... 1.0. If a is not
+ * in the range 0.0 ... 1.0, the return values are
+ * undefined.
+ */
+float __ovld __cnfn mix(float, float, float);
+float2 __ovld __cnfn mix(float2, float2, float2);
+float3 __ovld __cnfn mix(float3, float3, float3);
+float4 __ovld __cnfn mix(float4, float4, float4);
+float8 __ovld __cnfn mix(float8, float8, float8);
+float16 __ovld __cnfn mix(float16, float16, float16);
+float2 __ovld __cnfn mix(float2, float2, float);
+float3 __ovld __cnfn mix(float3, float3, float);
+float4 __ovld __cnfn mix(float4, float4, float);
+float8 __ovld __cnfn mix(float8, float8, float);
+float16 __ovld __cnfn mix(float16, float16, float);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mix(double, double, double);
+double2 __ovld __cnfn mix(double2, double2, double2);
+double3 __ovld __cnfn mix(double3, double3, double3);
+double4 __ovld __cnfn mix(double4, double4, double4);
+double8 __ovld __cnfn mix(double8, double8, double8);
+double16 __ovld __cnfn mix(double16, double16, double16);
+double2 __ovld __cnfn mix(double2, double2, double);
+double3 __ovld __cnfn mix(double3, double3, double);
+double4 __ovld __cnfn mix(double4, double4, double);
+double8 __ovld __cnfn mix(double8, double8, double);
+double16 __ovld __cnfn mix(double16, double16, double);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mix(half, half, half);
+half2 __ovld __cnfn mix(half2, half2, half2);
+half3 __ovld __cnfn mix(half3, half3, half3);
+half4 __ovld __cnfn mix(half4, half4, half4);
+half8 __ovld __cnfn mix(half8, half8, half8);
+half16 __ovld __cnfn mix(half16, half16, half16);
+half2 __ovld __cnfn mix(half2, half2, half);
+half3 __ovld __cnfn mix(half3, half3, half);
+half4 __ovld __cnfn mix(half4, half4, half);
+half8 __ovld __cnfn mix(half8, half8, half);
+half16 __ovld __cnfn mix(half16, half16, half);
+#endif //cl_khr_fp16
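+
+/*
+ * Illustrative sketch (not part of the header itself): a == 0 yields
+ * x, a == 1 yields y, and values in between blend linearly.
+ *
+ *   float r = mix(10.0f, 20.0f, 0.25f);   // r == 12.5f
+ */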
+
+/**
+ * Converts degrees to radians, i.e. (PI / 180) *
+ * degrees.
+ */
+float __ovld __cnfn radians(float);
+float2 __ovld __cnfn radians(float2);
+float3 __ovld __cnfn radians(float3);
+float4 __ovld __cnfn radians(float4);
+float8 __ovld __cnfn radians(float8);
+float16 __ovld __cnfn radians(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn radians(double);
+double2 __ovld __cnfn radians(double2);
+double3 __ovld __cnfn radians(double3);
+double4 __ovld __cnfn radians(double4);
+double8 __ovld __cnfn radians(double8);
+double16 __ovld __cnfn radians(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn radians(half);
+half2 __ovld __cnfn radians(half2);
+half3 __ovld __cnfn radians(half3);
+half4 __ovld __cnfn radians(half4);
+half8 __ovld __cnfn radians(half8);
+half16 __ovld __cnfn radians(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x < edge, otherwise it returns 1.0.
+ */
+float __ovld __cnfn step(float, float);
+float2 __ovld __cnfn step(float2, float2);
+float3 __ovld __cnfn step(float3, float3);
+float4 __ovld __cnfn step(float4, float4);
+float8 __ovld __cnfn step(float8, float8);
+float16 __ovld __cnfn step(float16, float16);
+float2 __ovld __cnfn step(float, float2);
+float3 __ovld __cnfn step(float, float3);
+float4 __ovld __cnfn step(float, float4);
+float8 __ovld __cnfn step(float, float8);
+float16 __ovld __cnfn step(float, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double, double);
+double2 __ovld __cnfn step(double2, double2);
+double3 __ovld __cnfn step(double3, double3);
+double4 __ovld __cnfn step(double4, double4);
+double8 __ovld __cnfn step(double8, double8);
+double16 __ovld __cnfn step(double16, double16);
+double2 __ovld __cnfn step(double, double2);
+double3 __ovld __cnfn step(double, double3);
+double4 __ovld __cnfn step(double, double4);
+double8 __ovld __cnfn step(double, double8);
+double16 __ovld __cnfn step(double, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half, half);
+half2 __ovld __cnfn step(half2, half2);
+half3 __ovld __cnfn step(half3, half3);
+half4 __ovld __cnfn step(half4, half4);
+half8 __ovld __cnfn step(half8, half8);
+half16 __ovld __cnfn step(half16, half16);
+half2 __ovld __cnfn step(half, half2);
+half3 __ovld __cnfn step(half, half3);
+half4 __ovld __cnfn step(half, half4);
+half8 __ovld __cnfn step(half, half8);
+half16 __ovld __cnfn step(half, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1, and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
+ */
+float __ovld __cnfn smoothstep(float, float, float);
+float2 __ovld __cnfn smoothstep(float2, float2, float2);
+float3 __ovld __cnfn smoothstep(float3, float3, float3);
+float4 __ovld __cnfn smoothstep(float4, float4, float4);
+float8 __ovld __cnfn smoothstep(float8, float8, float8);
+float16 __ovld __cnfn smoothstep(float16, float16, float16);
+float2 __ovld __cnfn smoothstep(float, float, float2);
+float3 __ovld __cnfn smoothstep(float, float, float3);
+float4 __ovld __cnfn smoothstep(float, float, float4);
+float8 __ovld __cnfn smoothstep(float, float, float8);
+float16 __ovld __cnfn smoothstep(float, float, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn smoothstep(double, double, double);
+double2 __ovld __cnfn smoothstep(double2, double2, double2);
+double3 __ovld __cnfn smoothstep(double3, double3, double3);
+double4 __ovld __cnfn smoothstep(double4, double4, double4);
+double8 __ovld __cnfn smoothstep(double8, double8, double8);
+double16 __ovld __cnfn smoothstep(double16, double16, double16);
+double2 __ovld __cnfn smoothstep(double, double, double2);
+double3 __ovld __cnfn smoothstep(double, double, double3);
+double4 __ovld __cnfn smoothstep(double, double, double4);
+double8 __ovld __cnfn smoothstep(double, double, double8);
+double16 __ovld __cnfn smoothstep(double, double, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn smoothstep(half, half, half);
+half2 __ovld __cnfn smoothstep(half2, half2, half2);
+half3 __ovld __cnfn smoothstep(half3, half3, half3);
+half4 __ovld __cnfn smoothstep(half4, half4, half4);
+half8 __ovld __cnfn smoothstep(half8, half8, half8);
+half16 __ovld __cnfn smoothstep(half16, half16, half16);
+half2 __ovld __cnfn smoothstep(half, half, half2);
+half3 __ovld __cnfn smoothstep(half, half, half3);
+half4 __ovld __cnfn smoothstep(half, half, half4);
+half8 __ovld __cnfn smoothstep(half, half, half8);
+half16 __ovld __cnfn smoothstep(half, half, half16);
+#endif //cl_khr_fp16
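+
+/*
+ * Illustrative sketch (not part of the header itself): with t == 0.25
+ * the Hermite blend t * t * (3 - 2 * t) evaluates to 0.15625.
+ *
+ *   float r = smoothstep(0.0f, 1.0f, 0.5f);    // r == 0.5f
+ *   float s = smoothstep(0.0f, 1.0f, 0.25f);   // s == 0.15625f
+ */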
+
+/**
+ * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
+ * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
+ */
+float __ovld __cnfn sign(float);
+float2 __ovld __cnfn sign(float2);
+float3 __ovld __cnfn sign(float3);
+float4 __ovld __cnfn sign(float4);
+float8 __ovld __cnfn sign(float8);
+float16 __ovld __cnfn sign(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sign(double);
+double2 __ovld __cnfn sign(double2);
+double3 __ovld __cnfn sign(double3);
+double4 __ovld __cnfn sign(double4);
+double8 __ovld __cnfn sign(double8);
+double16 __ovld __cnfn sign(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sign(half);
+half2 __ovld __cnfn sign(half2);
+half3 __ovld __cnfn sign(half3);
+half4 __ovld __cnfn sign(half4);
+half8 __ovld __cnfn sign(half8);
+half16 __ovld __cnfn sign(half16);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
+
+/**
+ * Returns the cross product of p0.xyz and p1.xyz. The
+ * w component of the float4 result will be 0.0.
+ */
+float4 __ovld __cnfn cross(float4, float4);
+float3 __ovld __cnfn cross(float3, float3);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4, double4);
+double3 __ovld __cnfn cross(double3, double3);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4, half4);
+half3 __ovld __cnfn cross(half3, half3);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float, float);
+float __ovld __cnfn dot(float2, float2);
+float __ovld __cnfn dot(float3, float3);
+float __ovld __cnfn dot(float4, float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double, double);
+double __ovld __cnfn dot(double2, double2);
+double __ovld __cnfn dot(double3, double3);
+double __ovld __cnfn dot(double4, double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half, half);
+half __ovld __cnfn dot(half2, half2);
+half __ovld __cnfn dot(half3, half3);
+half __ovld __cnfn dot(half4, half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float, float);
+float __ovld __cnfn distance(float2, float2);
+float __ovld __cnfn distance(float3, float3);
+float __ovld __cnfn distance(float4, float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double, double);
+double __ovld __cnfn distance(double2, double2);
+double __ovld __cnfn distance(double3, double3);
+double __ovld __cnfn distance(double4, double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half, half);
+half __ovld __cnfn distance(half2, half2);
+half __ovld __cnfn distance(half3, half3);
+half __ovld __cnfn distance(half4, half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float);
+float __ovld __cnfn length(float2);
+float __ovld __cnfn length(float3);
+float __ovld __cnfn length(float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double);
+double __ovld __cnfn length(double2);
+double __ovld __cnfn length(double3);
+double __ovld __cnfn length(double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half);
+half __ovld __cnfn length(half2);
+half __ovld __cnfn length(half3);
+half __ovld __cnfn length(half4);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float);
+float2 __ovld __cnfn normalize(float2);
+float3 __ovld __cnfn normalize(float3);
+float4 __ovld __cnfn normalize(float4);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double);
+double2 __ovld __cnfn normalize(double2);
+double3 __ovld __cnfn normalize(double3);
+double4 __ovld __cnfn normalize(double4);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half);
+half2 __ovld __cnfn normalize(half2);
+half3 __ovld __cnfn normalize(half3);
+half4 __ovld __cnfn normalize(half4);
+#endif //cl_khr_fp16
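+
+/*
+ * Illustrative sketch (not part of the header itself): normalize
+ * divides by length, so a 3-4-5 triangle maps onto the unit sphere.
+ *
+ *   float3 n = normalize((float3)(3.0f, 0.0f, 4.0f));
+ *   // n == (float3)(0.6f, 0.0f, 0.8f), since length is 5
+ */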
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float, float);
+float __ovld __cnfn fast_distance(float2, float2);
+float __ovld __cnfn fast_distance(float3, float3);
+float __ovld __cnfn fast_distance(float4, float4);
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float);
+float __ovld __cnfn fast_length(float2);
+float __ovld __cnfn fast_length(float3);
+float __ovld __cnfn fast_length(float4);
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps error from the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the values of the floating-point components in
+ * the result vector are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may return p.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float);
+float2 __ovld __cnfn fast_normalize(float2);
+float3 __ovld __cnfn fast_normalize(float3);
+float4 __ovld __cnfn fast_normalize(float4);
+
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float, float);
+int2 __ovld __cnfn isequal(float2, float2);
+int3 __ovld __cnfn isequal(float3, float3);
+int4 __ovld __cnfn isequal(float4, float4);
+int8 __ovld __cnfn isequal(float8, float8);
+int16 __ovld __cnfn isequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double, double);
+long2 __ovld __cnfn isequal(double2, double2);
+long3 __ovld __cnfn isequal(double3, double3);
+long4 __ovld __cnfn isequal(double4, double4);
+long8 __ovld __cnfn isequal(double8, double8);
+long16 __ovld __cnfn isequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half, half);
+short2 __ovld __cnfn isequal(half2, half2);
+short3 __ovld __cnfn isequal(half3, half3);
+short4 __ovld __cnfn isequal(half4, half4);
+short8 __ovld __cnfn isequal(half8, half8);
+short16 __ovld __cnfn isequal(half16, half16);
+#endif //cl_khr_fp16
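+
+/*
+ * Illustrative sketch (not part of the header itself): scalar
+ * relational functions return 0 or 1, while the vector overloads
+ * return 0 or -1 (all bits set) per component, which composes with
+ * select and bitselect.
+ *
+ *   int s = isequal(1.0f, 1.0f);               // s == 1
+ *   int2 v = isequal((float2)(1.0f, 2.0f),
+ *                    (float2)(1.0f, 0.0f));    // v == (int2)(-1, 0)
+ */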
+
+/**
+ * Returns the component-wise compare of x != y.
+ */
+int __ovld __cnfn isnotequal(float, float);
+int2 __ovld __cnfn isnotequal(float2, float2);
+int3 __ovld __cnfn isnotequal(float3, float3);
+int4 __ovld __cnfn isnotequal(float4, float4);
+int8 __ovld __cnfn isnotequal(float8, float8);
+int16 __ovld __cnfn isnotequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnotequal(double, double);
+long2 __ovld __cnfn isnotequal(double2, double2);
+long3 __ovld __cnfn isnotequal(double3, double3);
+long4 __ovld __cnfn isnotequal(double4, double4);
+long8 __ovld __cnfn isnotequal(double8, double8);
+long16 __ovld __cnfn isnotequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnotequal(half, half);
+short2 __ovld __cnfn isnotequal(half2, half2);
+short3 __ovld __cnfn isnotequal(half3, half3);
+short4 __ovld __cnfn isnotequal(half4, half4);
+short8 __ovld __cnfn isnotequal(half8, half8);
+short16 __ovld __cnfn isnotequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x > y.
+ */
+int __ovld __cnfn isgreater(float, float);
+int2 __ovld __cnfn isgreater(float2, float2);
+int3 __ovld __cnfn isgreater(float3, float3);
+int4 __ovld __cnfn isgreater(float4, float4);
+int8 __ovld __cnfn isgreater(float8, float8);
+int16 __ovld __cnfn isgreater(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreater(double, double);
+long2 __ovld __cnfn isgreater(double2, double2);
+long3 __ovld __cnfn isgreater(double3, double3);
+long4 __ovld __cnfn isgreater(double4, double4);
+long8 __ovld __cnfn isgreater(double8, double8);
+long16 __ovld __cnfn isgreater(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreater(half, half);
+short2 __ovld __cnfn isgreater(half2, half2);
+short3 __ovld __cnfn isgreater(half3, half3);
+short4 __ovld __cnfn isgreater(half4, half4);
+short8 __ovld __cnfn isgreater(half8, half8);
+short16 __ovld __cnfn isgreater(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x >= y.
+ */
+int __ovld __cnfn isgreaterequal(float, float);
+int2 __ovld __cnfn isgreaterequal(float2, float2);
+int3 __ovld __cnfn isgreaterequal(float3, float3);
+int4 __ovld __cnfn isgreaterequal(float4, float4);
+int8 __ovld __cnfn isgreaterequal(float8, float8);
+int16 __ovld __cnfn isgreaterequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreaterequal(double, double);
+long2 __ovld __cnfn isgreaterequal(double2, double2);
+long3 __ovld __cnfn isgreaterequal(double3, double3);
+long4 __ovld __cnfn isgreaterequal(double4, double4);
+long8 __ovld __cnfn isgreaterequal(double8, double8);
+long16 __ovld __cnfn isgreaterequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreaterequal(half, half);
+short2 __ovld __cnfn isgreaterequal(half2, half2);
+short3 __ovld __cnfn isgreaterequal(half3, half3);
+short4 __ovld __cnfn isgreaterequal(half4, half4);
+short8 __ovld __cnfn isgreaterequal(half8, half8);
+short16 __ovld __cnfn isgreaterequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x < y.
+ */
+int __ovld __cnfn isless(float, float);
+int2 __ovld __cnfn isless(float2, float2);
+int3 __ovld __cnfn isless(float3, float3);
+int4 __ovld __cnfn isless(float4, float4);
+int8 __ovld __cnfn isless(float8, float8);
+int16 __ovld __cnfn isless(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isless(double, double);
+long2 __ovld __cnfn isless(double2, double2);
+long3 __ovld __cnfn isless(double3, double3);
+long4 __ovld __cnfn isless(double4, double4);
+long8 __ovld __cnfn isless(double8, double8);
+long16 __ovld __cnfn isless(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isless(half, half);
+short2 __ovld __cnfn isless(half2, half2);
+short3 __ovld __cnfn isless(half3, half3);
+short4 __ovld __cnfn isless(half4, half4);
+short8 __ovld __cnfn isless(half8, half8);
+short16 __ovld __cnfn isless(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x <= y.
+ */
+int __ovld __cnfn islessequal(float, float);
+int2 __ovld __cnfn islessequal(float2, float2);
+int3 __ovld __cnfn islessequal(float3, float3);
+int4 __ovld __cnfn islessequal(float4, float4);
+int8 __ovld __cnfn islessequal(float8, float8);
+int16 __ovld __cnfn islessequal(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessequal(double, double);
+long2 __ovld __cnfn islessequal(double2, double2);
+long3 __ovld __cnfn islessequal(double3, double3);
+long4 __ovld __cnfn islessequal(double4, double4);
+long8 __ovld __cnfn islessequal(double8, double8);
+long16 __ovld __cnfn islessequal(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessequal(half, half);
+short2 __ovld __cnfn islessequal(half2, half2);
+short3 __ovld __cnfn islessequal(half3, half3);
+short4 __ovld __cnfn islessequal(half4, half4);
+short8 __ovld __cnfn islessequal(half8, half8);
+short16 __ovld __cnfn islessequal(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of
+ * (x < y) || (x > y) .
+ */
+int __ovld __cnfn islessgreater(float, float);
+int2 __ovld __cnfn islessgreater(float2, float2);
+int3 __ovld __cnfn islessgreater(float3, float3);
+int4 __ovld __cnfn islessgreater(float4, float4);
+int8 __ovld __cnfn islessgreater(float8, float8);
+int16 __ovld __cnfn islessgreater(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessgreater(double, double);
+long2 __ovld __cnfn islessgreater(double2, double2);
+long3 __ovld __cnfn islessgreater(double3, double3);
+long4 __ovld __cnfn islessgreater(double4, double4);
+long8 __ovld __cnfn islessgreater(double8, double8);
+long16 __ovld __cnfn islessgreater(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessgreater(half, half);
+short2 __ovld __cnfn islessgreater(half2, half2);
+short3 __ovld __cnfn islessgreater(half3, half3);
+short4 __ovld __cnfn islessgreater(half4, half4);
+short8 __ovld __cnfn islessgreater(half8, half8);
+short16 __ovld __cnfn islessgreater(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for finite value.
+ */
+int __ovld __cnfn isfinite(float);
+int2 __ovld __cnfn isfinite(float2);
+int3 __ovld __cnfn isfinite(float3);
+int4 __ovld __cnfn isfinite(float4);
+int8 __ovld __cnfn isfinite(float8);
+int16 __ovld __cnfn isfinite(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isfinite(double);
+long2 __ovld __cnfn isfinite(double2);
+long3 __ovld __cnfn isfinite(double3);
+long4 __ovld __cnfn isfinite(double4);
+long8 __ovld __cnfn isfinite(double8);
+long16 __ovld __cnfn isfinite(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isfinite(half);
+short2 __ovld __cnfn isfinite(half2);
+short3 __ovld __cnfn isfinite(half3);
+short4 __ovld __cnfn isfinite(half4);
+short8 __ovld __cnfn isfinite(half8);
+short16 __ovld __cnfn isfinite(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for infinity value (positive or negative).
+ */
+int __ovld __cnfn isinf(float);
+int2 __ovld __cnfn isinf(float2);
+int3 __ovld __cnfn isinf(float3);
+int4 __ovld __cnfn isinf(float4);
+int8 __ovld __cnfn isinf(float8);
+int16 __ovld __cnfn isinf(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isinf(double);
+long2 __ovld __cnfn isinf(double2);
+long3 __ovld __cnfn isinf(double3);
+long4 __ovld __cnfn isinf(double4);
+long8 __ovld __cnfn isinf(double8);
+long16 __ovld __cnfn isinf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isinf(half);
+short2 __ovld __cnfn isinf(half2);
+short3 __ovld __cnfn isinf(half3);
+short4 __ovld __cnfn isinf(half4);
+short8 __ovld __cnfn isinf(half8);
+short16 __ovld __cnfn isinf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a NaN.
+ */
+int __ovld __cnfn isnan(float);
+int2 __ovld __cnfn isnan(float2);
+int3 __ovld __cnfn isnan(float3);
+int4 __ovld __cnfn isnan(float4);
+int8 __ovld __cnfn isnan(float8);
+int16 __ovld __cnfn isnan(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnan(double);
+long2 __ovld __cnfn isnan(double2);
+long3 __ovld __cnfn isnan(double3);
+long4 __ovld __cnfn isnan(double4);
+long8 __ovld __cnfn isnan(double8);
+long16 __ovld __cnfn isnan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnan(half);
+short2 __ovld __cnfn isnan(half2);
+short3 __ovld __cnfn isnan(half3);
+short4 __ovld __cnfn isnan(half4);
+short8 __ovld __cnfn isnan(half8);
+short16 __ovld __cnfn isnan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a normal value.
+ */
+int __ovld __cnfn isnormal(float);
+int2 __ovld __cnfn isnormal(float2);
+int3 __ovld __cnfn isnormal(float3);
+int4 __ovld __cnfn isnormal(float4);
+int8 __ovld __cnfn isnormal(float8);
+int16 __ovld __cnfn isnormal(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnormal(double);
+long2 __ovld __cnfn isnormal(double2);
+long3 __ovld __cnfn isnormal(double3);
+long4 __ovld __cnfn isnormal(double4);
+long8 __ovld __cnfn isnormal(double8);
+long16 __ovld __cnfn isnormal(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnormal(half);
+short2 __ovld __cnfn isnormal(half2);
+short3 __ovld __cnfn isnormal(half3);
+short4 __ovld __cnfn isnormal(half4);
+short8 __ovld __cnfn isnormal(half8);
+short16 __ovld __cnfn isnormal(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are ordered. isordered() takes
+ * arguments x and y, and returns the result
+ * isequal(x, x) && isequal(y, y).
+ */
+int __ovld __cnfn isordered(float, float);
+int2 __ovld __cnfn isordered(float2, float2);
+int3 __ovld __cnfn isordered(float3, float3);
+int4 __ovld __cnfn isordered(float4, float4);
+int8 __ovld __cnfn isordered(float8, float8);
+int16 __ovld __cnfn isordered(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isordered(double, double);
+long2 __ovld __cnfn isordered(double2, double2);
+long3 __ovld __cnfn isordered(double3, double3);
+long4 __ovld __cnfn isordered(double4, double4);
+long8 __ovld __cnfn isordered(double8, double8);
+long16 __ovld __cnfn isordered(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isordered(half, half);
+short2 __ovld __cnfn isordered(half2, half2);
+short3 __ovld __cnfn isordered(half3, half3);
+short4 __ovld __cnfn isordered(half4, half4);
+short8 __ovld __cnfn isordered(half8, half8);
+short16 __ovld __cnfn isordered(half16, half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are unordered. isunordered()
+ * takes arguments x and y, returning non-zero if x or y
+ * is NaN, and zero otherwise.
+ */
+int __ovld __cnfn isunordered(float, float);
+int2 __ovld __cnfn isunordered(float2, float2);
+int3 __ovld __cnfn isunordered(float3, float3);
+int4 __ovld __cnfn isunordered(float4, float4);
+int8 __ovld __cnfn isunordered(float8, float8);
+int16 __ovld __cnfn isunordered(float16, float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isunordered(double, double);
+long2 __ovld __cnfn isunordered(double2, double2);
+long3 __ovld __cnfn isunordered(double3, double3);
+long4 __ovld __cnfn isunordered(double4, double4);
+long8 __ovld __cnfn isunordered(double8, double8);
+long16 __ovld __cnfn isunordered(double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isunordered(half, half);
+short2 __ovld __cnfn isunordered(half2, half2);
+short3 __ovld __cnfn isunordered(half3, half3);
+short4 __ovld __cnfn isunordered(half4, half4);
+short8 __ovld __cnfn isunordered(half8, half8);
+short16 __ovld __cnfn isunordered(half16, half16);
+#endif //cl_khr_fp16
+
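+// Editorial sketch: by the definitions above, isordered() and isunordered()
+// are complementary NaN tests on the argument pair:
+//
+//   int ok  = isordered(x, y);    // scalar: 1 iff neither x nor y is NaN
+//   int bad = isunordered(x, y);  // scalar: 1 iff x or y is NaN
+//   // for any float x, y: ok == !bad
+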
+/**
+ * Test for sign bit. The scalar version of the function
+ * returns 1 if the sign bit in the float is set, and 0
+ * otherwise. The vector version returns, for each
+ * component in floatn, -1 if the sign bit of that
+ * component is set, and 0 otherwise.
+ */
+int __ovld __cnfn signbit(float);
+int2 __ovld __cnfn signbit(float2);
+int3 __ovld __cnfn signbit(float3);
+int4 __ovld __cnfn signbit(float4);
+int8 __ovld __cnfn signbit(float8);
+int16 __ovld __cnfn signbit(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn signbit(double);
+long2 __ovld __cnfn signbit(double2);
+long3 __ovld __cnfn signbit(double3);
+long4 __ovld __cnfn signbit(double4);
+long8 __ovld __cnfn signbit(double8);
+long16 __ovld __cnfn signbit(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn signbit(half);
+short2 __ovld __cnfn signbit(half2);
+short3 __ovld __cnfn signbit(half3);
+short4 __ovld __cnfn signbit(half4);
+short8 __ovld __cnfn signbit(half8);
+short16 __ovld __cnfn signbit(half16);
+#endif //cl_khr_fp16
+
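+// Editorial sketch: signbit() inspects only the sign bit, so it
+// distinguishes -0.0f from +0.0f where an ordinary comparison cannot:
+//
+//   signbit(-0.0f);                  // 1, even though (-0.0f < 0.0f) is false
+//   signbit((float2)(-1.0f, 2.0f));  // (int2)(-1, 0)
+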
+/**
+ * Returns 1 if the most significant bit in any component
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn any(char);
+int __ovld __cnfn any(char2);
+int __ovld __cnfn any(char3);
+int __ovld __cnfn any(char4);
+int __ovld __cnfn any(char8);
+int __ovld __cnfn any(char16);
+int __ovld __cnfn any(short);
+int __ovld __cnfn any(short2);
+int __ovld __cnfn any(short3);
+int __ovld __cnfn any(short4);
+int __ovld __cnfn any(short8);
+int __ovld __cnfn any(short16);
+int __ovld __cnfn any(int);
+int __ovld __cnfn any(int2);
+int __ovld __cnfn any(int3);
+int __ovld __cnfn any(int4);
+int __ovld __cnfn any(int8);
+int __ovld __cnfn any(int16);
+int __ovld __cnfn any(long);
+int __ovld __cnfn any(long2);
+int __ovld __cnfn any(long3);
+int __ovld __cnfn any(long4);
+int __ovld __cnfn any(long8);
+int __ovld __cnfn any(long16);
+
+/**
+ * Returns 1 if the most significant bit in all components
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn all(char);
+int __ovld __cnfn all(char2);
+int __ovld __cnfn all(char3);
+int __ovld __cnfn all(char4);
+int __ovld __cnfn all(char8);
+int __ovld __cnfn all(char16);
+int __ovld __cnfn all(short);
+int __ovld __cnfn all(short2);
+int __ovld __cnfn all(short3);
+int __ovld __cnfn all(short4);
+int __ovld __cnfn all(short8);
+int __ovld __cnfn all(short16);
+int __ovld __cnfn all(int);
+int __ovld __cnfn all(int2);
+int __ovld __cnfn all(int3);
+int __ovld __cnfn all(int4);
+int __ovld __cnfn all(int8);
+int __ovld __cnfn all(int16);
+int __ovld __cnfn all(long);
+int __ovld __cnfn all(long2);
+int __ovld __cnfn all(long3);
+int __ovld __cnfn all(long4);
+int __ovld __cnfn all(long8);
+int __ovld __cnfn all(long16);
+
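+// Editorial sketch: vector relational operators produce 0 or -1 per
+// component, so their most significant bits feed any()/all() directly.
+// Names are hypothetical:
+//
+//   int4 mask = (v > limit);            // -1 in out-of-range lanes
+//   if (any(mask)) { /* some lane out of range */ }
+//   if (all(mask)) { /* every lane out of range */ }
+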
+/**
+ * Each bit of the result is the corresponding bit of a if
+ * the corresponding bit of c is 0. Otherwise it is the
+ * corresponding bit of b.
+ */
+char __ovld __cnfn bitselect(char, char, char);
+uchar __ovld __cnfn bitselect(uchar, uchar, uchar);
+char2 __ovld __cnfn bitselect(char2, char2, char2);
+uchar2 __ovld __cnfn bitselect(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn bitselect(char3, char3, char3);
+uchar3 __ovld __cnfn bitselect(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn bitselect(char4, char4, char4);
+uchar4 __ovld __cnfn bitselect(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn bitselect(char8, char8, char8);
+uchar8 __ovld __cnfn bitselect(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn bitselect(char16, char16, char16);
+uchar16 __ovld __cnfn bitselect(uchar16, uchar16, uchar16);
+short __ovld __cnfn bitselect(short, short, short);
+ushort __ovld __cnfn bitselect(ushort, ushort, ushort);
+short2 __ovld __cnfn bitselect(short2, short2, short2);
+ushort2 __ovld __cnfn bitselect(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn bitselect(short3, short3, short3);
+ushort3 __ovld __cnfn bitselect(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn bitselect(short4, short4, short4);
+ushort4 __ovld __cnfn bitselect(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn bitselect(short8, short8, short8);
+ushort8 __ovld __cnfn bitselect(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn bitselect(short16, short16, short16);
+ushort16 __ovld __cnfn bitselect(ushort16, ushort16, ushort16);
+int __ovld __cnfn bitselect(int, int, int);
+uint __ovld __cnfn bitselect(uint, uint, uint);
+int2 __ovld __cnfn bitselect(int2, int2, int2);
+uint2 __ovld __cnfn bitselect(uint2, uint2, uint2);
+int3 __ovld __cnfn bitselect(int3, int3, int3);
+uint3 __ovld __cnfn bitselect(uint3, uint3, uint3);
+int4 __ovld __cnfn bitselect(int4, int4, int4);
+uint4 __ovld __cnfn bitselect(uint4, uint4, uint4);
+int8 __ovld __cnfn bitselect(int8, int8, int8);
+uint8 __ovld __cnfn bitselect(uint8, uint8, uint8);
+int16 __ovld __cnfn bitselect(int16, int16, int16);
+uint16 __ovld __cnfn bitselect(uint16, uint16, uint16);
+long __ovld __cnfn bitselect(long, long, long);
+ulong __ovld __cnfn bitselect(ulong, ulong, ulong);
+long2 __ovld __cnfn bitselect(long2, long2, long2);
+ulong2 __ovld __cnfn bitselect(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn bitselect(long3, long3, long3);
+ulong3 __ovld __cnfn bitselect(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn bitselect(long4, long4, long4);
+ulong4 __ovld __cnfn bitselect(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn bitselect(long8, long8, long8);
+ulong8 __ovld __cnfn bitselect(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn bitselect(long16, long16, long16);
+ulong16 __ovld __cnfn bitselect(ulong16, ulong16, ulong16);
+float __ovld __cnfn bitselect(float, float, float);
+float2 __ovld __cnfn bitselect(float2, float2, float2);
+float3 __ovld __cnfn bitselect(float3, float3, float3);
+float4 __ovld __cnfn bitselect(float4, float4, float4);
+float8 __ovld __cnfn bitselect(float8, float8, float8);
+float16 __ovld __cnfn bitselect(float16, float16, float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn bitselect(double, double, double);
+double2 __ovld __cnfn bitselect(double2, double2, double2);
+double3 __ovld __cnfn bitselect(double3, double3, double3);
+double4 __ovld __cnfn bitselect(double4, double4, double4);
+double8 __ovld __cnfn bitselect(double8, double8, double8);
+double16 __ovld __cnfn bitselect(double16, double16, double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn bitselect(half, half, half);
+half2 __ovld __cnfn bitselect(half2, half2, half2);
+half3 __ovld __cnfn bitselect(half3, half3, half3);
+half4 __ovld __cnfn bitselect(half4, half4, half4);
+half8 __ovld __cnfn bitselect(half8, half8, half8);
+half16 __ovld __cnfn bitselect(half16, half16, half16);
+#endif //cl_khr_fp16
+
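+// Editorial sketch: per bit, bitselect(a, b, c) == (a & ~c) | (b & c).
+// One idiom (an illustration, not drawn from this header) is splicing the
+// sign bit of one float onto the magnitude of another:
+//
+//   float y = bitselect(fabs(x), s, as_float(0x80000000u));
+//   // y has the magnitude of x and the sign bit of s
+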
+/**
+ * For each component of a vector type,
+ * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
+ * For a scalar type, result = c ? b : a.
+ * b and a must have the same type.
+ * c must have the same number of elements and bits as a.
+ */
+char __ovld __cnfn select(char, char, char);
+uchar __ovld __cnfn select(uchar, uchar, char);
+char2 __ovld __cnfn select(char2, char2, char2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, char2);
+char3 __ovld __cnfn select(char3, char3, char3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, char3);
+char4 __ovld __cnfn select(char4, char4, char4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, char4);
+char8 __ovld __cnfn select(char8, char8, char8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, char8);
+char16 __ovld __cnfn select(char16, char16, char16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, char16);
+
+short __ovld __cnfn select(short, short, short);
+ushort __ovld __cnfn select(ushort, ushort, short);
+short2 __ovld __cnfn select(short2, short2, short2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, short2);
+short3 __ovld __cnfn select(short3, short3, short3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, short3);
+short4 __ovld __cnfn select(short4, short4, short4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, short4);
+short8 __ovld __cnfn select(short8, short8, short8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, short8);
+short16 __ovld __cnfn select(short16, short16, short16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, short16);
+
+int __ovld __cnfn select(int, int, int);
+uint __ovld __cnfn select(uint, uint, int);
+int2 __ovld __cnfn select(int2, int2, int2);
+uint2 __ovld __cnfn select(uint2, uint2, int2);
+int3 __ovld __cnfn select(int3, int3, int3);
+uint3 __ovld __cnfn select(uint3, uint3, int3);
+int4 __ovld __cnfn select(int4, int4, int4);
+uint4 __ovld __cnfn select(uint4, uint4, int4);
+int8 __ovld __cnfn select(int8, int8, int8);
+uint8 __ovld __cnfn select(uint8, uint8, int8);
+int16 __ovld __cnfn select(int16, int16, int16);
+uint16 __ovld __cnfn select(uint16, uint16, int16);
+float __ovld __cnfn select(float, float, int);
+float2 __ovld __cnfn select(float2, float2, int2);
+float3 __ovld __cnfn select(float3, float3, int3);
+float4 __ovld __cnfn select(float4, float4, int4);
+float8 __ovld __cnfn select(float8, float8, int8);
+float16 __ovld __cnfn select(float16, float16, int16);
+
+long __ovld __cnfn select(long, long, long);
+ulong __ovld __cnfn select(ulong, ulong, long);
+long2 __ovld __cnfn select(long2, long2, long2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, long2);
+long3 __ovld __cnfn select(long3, long3, long3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, long3);
+long4 __ovld __cnfn select(long4, long4, long4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, long4);
+long8 __ovld __cnfn select(long8, long8, long8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, long8);
+long16 __ovld __cnfn select(long16, long16, long16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, long16);
+
+char __ovld __cnfn select(char, char, uchar);
+uchar __ovld __cnfn select(uchar, uchar, uchar);
+char2 __ovld __cnfn select(char2, char2, uchar2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn select(char3, char3, uchar3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn select(char4, char4, uchar4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn select(char8, char8, uchar8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn select(char16, char16, uchar16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, uchar16);
+
+short __ovld __cnfn select(short, short, ushort);
+ushort __ovld __cnfn select(ushort, ushort, ushort);
+short2 __ovld __cnfn select(short2, short2, ushort2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn select(short3, short3, ushort3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn select(short4, short4, ushort4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn select(short8, short8, ushort8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn select(short16, short16, ushort16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, ushort16);
+
+int __ovld __cnfn select(int, int, uint);
+uint __ovld __cnfn select(uint, uint, uint);
+int2 __ovld __cnfn select(int2, int2, uint2);
+uint2 __ovld __cnfn select(uint2, uint2, uint2);
+int3 __ovld __cnfn select(int3, int3, uint3);
+uint3 __ovld __cnfn select(uint3, uint3, uint3);
+int4 __ovld __cnfn select(int4, int4, uint4);
+uint4 __ovld __cnfn select(uint4, uint4, uint4);
+int8 __ovld __cnfn select(int8, int8, uint8);
+uint8 __ovld __cnfn select(uint8, uint8, uint8);
+int16 __ovld __cnfn select(int16, int16, uint16);
+uint16 __ovld __cnfn select(uint16, uint16, uint16);
+float __ovld __cnfn select(float, float, uint);
+float2 __ovld __cnfn select(float2, float2, uint2);
+float3 __ovld __cnfn select(float3, float3, uint3);
+float4 __ovld __cnfn select(float4, float4, uint4);
+float8 __ovld __cnfn select(float8, float8, uint8);
+float16 __ovld __cnfn select(float16, float16, uint16);
+
+long __ovld __cnfn select(long, long, ulong);
+ulong __ovld __cnfn select(ulong, ulong, ulong);
+long2 __ovld __cnfn select(long2, long2, ulong2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn select(long3, long3, ulong3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn select(long4, long4, ulong4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn select(long8, long8, ulong8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn select(long16, long16, ulong16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, ulong16);
+
+#ifdef cl_khr_fp64
+double __ovld __cnfn select(double, double, long);
+double2 __ovld __cnfn select(double2, double2, long2);
+double3 __ovld __cnfn select(double3, double3, long3);
+double4 __ovld __cnfn select(double4, double4, long4);
+double8 __ovld __cnfn select(double8, double8, long8);
+double16 __ovld __cnfn select(double16, double16, long16);
+double __ovld __cnfn select(double, double, ulong);
+double2 __ovld __cnfn select(double2, double2, ulong2);
+double3 __ovld __cnfn select(double3, double3, ulong3);
+double4 __ovld __cnfn select(double4, double4, ulong4);
+double8 __ovld __cnfn select(double8, double8, ulong8);
+double16 __ovld __cnfn select(double16, double16, ulong16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn select(half, half, short);
+half2 __ovld __cnfn select(half2, half2, short2);
+half3 __ovld __cnfn select(half3, half3, short3);
+half4 __ovld __cnfn select(half4, half4, short4);
+half8 __ovld __cnfn select(half8, half8, short8);
+half16 __ovld __cnfn select(half16, half16, short16);
+half __ovld __cnfn select(half, half, ushort);
+half2 __ovld __cnfn select(half2, half2, ushort2);
+half3 __ovld __cnfn select(half3, half3, ushort3);
+half4 __ovld __cnfn select(half4, half4, ushort4);
+half8 __ovld __cnfn select(half8, half8, ushort8);
+half16 __ovld __cnfn select(half16, half16, ushort16);
+#endif //cl_khr_fp16
+
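+// Editorial sketch: the vector select() keys off the MSB of each mask
+// component, matching the 0/-1 results of vector comparisons:
+//
+//   float4 clipped = select(v, (float4)(1.0f), v > (float4)(1.0f));
+//   // lanes where v > 1.0f become 1.0f; other lanes keep v
+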
+// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
+// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
+/**
+ * Use generic type gentype to indicate the built-in data types
+ * char, uchar, short, ushort, int, uint, long, ulong, float,
+ * double or half.
+ *
+ * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
+ *
+ * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
+ *
+ * The address computed as (p + (offset * n)) must be
+ * 8-bit aligned if gentype is char, uchar;
+ * 16-bit aligned if gentype is short, ushort, half;
+ * 32-bit aligned if gentype is int, uint, float;
+ * 64-bit aligned if gentype is long, ulong, double.
+ */
+
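+// Editorial sketch: offsets count in elements, not bytes — p is a gentype
+// pointer, so (p + (offset * n)) is element arithmetic. For n == 3 this
+// reads or writes three consecutive elements:
+//
+//   float3 v = vload3(i, src);     // reads src[3*i], src[3*i+1], src[3*i+2]
+//   vstore3(v * 2.0f, i, dst);     // writes the same three slots of dst
+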
+char2 __ovld __purefn vload2(size_t, const __constant char *);
+uchar2 __ovld __purefn vload2(size_t, const __constant uchar *);
+short2 __ovld __purefn vload2(size_t, const __constant short *);
+ushort2 __ovld __purefn vload2(size_t, const __constant ushort *);
+int2 __ovld __purefn vload2(size_t, const __constant int *);
+uint2 __ovld __purefn vload2(size_t, const __constant uint *);
+long2 __ovld __purefn vload2(size_t, const __constant long *);
+ulong2 __ovld __purefn vload2(size_t, const __constant ulong *);
+float2 __ovld __purefn vload2(size_t, const __constant float *);
+char3 __ovld __purefn vload3(size_t, const __constant char *);
+uchar3 __ovld __purefn vload3(size_t, const __constant uchar *);
+short3 __ovld __purefn vload3(size_t, const __constant short *);
+ushort3 __ovld __purefn vload3(size_t, const __constant ushort *);
+int3 __ovld __purefn vload3(size_t, const __constant int *);
+uint3 __ovld __purefn vload3(size_t, const __constant uint *);
+long3 __ovld __purefn vload3(size_t, const __constant long *);
+ulong3 __ovld __purefn vload3(size_t, const __constant ulong *);
+float3 __ovld __purefn vload3(size_t, const __constant float *);
+char4 __ovld __purefn vload4(size_t, const __constant char *);
+uchar4 __ovld __purefn vload4(size_t, const __constant uchar *);
+short4 __ovld __purefn vload4(size_t, const __constant short *);
+ushort4 __ovld __purefn vload4(size_t, const __constant ushort *);
+int4 __ovld __purefn vload4(size_t, const __constant int *);
+uint4 __ovld __purefn vload4(size_t, const __constant uint *);
+long4 __ovld __purefn vload4(size_t, const __constant long *);
+ulong4 __ovld __purefn vload4(size_t, const __constant ulong *);
+float4 __ovld __purefn vload4(size_t, const __constant float *);
+char8 __ovld __purefn vload8(size_t, const __constant char *);
+uchar8 __ovld __purefn vload8(size_t, const __constant uchar *);
+short8 __ovld __purefn vload8(size_t, const __constant short *);
+ushort8 __ovld __purefn vload8(size_t, const __constant ushort *);
+int8 __ovld __purefn vload8(size_t, const __constant int *);
+uint8 __ovld __purefn vload8(size_t, const __constant uint *);
+long8 __ovld __purefn vload8(size_t, const __constant long *);
+ulong8 __ovld __purefn vload8(size_t, const __constant ulong *);
+float8 __ovld __purefn vload8(size_t, const __constant float *);
+char16 __ovld __purefn vload16(size_t, const __constant char *);
+uchar16 __ovld __purefn vload16(size_t, const __constant uchar *);
+short16 __ovld __purefn vload16(size_t, const __constant short *);
+ushort16 __ovld __purefn vload16(size_t, const __constant ushort *);
+int16 __ovld __purefn vload16(size_t, const __constant int *);
+uint16 __ovld __purefn vload16(size_t, const __constant uint *);
+long16 __ovld __purefn vload16(size_t, const __constant long *);
+ulong16 __ovld __purefn vload16(size_t, const __constant ulong *);
+float16 __ovld __purefn vload16(size_t, const __constant float *);
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const __constant double *);
+double3 __ovld __purefn vload3(size_t, const __constant double *);
+double4 __ovld __purefn vload4(size_t, const __constant double *);
+double8 __ovld __purefn vload8(size_t, const __constant double *);
+double16 __ovld __purefn vload16(size_t, const __constant double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const __constant half *);
+half3 __ovld __purefn vload3(size_t, const __constant half *);
+half4 __ovld __purefn vload4(size_t, const __constant half *);
+half8 __ovld __purefn vload8(size_t, const __constant half *);
+half16 __ovld __purefn vload16(size_t, const __constant half *);
+#endif //cl_khr_fp16
+
+#if defined(__opencl_c_generic_address_space)
+char2 __ovld __purefn vload2(size_t, const char *);
+uchar2 __ovld __purefn vload2(size_t, const uchar *);
+short2 __ovld __purefn vload2(size_t, const short *);
+ushort2 __ovld __purefn vload2(size_t, const ushort *);
+int2 __ovld __purefn vload2(size_t, const int *);
+uint2 __ovld __purefn vload2(size_t, const uint *);
+long2 __ovld __purefn vload2(size_t, const long *);
+ulong2 __ovld __purefn vload2(size_t, const ulong *);
+float2 __ovld __purefn vload2(size_t, const float *);
+char3 __ovld __purefn vload3(size_t, const char *);
+uchar3 __ovld __purefn vload3(size_t, const uchar *);
+short3 __ovld __purefn vload3(size_t, const short *);
+ushort3 __ovld __purefn vload3(size_t, const ushort *);
+int3 __ovld __purefn vload3(size_t, const int *);
+uint3 __ovld __purefn vload3(size_t, const uint *);
+long3 __ovld __purefn vload3(size_t, const long *);
+ulong3 __ovld __purefn vload3(size_t, const ulong *);
+float3 __ovld __purefn vload3(size_t, const float *);
+char4 __ovld __purefn vload4(size_t, const char *);
+uchar4 __ovld __purefn vload4(size_t, const uchar *);
+short4 __ovld __purefn vload4(size_t, const short *);
+ushort4 __ovld __purefn vload4(size_t, const ushort *);
+int4 __ovld __purefn vload4(size_t, const int *);
+uint4 __ovld __purefn vload4(size_t, const uint *);
+long4 __ovld __purefn vload4(size_t, const long *);
+ulong4 __ovld __purefn vload4(size_t, const ulong *);
+float4 __ovld __purefn vload4(size_t, const float *);
+char8 __ovld __purefn vload8(size_t, const char *);
+uchar8 __ovld __purefn vload8(size_t, const uchar *);
+short8 __ovld __purefn vload8(size_t, const short *);
+ushort8 __ovld __purefn vload8(size_t, const ushort *);
+int8 __ovld __purefn vload8(size_t, const int *);
+uint8 __ovld __purefn vload8(size_t, const uint *);
+long8 __ovld __purefn vload8(size_t, const long *);
+ulong8 __ovld __purefn vload8(size_t, const ulong *);
+float8 __ovld __purefn vload8(size_t, const float *);
+char16 __ovld __purefn vload16(size_t, const char *);
+uchar16 __ovld __purefn vload16(size_t, const uchar *);
+short16 __ovld __purefn vload16(size_t, const short *);
+ushort16 __ovld __purefn vload16(size_t, const ushort *);
+int16 __ovld __purefn vload16(size_t, const int *);
+uint16 __ovld __purefn vload16(size_t, const uint *);
+long16 __ovld __purefn vload16(size_t, const long *);
+ulong16 __ovld __purefn vload16(size_t, const ulong *);
+float16 __ovld __purefn vload16(size_t, const float *);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const double *);
+double3 __ovld __purefn vload3(size_t, const double *);
+double4 __ovld __purefn vload4(size_t, const double *);
+double8 __ovld __purefn vload8(size_t, const double *);
+double16 __ovld __purefn vload16(size_t, const double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const half *);
+half3 __ovld __purefn vload3(size_t, const half *);
+half4 __ovld __purefn vload4(size_t, const half *);
+half8 __ovld __purefn vload8(size_t, const half *);
+half16 __ovld __purefn vload16(size_t, const half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+char2 __ovld __purefn vload2(size_t, const __global char *);
+uchar2 __ovld __purefn vload2(size_t, const __global uchar *);
+short2 __ovld __purefn vload2(size_t, const __global short *);
+ushort2 __ovld __purefn vload2(size_t, const __global ushort *);
+int2 __ovld __purefn vload2(size_t, const __global int *);
+uint2 __ovld __purefn vload2(size_t, const __global uint *);
+long2 __ovld __purefn vload2(size_t, const __global long *);
+ulong2 __ovld __purefn vload2(size_t, const __global ulong *);
+float2 __ovld __purefn vload2(size_t, const __global float *);
+char3 __ovld __purefn vload3(size_t, const __global char *);
+uchar3 __ovld __purefn vload3(size_t, const __global uchar *);
+short3 __ovld __purefn vload3(size_t, const __global short *);
+ushort3 __ovld __purefn vload3(size_t, const __global ushort *);
+int3 __ovld __purefn vload3(size_t, const __global int *);
+uint3 __ovld __purefn vload3(size_t, const __global uint *);
+long3 __ovld __purefn vload3(size_t, const __global long *);
+ulong3 __ovld __purefn vload3(size_t, const __global ulong *);
+float3 __ovld __purefn vload3(size_t, const __global float *);
+char4 __ovld __purefn vload4(size_t, const __global char *);
+uchar4 __ovld __purefn vload4(size_t, const __global uchar *);
+short4 __ovld __purefn vload4(size_t, const __global short *);
+ushort4 __ovld __purefn vload4(size_t, const __global ushort *);
+int4 __ovld __purefn vload4(size_t, const __global int *);
+uint4 __ovld __purefn vload4(size_t, const __global uint *);
+long4 __ovld __purefn vload4(size_t, const __global long *);
+ulong4 __ovld __purefn vload4(size_t, const __global ulong *);
+float4 __ovld __purefn vload4(size_t, const __global float *);
+char8 __ovld __purefn vload8(size_t, const __global char *);
+uchar8 __ovld __purefn vload8(size_t, const __global uchar *);
+short8 __ovld __purefn vload8(size_t, const __global short *);
+ushort8 __ovld __purefn vload8(size_t, const __global ushort *);
+int8 __ovld __purefn vload8(size_t, const __global int *);
+uint8 __ovld __purefn vload8(size_t, const __global uint *);
+long8 __ovld __purefn vload8(size_t, const __global long *);
+ulong8 __ovld __purefn vload8(size_t, const __global ulong *);
+float8 __ovld __purefn vload8(size_t, const __global float *);
+char16 __ovld __purefn vload16(size_t, const __global char *);
+uchar16 __ovld __purefn vload16(size_t, const __global uchar *);
+short16 __ovld __purefn vload16(size_t, const __global short *);
+ushort16 __ovld __purefn vload16(size_t, const __global ushort *);
+int16 __ovld __purefn vload16(size_t, const __global int *);
+uint16 __ovld __purefn vload16(size_t, const __global uint *);
+long16 __ovld __purefn vload16(size_t, const __global long *);
+ulong16 __ovld __purefn vload16(size_t, const __global ulong *);
+float16 __ovld __purefn vload16(size_t, const __global float *);
+char2 __ovld __purefn vload2(size_t, const __local char *);
+uchar2 __ovld __purefn vload2(size_t, const __local uchar *);
+short2 __ovld __purefn vload2(size_t, const __local short *);
+ushort2 __ovld __purefn vload2(size_t, const __local ushort *);
+int2 __ovld __purefn vload2(size_t, const __local int *);
+uint2 __ovld __purefn vload2(size_t, const __local uint *);
+long2 __ovld __purefn vload2(size_t, const __local long *);
+ulong2 __ovld __purefn vload2(size_t, const __local ulong *);
+float2 __ovld __purefn vload2(size_t, const __local float *);
+char3 __ovld __purefn vload3(size_t, const __local char *);
+uchar3 __ovld __purefn vload3(size_t, const __local uchar *);
+short3 __ovld __purefn vload3(size_t, const __local short *);
+ushort3 __ovld __purefn vload3(size_t, const __local ushort *);
+int3 __ovld __purefn vload3(size_t, const __local int *);
+uint3 __ovld __purefn vload3(size_t, const __local uint *);
+long3 __ovld __purefn vload3(size_t, const __local long *);
+ulong3 __ovld __purefn vload3(size_t, const __local ulong *);
+float3 __ovld __purefn vload3(size_t, const __local float *);
+char4 __ovld __purefn vload4(size_t, const __local char *);
+uchar4 __ovld __purefn vload4(size_t, const __local uchar *);
+short4 __ovld __purefn vload4(size_t, const __local short *);
+ushort4 __ovld __purefn vload4(size_t, const __local ushort *);
+int4 __ovld __purefn vload4(size_t, const __local int *);
+uint4 __ovld __purefn vload4(size_t, const __local uint *);
+long4 __ovld __purefn vload4(size_t, const __local long *);
+ulong4 __ovld __purefn vload4(size_t, const __local ulong *);
+float4 __ovld __purefn vload4(size_t, const __local float *);
+char8 __ovld __purefn vload8(size_t, const __local char *);
+uchar8 __ovld __purefn vload8(size_t, const __local uchar *);
+short8 __ovld __purefn vload8(size_t, const __local short *);
+ushort8 __ovld __purefn vload8(size_t, const __local ushort *);
+int8 __ovld __purefn vload8(size_t, const __local int *);
+uint8 __ovld __purefn vload8(size_t, const __local uint *);
+long8 __ovld __purefn vload8(size_t, const __local long *);
+ulong8 __ovld __purefn vload8(size_t, const __local ulong *);
+float8 __ovld __purefn vload8(size_t, const __local float *);
+char16 __ovld __purefn vload16(size_t, const __local char *);
+uchar16 __ovld __purefn vload16(size_t, const __local uchar *);
+short16 __ovld __purefn vload16(size_t, const __local short *);
+ushort16 __ovld __purefn vload16(size_t, const __local ushort *);
+int16 __ovld __purefn vload16(size_t, const __local int *);
+uint16 __ovld __purefn vload16(size_t, const __local uint *);
+long16 __ovld __purefn vload16(size_t, const __local long *);
+ulong16 __ovld __purefn vload16(size_t, const __local ulong *);
+float16 __ovld __purefn vload16(size_t, const __local float *);
+char2 __ovld __purefn vload2(size_t, const __private char *);
+uchar2 __ovld __purefn vload2(size_t, const __private uchar *);
+short2 __ovld __purefn vload2(size_t, const __private short *);
+ushort2 __ovld __purefn vload2(size_t, const __private ushort *);
+int2 __ovld __purefn vload2(size_t, const __private int *);
+uint2 __ovld __purefn vload2(size_t, const __private uint *);
+long2 __ovld __purefn vload2(size_t, const __private long *);
+ulong2 __ovld __purefn vload2(size_t, const __private ulong *);
+float2 __ovld __purefn vload2(size_t, const __private float *);
+char3 __ovld __purefn vload3(size_t, const __private char *);
+uchar3 __ovld __purefn vload3(size_t, const __private uchar *);
+short3 __ovld __purefn vload3(size_t, const __private short *);
+ushort3 __ovld __purefn vload3(size_t, const __private ushort *);
+int3 __ovld __purefn vload3(size_t, const __private int *);
+uint3 __ovld __purefn vload3(size_t, const __private uint *);
+long3 __ovld __purefn vload3(size_t, const __private long *);
+ulong3 __ovld __purefn vload3(size_t, const __private ulong *);
+float3 __ovld __purefn vload3(size_t, const __private float *);
+char4 __ovld __purefn vload4(size_t, const __private char *);
+uchar4 __ovld __purefn vload4(size_t, const __private uchar *);
+short4 __ovld __purefn vload4(size_t, const __private short *);
+ushort4 __ovld __purefn vload4(size_t, const __private ushort *);
+int4 __ovld __purefn vload4(size_t, const __private int *);
+uint4 __ovld __purefn vload4(size_t, const __private uint *);
+long4 __ovld __purefn vload4(size_t, const __private long *);
+ulong4 __ovld __purefn vload4(size_t, const __private ulong *);
+float4 __ovld __purefn vload4(size_t, const __private float *);
+char8 __ovld __purefn vload8(size_t, const __private char *);
+uchar8 __ovld __purefn vload8(size_t, const __private uchar *);
+short8 __ovld __purefn vload8(size_t, const __private short *);
+ushort8 __ovld __purefn vload8(size_t, const __private ushort *);
+int8 __ovld __purefn vload8(size_t, const __private int *);
+uint8 __ovld __purefn vload8(size_t, const __private uint *);
+long8 __ovld __purefn vload8(size_t, const __private long *);
+ulong8 __ovld __purefn vload8(size_t, const __private ulong *);
+float8 __ovld __purefn vload8(size_t, const __private float *);
+char16 __ovld __purefn vload16(size_t, const __private char *);
+uchar16 __ovld __purefn vload16(size_t, const __private uchar *);
+short16 __ovld __purefn vload16(size_t, const __private short *);
+ushort16 __ovld __purefn vload16(size_t, const __private ushort *);
+int16 __ovld __purefn vload16(size_t, const __private int *);
+uint16 __ovld __purefn vload16(size_t, const __private uint *);
+long16 __ovld __purefn vload16(size_t, const __private long *);
+ulong16 __ovld __purefn vload16(size_t, const __private ulong *);
+float16 __ovld __purefn vload16(size_t, const __private float *);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t, const __global double *);
+double3 __ovld __purefn vload3(size_t, const __global double *);
+double4 __ovld __purefn vload4(size_t, const __global double *);
+double8 __ovld __purefn vload8(size_t, const __global double *);
+double16 __ovld __purefn vload16(size_t, const __global double *);
+double2 __ovld __purefn vload2(size_t, const __local double *);
+double3 __ovld __purefn vload3(size_t, const __local double *);
+double4 __ovld __purefn vload4(size_t, const __local double *);
+double8 __ovld __purefn vload8(size_t, const __local double *);
+double16 __ovld __purefn vload16(size_t, const __local double *);
+double2 __ovld __purefn vload2(size_t, const __private double *);
+double3 __ovld __purefn vload3(size_t, const __private double *);
+double4 __ovld __purefn vload4(size_t, const __private double *);
+double8 __ovld __purefn vload8(size_t, const __private double *);
+double16 __ovld __purefn vload16(size_t, const __private double *);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __purefn vload2(size_t, const __global half *);
+half3 __ovld __purefn vload3(size_t, const __global half *);
+half4 __ovld __purefn vload4(size_t, const __global half *);
+half8 __ovld __purefn vload8(size_t, const __global half *);
+half16 __ovld __purefn vload16(size_t, const __global half *);
+half2 __ovld __purefn vload2(size_t, const __local half *);
+half3 __ovld __purefn vload3(size_t, const __local half *);
+half4 __ovld __purefn vload4(size_t, const __local half *);
+half8 __ovld __purefn vload8(size_t, const __local half *);
+half16 __ovld __purefn vload16(size_t, const __local half *);
+half2 __ovld __purefn vload2(size_t, const __private half *);
+half3 __ovld __purefn vload3(size_t, const __private half *);
+half4 __ovld __purefn vload4(size_t, const __private half *);
+half8 __ovld __purefn vload8(size_t, const __private half *);
+half16 __ovld __purefn vload16(size_t, const __private half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore2(char2, size_t, char *);
+void __ovld vstore2(uchar2, size_t, uchar *);
+void __ovld vstore2(short2, size_t, short *);
+void __ovld vstore2(ushort2, size_t, ushort *);
+void __ovld vstore2(int2, size_t, int *);
+void __ovld vstore2(uint2, size_t, uint *);
+void __ovld vstore2(long2, size_t, long *);
+void __ovld vstore2(ulong2, size_t, ulong *);
+void __ovld vstore2(float2, size_t, float *);
+void __ovld vstore3(char3, size_t, char *);
+void __ovld vstore3(uchar3, size_t, uchar *);
+void __ovld vstore3(short3, size_t, short *);
+void __ovld vstore3(ushort3, size_t, ushort *);
+void __ovld vstore3(int3, size_t, int *);
+void __ovld vstore3(uint3, size_t, uint *);
+void __ovld vstore3(long3, size_t, long *);
+void __ovld vstore3(ulong3, size_t, ulong *);
+void __ovld vstore3(float3, size_t, float *);
+void __ovld vstore4(char4, size_t, char *);
+void __ovld vstore4(uchar4, size_t, uchar *);
+void __ovld vstore4(short4, size_t, short *);
+void __ovld vstore4(ushort4, size_t, ushort *);
+void __ovld vstore4(int4, size_t, int *);
+void __ovld vstore4(uint4, size_t, uint *);
+void __ovld vstore4(long4, size_t, long *);
+void __ovld vstore4(ulong4, size_t, ulong *);
+void __ovld vstore4(float4, size_t, float *);
+void __ovld vstore8(char8, size_t, char *);
+void __ovld vstore8(uchar8, size_t, uchar *);
+void __ovld vstore8(short8, size_t, short *);
+void __ovld vstore8(ushort8, size_t, ushort *);
+void __ovld vstore8(int8, size_t, int *);
+void __ovld vstore8(uint8, size_t, uint *);
+void __ovld vstore8(long8, size_t, long *);
+void __ovld vstore8(ulong8, size_t, ulong *);
+void __ovld vstore8(float8, size_t, float *);
+void __ovld vstore16(char16, size_t, char *);
+void __ovld vstore16(uchar16, size_t, uchar *);
+void __ovld vstore16(short16, size_t, short *);
+void __ovld vstore16(ushort16, size_t, ushort *);
+void __ovld vstore16(int16, size_t, int *);
+void __ovld vstore16(uint16, size_t, uint *);
+void __ovld vstore16(long16, size_t, long *);
+void __ovld vstore16(ulong16, size_t, ulong *);
+void __ovld vstore16(float16, size_t, float *);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2, size_t, double *);
+void __ovld vstore3(double3, size_t, double *);
+void __ovld vstore4(double4, size_t, double *);
+void __ovld vstore8(double8, size_t, double *);
+void __ovld vstore16(double16, size_t, double *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore2(half2, size_t, half *);
+void __ovld vstore3(half3, size_t, half *);
+void __ovld vstore4(half4, size_t, half *);
+void __ovld vstore8(half8, size_t, half *);
+void __ovld vstore16(half16, size_t, half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore2(char2, size_t, __global char *);
+void __ovld vstore2(uchar2, size_t, __global uchar *);
+void __ovld vstore2(short2, size_t, __global short *);
+void __ovld vstore2(ushort2, size_t, __global ushort *);
+void __ovld vstore2(int2, size_t, __global int *);
+void __ovld vstore2(uint2, size_t, __global uint *);
+void __ovld vstore2(long2, size_t, __global long *);
+void __ovld vstore2(ulong2, size_t, __global ulong *);
+void __ovld vstore2(float2, size_t, __global float *);
+void __ovld vstore3(char3, size_t, __global char *);
+void __ovld vstore3(uchar3, size_t, __global uchar *);
+void __ovld vstore3(short3, size_t, __global short *);
+void __ovld vstore3(ushort3, size_t, __global ushort *);
+void __ovld vstore3(int3, size_t, __global int *);
+void __ovld vstore3(uint3, size_t, __global uint *);
+void __ovld vstore3(long3, size_t, __global long *);
+void __ovld vstore3(ulong3, size_t, __global ulong *);
+void __ovld vstore3(float3, size_t, __global float *);
+void __ovld vstore4(char4, size_t, __global char *);
+void __ovld vstore4(uchar4, size_t, __global uchar *);
+void __ovld vstore4(short4, size_t, __global short *);
+void __ovld vstore4(ushort4, size_t, __global ushort *);
+void __ovld vstore4(int4, size_t, __global int *);
+void __ovld vstore4(uint4, size_t, __global uint *);
+void __ovld vstore4(long4, size_t, __global long *);
+void __ovld vstore4(ulong4, size_t, __global ulong *);
+void __ovld vstore4(float4, size_t, __global float *);
+void __ovld vstore8(char8, size_t, __global char *);
+void __ovld vstore8(uchar8, size_t, __global uchar *);
+void __ovld vstore8(short8, size_t, __global short *);
+void __ovld vstore8(ushort8, size_t, __global ushort *);
+void __ovld vstore8(int8, size_t, __global int *);
+void __ovld vstore8(uint8, size_t, __global uint *);
+void __ovld vstore8(long8, size_t, __global long *);
+void __ovld vstore8(ulong8, size_t, __global ulong *);
+void __ovld vstore8(float8, size_t, __global float *);
+void __ovld vstore16(char16, size_t, __global char *);
+void __ovld vstore16(uchar16, size_t, __global uchar *);
+void __ovld vstore16(short16, size_t, __global short *);
+void __ovld vstore16(ushort16, size_t, __global ushort *);
+void __ovld vstore16(int16, size_t, __global int *);
+void __ovld vstore16(uint16, size_t, __global uint *);
+void __ovld vstore16(long16, size_t, __global long *);
+void __ovld vstore16(ulong16, size_t, __global ulong *);
+void __ovld vstore16(float16, size_t, __global float *);
+void __ovld vstore2(char2, size_t, __local char *);
+void __ovld vstore2(uchar2, size_t, __local uchar *);
+void __ovld vstore2(short2, size_t, __local short *);
+void __ovld vstore2(ushort2, size_t, __local ushort *);
+void __ovld vstore2(int2, size_t, __local int *);
+void __ovld vstore2(uint2, size_t, __local uint *);
+void __ovld vstore2(long2, size_t, __local long *);
+void __ovld vstore2(ulong2, size_t, __local ulong *);
+void __ovld vstore2(float2, size_t, __local float *);
+void __ovld vstore3(char3, size_t, __local char *);
+void __ovld vstore3(uchar3, size_t, __local uchar *);
+void __ovld vstore3(short3, size_t, __local short *);
+void __ovld vstore3(ushort3, size_t, __local ushort *);
+void __ovld vstore3(int3, size_t, __local int *);
+void __ovld vstore3(uint3, size_t, __local uint *);
+void __ovld vstore3(long3, size_t, __local long *);
+void __ovld vstore3(ulong3, size_t, __local ulong *);
+void __ovld vstore3(float3, size_t, __local float *);
+void __ovld vstore4(char4, size_t, __local char *);
+void __ovld vstore4(uchar4, size_t, __local uchar *);
+void __ovld vstore4(short4, size_t, __local short *);
+void __ovld vstore4(ushort4, size_t, __local ushort *);
+void __ovld vstore4(int4, size_t, __local int *);
+void __ovld vstore4(uint4, size_t, __local uint *);
+void __ovld vstore4(long4, size_t, __local long *);
+void __ovld vstore4(ulong4, size_t, __local ulong *);
+void __ovld vstore4(float4, size_t, __local float *);
+void __ovld vstore8(char8, size_t, __local char *);
+void __ovld vstore8(uchar8, size_t, __local uchar *);
+void __ovld vstore8(short8, size_t, __local short *);
+void __ovld vstore8(ushort8, size_t, __local ushort *);
+void __ovld vstore8(int8, size_t, __local int *);
+void __ovld vstore8(uint8, size_t, __local uint *);
+void __ovld vstore8(long8, size_t, __local long *);
+void __ovld vstore8(ulong8, size_t, __local ulong *);
+void __ovld vstore8(float8, size_t, __local float *);
+void __ovld vstore16(char16, size_t, __local char *);
+void __ovld vstore16(uchar16, size_t, __local uchar *);
+void __ovld vstore16(short16, size_t, __local short *);
+void __ovld vstore16(ushort16, size_t, __local ushort *);
+void __ovld vstore16(int16, size_t, __local int *);
+void __ovld vstore16(uint16, size_t, __local uint *);
+void __ovld vstore16(long16, size_t, __local long *);
+void __ovld vstore16(ulong16, size_t, __local ulong *);
+void __ovld vstore16(float16, size_t, __local float *);
+void __ovld vstore2(char2, size_t, __private char *);
+void __ovld vstore2(uchar2, size_t, __private uchar *);
+void __ovld vstore2(short2, size_t, __private short *);
+void __ovld vstore2(ushort2, size_t, __private ushort *);
+void __ovld vstore2(int2, size_t, __private int *);
+void __ovld vstore2(uint2, size_t, __private uint *);
+void __ovld vstore2(long2, size_t, __private long *);
+void __ovld vstore2(ulong2, size_t, __private ulong *);
+void __ovld vstore2(float2, size_t, __private float *);
+void __ovld vstore3(char3, size_t, __private char *);
+void __ovld vstore3(uchar3, size_t, __private uchar *);
+void __ovld vstore3(short3, size_t, __private short *);
+void __ovld vstore3(ushort3, size_t, __private ushort *);
+void __ovld vstore3(int3, size_t, __private int *);
+void __ovld vstore3(uint3, size_t, __private uint *);
+void __ovld vstore3(long3, size_t, __private long *);
+void __ovld vstore3(ulong3, size_t, __private ulong *);
+void __ovld vstore3(float3, size_t, __private float *);
+void __ovld vstore4(char4, size_t, __private char *);
+void __ovld vstore4(uchar4, size_t, __private uchar *);
+void __ovld vstore4(short4, size_t, __private short *);
+void __ovld vstore4(ushort4, size_t, __private ushort *);
+void __ovld vstore4(int4, size_t, __private int *);
+void __ovld vstore4(uint4, size_t, __private uint *);
+void __ovld vstore4(long4, size_t, __private long *);
+void __ovld vstore4(ulong4, size_t, __private ulong *);
+void __ovld vstore4(float4, size_t, __private float *);
+void __ovld vstore8(char8, size_t, __private char *);
+void __ovld vstore8(uchar8, size_t, __private uchar *);
+void __ovld vstore8(short8, size_t, __private short *);
+void __ovld vstore8(ushort8, size_t, __private ushort *);
+void __ovld vstore8(int8, size_t, __private int *);
+void __ovld vstore8(uint8, size_t, __private uint *);
+void __ovld vstore8(long8, size_t, __private long *);
+void __ovld vstore8(ulong8, size_t, __private ulong *);
+void __ovld vstore8(float8, size_t, __private float *);
+void __ovld vstore16(char16, size_t, __private char *);
+void __ovld vstore16(uchar16, size_t, __private uchar *);
+void __ovld vstore16(short16, size_t, __private short *);
+void __ovld vstore16(ushort16, size_t, __private ushort *);
+void __ovld vstore16(int16, size_t, __private int *);
+void __ovld vstore16(uint16, size_t, __private uint *);
+void __ovld vstore16(long16, size_t, __private long *);
+void __ovld vstore16(ulong16, size_t, __private ulong *);
+void __ovld vstore16(float16, size_t, __private float *);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2, size_t, __global double *);
+void __ovld vstore3(double3, size_t, __global double *);
+void __ovld vstore4(double4, size_t, __global double *);
+void __ovld vstore8(double8, size_t, __global double *);
+void __ovld vstore16(double16, size_t, __global double *);
+void __ovld vstore2(double2, size_t, __local double *);
+void __ovld vstore3(double3, size_t, __local double *);
+void __ovld vstore4(double4, size_t, __local double *);
+void __ovld vstore8(double8, size_t, __local double *);
+void __ovld vstore16(double16, size_t, __local double *);
+void __ovld vstore2(double2, size_t, __private double *);
+void __ovld vstore3(double3, size_t, __private double *);
+void __ovld vstore4(double4, size_t, __private double *);
+void __ovld vstore8(double8, size_t, __private double *);
+void __ovld vstore16(double16, size_t, __private double *);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore2(half2, size_t, __global half *);
+void __ovld vstore3(half3, size_t, __global half *);
+void __ovld vstore4(half4, size_t, __global half *);
+void __ovld vstore8(half8, size_t, __global half *);
+void __ovld vstore16(half16, size_t, __global half *);
+void __ovld vstore2(half2, size_t, __local half *);
+void __ovld vstore3(half3, size_t, __local half *);
+void __ovld vstore4(half4, size_t, __local half *);
+void __ovld vstore8(half8, size_t, __local half *);
+void __ovld vstore16(half16, size_t, __local half *);
+void __ovld vstore2(half2, size_t, __private half *);
+void __ovld vstore3(half3, size_t, __private half *);
+void __ovld vstore4(half4, size_t, __private half *);
+void __ovld vstore8(half8, size_t, __private half *);
+void __ovld vstore16(half16, size_t, __private half *);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
+/**
+ * Read sizeof (half) bytes of data from address
+ * (p + offset). The data read is interpreted as a
+ * half value, which is converted to a float and
+ * returned.
+ * The read address computed as (p + offset)
+ * must be 16-bit aligned.
+ */
+float __ovld __purefn vload_half(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float __ovld __purefn vload_half(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld __purefn vload_half(size_t, const __global half *);
+float __ovld __purefn vload_half(size_t, const __local half *);
+float __ovld __purefn vload_half(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
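+// Editorial sketch: vload_half always widens to float — note there is no
+// half-returning overload above, even under cl_khr_fp16:
+//
+//   float f = vload_half(i, half_buf);  // half_buf[i] read and converted
+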
+/**
+ * Read sizeof (halfn) bytes of data from address
+ * (p + (offset * n)). The data read is interpreted
+ * as a halfn value, which is converted to a
+ * floatn value and returned. The read address computed
+ * as (p + (offset * n)) must be 16-bit aligned.
+ */
+float2 __ovld __purefn vload_half2(size_t, const __constant half *);
+float3 __ovld __purefn vload_half3(size_t, const __constant half *);
+float4 __ovld __purefn vload_half4(size_t, const __constant half *);
+float8 __ovld __purefn vload_half8(size_t, const __constant half *);
+float16 __ovld __purefn vload_half16(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float2 __ovld __purefn vload_half2(size_t, const half *);
+float3 __ovld __purefn vload_half3(size_t, const half *);
+float4 __ovld __purefn vload_half4(size_t, const half *);
+float8 __ovld __purefn vload_half8(size_t, const half *);
+float16 __ovld __purefn vload_half16(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vload_half2(size_t, const __global half *);
+float3 __ovld __purefn vload_half3(size_t, const __global half *);
+float4 __ovld __purefn vload_half4(size_t, const __global half *);
+float8 __ovld __purefn vload_half8(size_t, const __global half *);
+float16 __ovld __purefn vload_half16(size_t, const __global half *);
+float2 __ovld __purefn vload_half2(size_t, const __local half *);
+float3 __ovld __purefn vload_half3(size_t, const __local half *);
+float4 __ovld __purefn vload_half4(size_t, const __local half *);
+float8 __ovld __purefn vload_half8(size_t, const __local half *);
+float16 __ovld __purefn vload_half16(size_t, const __local half *);
+float2 __ovld __purefn vload_half2(size_t, const __private half *);
+float3 __ovld __purefn vload_half3(size_t, const __private half *);
+float4 __ovld __purefn vload_half4(size_t, const __private half *);
+float8 __ovld __purefn vload_half8(size_t, const __private half *);
+float16 __ovld __purefn vload_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
+/**
+ * The float value given by data is first
+ * converted to a half value using the appropriate
+ * rounding mode. The half value is then written
+ * to the address computed as (p + offset), which
+ * must be 16-bit aligned.
+ * vstore_half uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half(float, size_t, half *);
+void __ovld vstore_half_rte(float, size_t, half *);
+void __ovld vstore_half_rtz(float, size_t, half *);
+void __ovld vstore_half_rtp(float, size_t, half *);
+void __ovld vstore_half_rtn(float, size_t, half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double, size_t, half *);
+void __ovld vstore_half_rte(double, size_t, half *);
+void __ovld vstore_half_rtz(double, size_t, half *);
+void __ovld vstore_half_rtp(double, size_t, half *);
+void __ovld vstore_half_rtn(double, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half(float, size_t, __global half *);
+void __ovld vstore_half_rte(float, size_t, __global half *);
+void __ovld vstore_half_rtz(float, size_t, __global half *);
+void __ovld vstore_half_rtp(float, size_t, __global half *);
+void __ovld vstore_half_rtn(float, size_t, __global half *);
+void __ovld vstore_half(float, size_t, __local half *);
+void __ovld vstore_half_rte(float, size_t, __local half *);
+void __ovld vstore_half_rtz(float, size_t, __local half *);
+void __ovld vstore_half_rtp(float, size_t, __local half *);
+void __ovld vstore_half_rtn(float, size_t, __local half *);
+void __ovld vstore_half(float, size_t, __private half *);
+void __ovld vstore_half_rte(float, size_t, __private half *);
+void __ovld vstore_half_rtz(float, size_t, __private half *);
+void __ovld vstore_half_rtp(float, size_t, __private half *);
+void __ovld vstore_half_rtn(float, size_t, __private half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double, size_t, __global half *);
+void __ovld vstore_half_rte(double, size_t, __global half *);
+void __ovld vstore_half_rtz(double, size_t, __global half *);
+void __ovld vstore_half_rtp(double, size_t, __global half *);
+void __ovld vstore_half_rtn(double, size_t, __global half *);
+void __ovld vstore_half(double, size_t, __local half *);
+void __ovld vstore_half_rte(double, size_t, __local half *);
+void __ovld vstore_half_rtz(double, size_t, __local half *);
+void __ovld vstore_half_rtp(double, size_t, __local half *);
+void __ovld vstore_half_rtn(double, size_t, __local half *);
+void __ovld vstore_half(double, size_t, __private half *);
+void __ovld vstore_half_rte(double, size_t, __private half *);
+void __ovld vstore_half_rtz(double, size_t, __private half *);
+void __ovld vstore_half_rtp(double, size_t, __private half *);
+void __ovld vstore_half_rtn(double, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
+
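+// Editorial sketch: the _rte/_rtz/_rtp/_rtn suffixes pin the float-to-half
+// rounding mode; the unsuffixed form uses the current (default: round to
+// nearest even) mode:
+//
+//   vstore_half(f, i, dst);      // current rounding mode
+//   vstore_half_rtz(f, i, dst);  // always round toward zero
+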
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode. The halfn value is then written to the
+ * address computed as (p + (offset * n)), which
+ * must be 16-bit aligned.
+ * vstore_halfn uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half2(float2, size_t, half *);
+void __ovld vstore_half3(float3, size_t, half *);
+void __ovld vstore_half4(float4, size_t, half *);
+void __ovld vstore_half8(float8, size_t, half *);
+void __ovld vstore_half16(float16, size_t, half *);
+void __ovld vstore_half2_rte(float2, size_t, half *);
+void __ovld vstore_half3_rte(float3, size_t, half *);
+void __ovld vstore_half4_rte(float4, size_t, half *);
+void __ovld vstore_half8_rte(float8, size_t, half *);
+void __ovld vstore_half16_rte(float16, size_t, half *);
+void __ovld vstore_half2_rtz(float2, size_t, half *);
+void __ovld vstore_half3_rtz(float3, size_t, half *);
+void __ovld vstore_half4_rtz(float4, size_t, half *);
+void __ovld vstore_half8_rtz(float8, size_t, half *);
+void __ovld vstore_half16_rtz(float16, size_t, half *);
+void __ovld vstore_half2_rtp(float2, size_t, half *);
+void __ovld vstore_half3_rtp(float3, size_t, half *);
+void __ovld vstore_half4_rtp(float4, size_t, half *);
+void __ovld vstore_half8_rtp(float8, size_t, half *);
+void __ovld vstore_half16_rtp(float16, size_t, half *);
+void __ovld vstore_half2_rtn(float2, size_t, half *);
+void __ovld vstore_half3_rtn(float3, size_t, half *);
+void __ovld vstore_half4_rtn(float4, size_t, half *);
+void __ovld vstore_half8_rtn(float8, size_t, half *);
+void __ovld vstore_half16_rtn(float16, size_t, half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2, size_t, half *);
+void __ovld vstore_half3(double3, size_t, half *);
+void __ovld vstore_half4(double4, size_t, half *);
+void __ovld vstore_half8(double8, size_t, half *);
+void __ovld vstore_half16(double16, size_t, half *);
+void __ovld vstore_half2_rte(double2, size_t, half *);
+void __ovld vstore_half3_rte(double3, size_t, half *);
+void __ovld vstore_half4_rte(double4, size_t, half *);
+void __ovld vstore_half8_rte(double8, size_t, half *);
+void __ovld vstore_half16_rte(double16, size_t, half *);
+void __ovld vstore_half2_rtz(double2, size_t, half *);
+void __ovld vstore_half3_rtz(double3, size_t, half *);
+void __ovld vstore_half4_rtz(double4, size_t, half *);
+void __ovld vstore_half8_rtz(double8, size_t, half *);
+void __ovld vstore_half16_rtz(double16, size_t, half *);
+void __ovld vstore_half2_rtp(double2, size_t, half *);
+void __ovld vstore_half3_rtp(double3, size_t, half *);
+void __ovld vstore_half4_rtp(double4, size_t, half *);
+void __ovld vstore_half8_rtp(double8, size_t, half *);
+void __ovld vstore_half16_rtp(double16, size_t, half *);
+void __ovld vstore_half2_rtn(double2, size_t, half *);
+void __ovld vstore_half3_rtn(double3, size_t, half *);
+void __ovld vstore_half4_rtn(double4, size_t, half *);
+void __ovld vstore_half8_rtn(double8, size_t, half *);
+void __ovld vstore_half16_rtn(double16, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
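+// Editorial sketch: the vector forms behave like n scalar conversions with
+// the same rounding suffix, written to n consecutive half slots:
+//
+//   vstore_half4_rtp(v, i, dst);  // dst[4*i .. 4*i+3], rounded toward +inf
+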
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half2(float2, size_t, __global half *);
+void __ovld vstore_half3(float3, size_t, __global half *);
+void __ovld vstore_half4(float4, size_t, __global half *);
+void __ovld vstore_half8(float8, size_t, __global half *);
+void __ovld vstore_half16(float16, size_t, __global half *);
+void __ovld vstore_half2_rte(float2, size_t, __global half *);
+void __ovld vstore_half3_rte(float3, size_t, __global half *);
+void __ovld vstore_half4_rte(float4, size_t, __global half *);
+void __ovld vstore_half8_rte(float8, size_t, __global half *);
+void __ovld vstore_half16_rte(float16, size_t, __global half *);
+void __ovld vstore_half2_rtz(float2, size_t, __global half *);
+void __ovld vstore_half3_rtz(float3, size_t, __global half *);
+void __ovld vstore_half4_rtz(float4, size_t, __global half *);
+void __ovld vstore_half8_rtz(float8, size_t, __global half *);
+void __ovld vstore_half16_rtz(float16, size_t, __global half *);
+void __ovld vstore_half2_rtp(float2, size_t, __global half *);
+void __ovld vstore_half3_rtp(float3, size_t, __global half *);
+void __ovld vstore_half4_rtp(float4, size_t, __global half *);
+void __ovld vstore_half8_rtp(float8, size_t, __global half *);
+void __ovld vstore_half16_rtp(float16, size_t, __global half *);
+void __ovld vstore_half2_rtn(float2, size_t, __global half *);
+void __ovld vstore_half3_rtn(float3, size_t, __global half *);
+void __ovld vstore_half4_rtn(float4, size_t, __global half *);
+void __ovld vstore_half8_rtn(float8, size_t, __global half *);
+void __ovld vstore_half16_rtn(float16, size_t, __global half *);
+void __ovld vstore_half2(float2, size_t, __local half *);
+void __ovld vstore_half3(float3, size_t, __local half *);
+void __ovld vstore_half4(float4, size_t, __local half *);
+void __ovld vstore_half8(float8, size_t, __local half *);
+void __ovld vstore_half16(float16, size_t, __local half *);
+void __ovld vstore_half2_rte(float2, size_t, __local half *);
+void __ovld vstore_half3_rte(float3, size_t, __local half *);
+void __ovld vstore_half4_rte(float4, size_t, __local half *);
+void __ovld vstore_half8_rte(float8, size_t, __local half *);
+void __ovld vstore_half16_rte(float16, size_t, __local half *);
+void __ovld vstore_half2_rtz(float2, size_t, __local half *);
+void __ovld vstore_half3_rtz(float3, size_t, __local half *);
+void __ovld vstore_half4_rtz(float4, size_t, __local half *);
+void __ovld vstore_half8_rtz(float8, size_t, __local half *);
+void __ovld vstore_half16_rtz(float16, size_t, __local half *);
+void __ovld vstore_half2_rtp(float2, size_t, __local half *);
+void __ovld vstore_half3_rtp(float3, size_t, __local half *);
+void __ovld vstore_half4_rtp(float4, size_t, __local half *);
+void __ovld vstore_half8_rtp(float8, size_t, __local half *);
+void __ovld vstore_half16_rtp(float16, size_t, __local half *);
+void __ovld vstore_half2_rtn(float2, size_t, __local half *);
+void __ovld vstore_half3_rtn(float3, size_t, __local half *);
+void __ovld vstore_half4_rtn(float4, size_t, __local half *);
+void __ovld vstore_half8_rtn(float8, size_t, __local half *);
+void __ovld vstore_half16_rtn(float16, size_t, __local half *);
+void __ovld vstore_half2(float2, size_t, __private half *);
+void __ovld vstore_half3(float3, size_t, __private half *);
+void __ovld vstore_half4(float4, size_t, __private half *);
+void __ovld vstore_half8(float8, size_t, __private half *);
+void __ovld vstore_half16(float16, size_t, __private half *);
+void __ovld vstore_half2_rte(float2, size_t, __private half *);
+void __ovld vstore_half3_rte(float3, size_t, __private half *);
+void __ovld vstore_half4_rte(float4, size_t, __private half *);
+void __ovld vstore_half8_rte(float8, size_t, __private half *);
+void __ovld vstore_half16_rte(float16, size_t, __private half *);
+void __ovld vstore_half2_rtz(float2, size_t, __private half *);
+void __ovld vstore_half3_rtz(float3, size_t, __private half *);
+void __ovld vstore_half4_rtz(float4, size_t, __private half *);
+void __ovld vstore_half8_rtz(float8, size_t, __private half *);
+void __ovld vstore_half16_rtz(float16, size_t, __private half *);
+void __ovld vstore_half2_rtp(float2, size_t, __private half *);
+void __ovld vstore_half3_rtp(float3, size_t, __private half *);
+void __ovld vstore_half4_rtp(float4, size_t, __private half *);
+void __ovld vstore_half8_rtp(float8, size_t, __private half *);
+void __ovld vstore_half16_rtp(float16, size_t, __private half *);
+void __ovld vstore_half2_rtn(float2, size_t, __private half *);
+void __ovld vstore_half3_rtn(float3, size_t, __private half *);
+void __ovld vstore_half4_rtn(float4, size_t, __private half *);
+void __ovld vstore_half8_rtn(float8, size_t, __private half *);
+void __ovld vstore_half16_rtn(float16, size_t, __private half *);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2, size_t, __global half *);
+void __ovld vstore_half3(double3, size_t, __global half *);
+void __ovld vstore_half4(double4, size_t, __global half *);
+void __ovld vstore_half8(double8, size_t, __global half *);
+void __ovld vstore_half16(double16, size_t, __global half *);
+void __ovld vstore_half2_rte(double2, size_t, __global half *);
+void __ovld vstore_half3_rte(double3, size_t, __global half *);
+void __ovld vstore_half4_rte(double4, size_t, __global half *);
+void __ovld vstore_half8_rte(double8, size_t, __global half *);
+void __ovld vstore_half16_rte(double16, size_t, __global half *);
+void __ovld vstore_half2_rtz(double2, size_t, __global half *);
+void __ovld vstore_half3_rtz(double3, size_t, __global half *);
+void __ovld vstore_half4_rtz(double4, size_t, __global half *);
+void __ovld vstore_half8_rtz(double8, size_t, __global half *);
+void __ovld vstore_half16_rtz(double16, size_t, __global half *);
+void __ovld vstore_half2_rtp(double2, size_t, __global half *);
+void __ovld vstore_half3_rtp(double3, size_t, __global half *);
+void __ovld vstore_half4_rtp(double4, size_t, __global half *);
+void __ovld vstore_half8_rtp(double8, size_t, __global half *);
+void __ovld vstore_half16_rtp(double16, size_t, __global half *);
+void __ovld vstore_half2_rtn(double2, size_t, __global half *);
+void __ovld vstore_half3_rtn(double3, size_t, __global half *);
+void __ovld vstore_half4_rtn(double4, size_t, __global half *);
+void __ovld vstore_half8_rtn(double8, size_t, __global half *);
+void __ovld vstore_half16_rtn(double16, size_t, __global half *);
+void __ovld vstore_half2(double2, size_t, __local half *);
+void __ovld vstore_half3(double3, size_t, __local half *);
+void __ovld vstore_half4(double4, size_t, __local half *);
+void __ovld vstore_half8(double8, size_t, __local half *);
+void __ovld vstore_half16(double16, size_t, __local half *);
+void __ovld vstore_half2_rte(double2, size_t, __local half *);
+void __ovld vstore_half3_rte(double3, size_t, __local half *);
+void __ovld vstore_half4_rte(double4, size_t, __local half *);
+void __ovld vstore_half8_rte(double8, size_t, __local half *);
+void __ovld vstore_half16_rte(double16, size_t, __local half *);
+void __ovld vstore_half2_rtz(double2, size_t, __local half *);
+void __ovld vstore_half3_rtz(double3, size_t, __local half *);
+void __ovld vstore_half4_rtz(double4, size_t, __local half *);
+void __ovld vstore_half8_rtz(double8, size_t, __local half *);
+void __ovld vstore_half16_rtz(double16, size_t, __local half *);
+void __ovld vstore_half2_rtp(double2, size_t, __local half *);
+void __ovld vstore_half3_rtp(double3, size_t, __local half *);
+void __ovld vstore_half4_rtp(double4, size_t, __local half *);
+void __ovld vstore_half8_rtp(double8, size_t, __local half *);
+void __ovld vstore_half16_rtp(double16, size_t, __local half *);
+void __ovld vstore_half2_rtn(double2, size_t, __local half *);
+void __ovld vstore_half3_rtn(double3, size_t, __local half *);
+void __ovld vstore_half4_rtn(double4, size_t, __local half *);
+void __ovld vstore_half8_rtn(double8, size_t, __local half *);
+void __ovld vstore_half16_rtn(double16, size_t, __local half *);
+void __ovld vstore_half2(double2, size_t, __private half *);
+void __ovld vstore_half3(double3, size_t, __private half *);
+void __ovld vstore_half4(double4, size_t, __private half *);
+void __ovld vstore_half8(double8, size_t, __private half *);
+void __ovld vstore_half16(double16, size_t, __private half *);
+void __ovld vstore_half2_rte(double2, size_t, __private half *);
+void __ovld vstore_half3_rte(double3, size_t, __private half *);
+void __ovld vstore_half4_rte(double4, size_t, __private half *);
+void __ovld vstore_half8_rte(double8, size_t, __private half *);
+void __ovld vstore_half16_rte(double16, size_t, __private half *);
+void __ovld vstore_half2_rtz(double2, size_t, __private half *);
+void __ovld vstore_half3_rtz(double3, size_t, __private half *);
+void __ovld vstore_half4_rtz(double4, size_t, __private half *);
+void __ovld vstore_half8_rtz(double8, size_t, __private half *);
+void __ovld vstore_half16_rtz(double16, size_t, __private half *);
+void __ovld vstore_half2_rtp(double2, size_t, __private half *);
+void __ovld vstore_half3_rtp(double3, size_t, __private half *);
+void __ovld vstore_half4_rtp(double4, size_t, __private half *);
+void __ovld vstore_half8_rtp(double8, size_t, __private half *);
+void __ovld vstore_half16_rtp(double16, size_t, __private half *);
+void __ovld vstore_half2_rtn(double2, size_t, __private half *);
+void __ovld vstore_half3_rtn(double3, size_t, __private half *);
+void __ovld vstore_half4_rtn(double4, size_t, __private half *);
+void __ovld vstore_half8_rtn(double8, size_t, __private half *);
+void __ovld vstore_half16_rtn(double16, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
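+
+/*
+ * Illustrative sketch, not part of the header: the _rte/_rtz/_rtp/_rtn
+ * suffixes above select the rounding mode used for the float-to-half
+ * conversion. Kernel and buffer names here are hypothetical.
+ *
+ *   __kernel void pack_half4(__global const float4 *src, __global half *dst) {
+ *     size_t i = get_global_id(0);
+ *     vstore_half4_rtz(src[i], i, dst);  // round toward zero; writes dst[4*i .. 4*i+3]
+ *   }
+ */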
+
+/**
+ * For n = 1, 2, 4, 8 and 16, read sizeof (halfn)
+ * bytes of data from address (p + (offset * n)).
+ * The data read is interpreted as a halfn value.
+ * The halfn value read is converted to a floatn
+ * value and the floatn value is returned.
+ * The address computed as (p + (offset * n))
+ * must be aligned to sizeof (halfn) bytes.
+ * For n = 3, vloada_half3 reads a half3 from
+ * address (p + (offset * 4)) and returns a float3.
+ * The address computed as (p + (offset * 4))
+ * must be aligned to sizeof (half) * 4 bytes.
+ */
+float2 __ovld __purefn vloada_half2(size_t, const __constant half *);
+float3 __ovld __purefn vloada_half3(size_t, const __constant half *);
+float4 __ovld __purefn vloada_half4(size_t, const __constant half *);
+float8 __ovld __purefn vloada_half8(size_t, const __constant half *);
+float16 __ovld __purefn vloada_half16(size_t, const __constant half *);
+#if defined(__opencl_c_generic_address_space)
+float2 __ovld __purefn vloada_half2(size_t, const half *);
+float3 __ovld __purefn vloada_half3(size_t, const half *);
+float4 __ovld __purefn vloada_half4(size_t, const half *);
+float8 __ovld __purefn vloada_half8(size_t, const half *);
+float16 __ovld __purefn vloada_half16(size_t, const half *);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vloada_half2(size_t, const __global half *);
+float3 __ovld __purefn vloada_half3(size_t, const __global half *);
+float4 __ovld __purefn vloada_half4(size_t, const __global half *);
+float8 __ovld __purefn vloada_half8(size_t, const __global half *);
+float16 __ovld __purefn vloada_half16(size_t, const __global half *);
+float2 __ovld __purefn vloada_half2(size_t, const __local half *);
+float3 __ovld __purefn vloada_half3(size_t, const __local half *);
+float4 __ovld __purefn vloada_half4(size_t, const __local half *);
+float8 __ovld __purefn vloada_half8(size_t, const __local half *);
+float16 __ovld __purefn vloada_half16(size_t, const __local half *);
+float2 __ovld __purefn vloada_half2(size_t, const __private half *);
+float3 __ovld __purefn vloada_half3(size_t, const __private half *);
+float4 __ovld __purefn vloada_half4(size_t, const __private half *);
+float8 __ovld __purefn vloada_half8(size_t, const __private half *);
+float16 __ovld __purefn vloada_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
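+
+/*
+ * Illustrative sketch, not part of the header: vloada_half4 reads four
+ * halfs from an 8-byte-aligned address and returns them widened to a
+ * float4. Kernel and buffer names here are hypothetical.
+ *
+ *   __kernel void unpack_half4(__global const half *src, __global float4 *dst) {
+ *     size_t i = get_global_id(0);
+ *     dst[i] = vloada_half4(i, src);  // reads src + 4*i; must be 8-byte aligned
+ *   }
+ */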
+
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode.
+ * For n = 1, 2, 4, 8 and 16, the halfn value is
+ * written to the address computed as (p + (offset
+ * * n)). The address computed as (p + (offset *
+ * n)) must be aligned to sizeof (halfn) bytes.
+ * For n = 3, the half3 value is written to the
+ * address computed as (p + (offset * 4)). The
+ * address computed as (p + (offset * 4)) must be
+ * aligned to sizeof (half) * 4 bytes.
+ * vstorea_halfn uses the current rounding
+ * mode. The default current rounding mode is
+ * round to nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstorea_half2(float2, size_t, half *);
+void __ovld vstorea_half3(float3, size_t, half *);
+void __ovld vstorea_half4(float4, size_t, half *);
+void __ovld vstorea_half8(float8, size_t, half *);
+void __ovld vstorea_half16(float16, size_t, half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, half *);
+void __ovld vstorea_half3_rte(float3, size_t, half *);
+void __ovld vstorea_half4_rte(float4, size_t, half *);
+void __ovld vstorea_half8_rte(float8, size_t, half *);
+void __ovld vstorea_half16_rte(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, half *);
+void __ovld vstorea_half3_rtz(float3, size_t, half *);
+void __ovld vstorea_half4_rtz(float4, size_t, half *);
+void __ovld vstorea_half8_rtz(float8, size_t, half *);
+void __ovld vstorea_half16_rtz(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, half *);
+void __ovld vstorea_half3_rtp(float3, size_t, half *);
+void __ovld vstorea_half4_rtp(float4, size_t, half *);
+void __ovld vstorea_half8_rtp(float8, size_t, half *);
+void __ovld vstorea_half16_rtp(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, half *);
+void __ovld vstorea_half3_rtn(float3, size_t, half *);
+void __ovld vstorea_half4_rtn(float4, size_t, half *);
+void __ovld vstorea_half8_rtn(float8, size_t, half *);
+void __ovld vstorea_half16_rtn(float16, size_t, half *);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half2(double2, size_t, half *);
+void __ovld vstorea_half3(double3, size_t, half *);
+void __ovld vstorea_half4(double4, size_t, half *);
+void __ovld vstorea_half8(double8, size_t, half *);
+void __ovld vstorea_half16(double16, size_t, half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, half *);
+void __ovld vstorea_half3_rte(double3, size_t, half *);
+void __ovld vstorea_half4_rte(double4, size_t, half *);
+void __ovld vstorea_half8_rte(double8, size_t, half *);
+void __ovld vstorea_half16_rte(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, half *);
+void __ovld vstorea_half3_rtz(double3, size_t, half *);
+void __ovld vstorea_half4_rtz(double4, size_t, half *);
+void __ovld vstorea_half8_rtz(double8, size_t, half *);
+void __ovld vstorea_half16_rtz(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, half *);
+void __ovld vstorea_half3_rtp(double3, size_t, half *);
+void __ovld vstorea_half4_rtp(double4, size_t, half *);
+void __ovld vstorea_half8_rtp(double8, size_t, half *);
+void __ovld vstorea_half16_rtp(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, half *);
+void __ovld vstorea_half3_rtn(double3, size_t, half *);
+void __ovld vstorea_half4_rtn(double4, size_t, half *);
+void __ovld vstorea_half8_rtn(double8, size_t, half *);
+void __ovld vstorea_half16_rtn(double16, size_t, half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstorea_half2(float2, size_t, __global half *);
+void __ovld vstorea_half3(float3, size_t, __global half *);
+void __ovld vstorea_half4(float4, size_t, __global half *);
+void __ovld vstorea_half8(float8, size_t, __global half *);
+void __ovld vstorea_half16(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __global half *);
+void __ovld vstorea_half3_rte(float3, size_t, __global half *);
+void __ovld vstorea_half4_rte(float4, size_t, __global half *);
+void __ovld vstorea_half8_rte(float8, size_t, __global half *);
+void __ovld vstorea_half16_rte(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __global half *);
+
+void __ovld vstorea_half2(float2, size_t, __local half *);
+void __ovld vstorea_half3(float3, size_t, __local half *);
+void __ovld vstorea_half4(float4, size_t, __local half *);
+void __ovld vstorea_half8(float8, size_t, __local half *);
+void __ovld vstorea_half16(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __local half *);
+void __ovld vstorea_half3_rte(float3, size_t, __local half *);
+void __ovld vstorea_half4_rte(float4, size_t, __local half *);
+void __ovld vstorea_half8_rte(float8, size_t, __local half *);
+void __ovld vstorea_half16_rte(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __local half *);
+
+void __ovld vstorea_half2(float2, size_t, __private half *);
+void __ovld vstorea_half3(float3, size_t, __private half *);
+void __ovld vstorea_half4(float4, size_t, __private half *);
+void __ovld vstorea_half8(float8, size_t, __private half *);
+void __ovld vstorea_half16(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __private half *);
+void __ovld vstorea_half3_rte(float3, size_t, __private half *);
+void __ovld vstorea_half4_rte(float4, size_t, __private half *);
+void __ovld vstorea_half8_rte(float8, size_t, __private half *);
+void __ovld vstorea_half16_rte(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __private half *);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half2(double2, size_t, __global half *);
+void __ovld vstorea_half3(double3, size_t, __global half *);
+void __ovld vstorea_half4(double4, size_t, __global half *);
+void __ovld vstorea_half8(double8, size_t, __global half *);
+void __ovld vstorea_half16(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __global half *);
+void __ovld vstorea_half3_rte(double3, size_t, __global half *);
+void __ovld vstorea_half4_rte(double4, size_t, __global half *);
+void __ovld vstorea_half8_rte(double8, size_t, __global half *);
+void __ovld vstorea_half16_rte(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __global half *);
+
+void __ovld vstorea_half2(double2, size_t, __local half *);
+void __ovld vstorea_half3(double3, size_t, __local half *);
+void __ovld vstorea_half4(double4, size_t, __local half *);
+void __ovld vstorea_half8(double8, size_t, __local half *);
+void __ovld vstorea_half16(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __local half *);
+void __ovld vstorea_half3_rte(double3, size_t, __local half *);
+void __ovld vstorea_half4_rte(double4, size_t, __local half *);
+void __ovld vstorea_half8_rte(double8, size_t, __local half *);
+void __ovld vstorea_half16_rte(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __local half *);
+
+void __ovld vstorea_half2(double2, size_t, __private half *);
+void __ovld vstorea_half3(double3, size_t, __private half *);
+void __ovld vstorea_half4(double4, size_t, __private half *);
+void __ovld vstorea_half8(double8, size_t, __private half *);
+void __ovld vstorea_half16(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __private half *);
+void __ovld vstorea_half3_rte(double3, size_t, __private half *);
+void __ovld vstorea_half4_rte(double4, size_t, __private half *);
+void __ovld vstorea_half8_rte(double8, size_t, __private half *);
+void __ovld vstorea_half16_rte(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __private half *);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_named_address_space_builtins)
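+
+/*
+ * Illustrative sketch, not part of the header: for n = 3 the "a" (aligned)
+ * variants use a stride of 4 elements, so vstorea_half3 writes to
+ * (p + offset * 4) where vstore_half3 writes to (p + offset * 3). Names
+ * here are hypothetical.
+ *
+ *   __kernel void pack_half3a(__global const float3 *src, __global half *dst) {
+ *     size_t i = get_global_id(0);
+ *     vstorea_half3(src[i], i, dst);  // writes dst[4*i .. 4*i+2]; dst + 4*i 8-byte aligned
+ *   }
+ */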
+
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+/**
+ * All work-items in a work-group executing the kernel
+ * on a processor must execute this function before any
+ * are allowed to continue execution beyond the barrier.
+ * This function must be encountered by all work-items in
+ * a work-group executing the kernel.
+ * If barrier is inside a conditional statement, then all
+ * work-items must enter the conditional if any work-item
+ * enters the conditional statement and executes the
+ * barrier.
+ * If barrier is inside a loop, all work-items must execute
+ * the barrier for each iteration of the loop before any are
+ * allowed to continue execution beyond the barrier.
+ * The barrier function also queues a memory fence
+ * (reads and writes) to ensure correct ordering of
+ * memory operations to local or global memory.
+ * The flags argument specifies the memory address space
+ * and can be set to a combination of the following literal
+ * values.
+ * CLK_LOCAL_MEM_FENCE - The barrier function
+ * will either flush any variables stored in local memory
+ * or queue a memory fence to ensure correct ordering of
+ * memory operations to local memory.
+ * CLK_GLOBAL_MEM_FENCE - The barrier function
+ * will queue a memory fence to ensure correct ordering
+ * of memory operations to global memory. This can be
+ * useful when work-items, for example, write to buffer or
+ * image objects and then want to read the updated data.
+ */
+
+void __ovld __conv barrier(cl_mem_fence_flags);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv work_group_barrier(cl_mem_fence_flags, memory_scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
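+
+/*
+ * Illustrative sketch, not part of the header: a tile of data is staged in
+ * local memory, and barrier() guarantees every work-item's store is visible
+ * before any work-item reads a neighbor's slot. Names here are hypothetical.
+ *
+ *   __kernel void reverse_tile(__global const float *in, __global float *out,
+ *                              __local float *tile) {
+ *     size_t lid = get_local_id(0);
+ *     tile[lid] = in[get_global_id(0)];
+ *     barrier(CLK_LOCAL_MEM_FENCE);  // all work-items must reach this point
+ *     out[get_global_id(0)] = tile[get_local_size(0) - 1 - lid];
+ *   }
+ */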
+
+// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
+
+/**
+ * Orders loads and stores of a work-item
+ * executing a kernel. This means that loads
+ * and stores preceding the mem_fence will
+ * be committed to memory before any loads
+ * and stores following the mem_fence.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld mem_fence(cl_mem_fence_flags);
+
+/**
+ * Read memory barrier that orders only
+ * loads.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld read_mem_fence(cl_mem_fence_flags);
+
+/**
+ * Write memory barrier that orders only
+ * stores.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld write_mem_fence(cl_mem_fence_flags);
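+
+/*
+ * Illustrative sketch, not part of the header: a per-work-item publish
+ * pattern. Note that write_mem_fence only orders this work-item's own
+ * stores; it is not a barrier across work-items. Names here are
+ * hypothetical.
+ *
+ *   __kernel void publish(__global float *data, __global volatile int *ready) {
+ *     size_t i = get_global_id(0);
+ *     data[i] = 42.0f;
+ *     write_mem_fence(CLK_GLOBAL_MEM_FENCE);  // payload committed first...
+ *     ready[i] = 1;                           // ...then the flag is raised
+ *   }
+ */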
+
+// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
+
+#if defined(__opencl_c_generic_address_space)
+cl_mem_fence_flags __ovld get_fence(const void *ptr);
+cl_mem_fence_flags __ovld get_fence(void *ptr);
+
+/**
+ * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
+ * and checked in Sema since they should be declared as
+ *   addr gentype* to_addr (gentype*);
+ * where gentype is builtin type or user defined type.
+ */
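+
+/*
+ * Illustrative sketch, not part of the header: get_fence recovers the fence
+ * flags matching a generic pointer's actual address space, so one helper can
+ * serve both __global and __local callers. Names here are hypothetical.
+ *
+ *   void store_and_fence(int *p, int v) {  // p is a generic-address-space pointer
+ *     *p = v;
+ *     mem_fence(get_fence(p));  // CLK_GLOBAL_ or CLK_LOCAL_MEM_FENCE as appropriate
+ *   }
+ */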
+
+#endif //defined(__opencl_c_generic_address_space)
+
+// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
+
+/**
+ * event_t async_work_group_copy (
+ * __global gentype *dst,
+ * const __local gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * Perform an async copy of num_elements
+ * gentype elements from src to dst. The async
+ * copy is performed by all work-items in a work-group
+ * and this built-in function must therefore
+ * be encountered by all work-items in a work-group
+ * executing the kernel with the same
+ * argument values; otherwise the results are
+ * undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the async_work_group_copy with
+ * a previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event object
+ * supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_copy(__local char *, const __global char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar *, const __global uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short *, const __global short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort *, const __global ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int *, const __global int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint *, const __global uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long *, const __global long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong *, const __global ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float *, const __global float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char2 *, const __global char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar2 *, const __global uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short2 *, const __global short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort2 *, const __global ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int2 *, const __global int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint2 *, const __global uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long2 *, const __global long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong2 *, const __global ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float2 *, const __global float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char3 *, const __global char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar3 *, const __global uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short3 *, const __global short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort3 *, const __global ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int3 *, const __global int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint3 *, const __global uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long3 *, const __global long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong3 *, const __global ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float3 *, const __global float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char4 *, const __global char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar4 *, const __global uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short4 *, const __global short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort4 *, const __global ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int4 *, const __global int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint4 *, const __global uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long4 *, const __global long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong4 *, const __global ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float4 *, const __global float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char8 *, const __global char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar8 *, const __global uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short8 *, const __global short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort8 *, const __global ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int8 *, const __global int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint8 *, const __global uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long8 *, const __global long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong8 *, const __global ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float8 *, const __global float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char16 *, const __global char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar16 *, const __global uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short16 *, const __global short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort16 *, const __global ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int16 *, const __global int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint16 *, const __global uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long16 *, const __global long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong16 *, const __global ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float16 *, const __global float16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char *, const __local char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar *, const __local uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short *, const __local short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort *, const __local ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int *, const __local int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint *, const __local uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long *, const __local long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong *, const __local ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float *, const __local float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char2 *, const __local char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar2 *, const __local uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short2 *, const __local short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort2 *, const __local ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int2 *, const __local int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint2 *, const __local uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long2 *, const __local long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong2 *, const __local ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float2 *, const __local float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char3 *, const __local char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar3 *, const __local uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short3 *, const __local short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort3 *, const __local ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int3 *, const __local int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint3 *, const __local uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long3 *, const __local long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong3 *, const __local ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float3 *, const __local float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char4 *, const __local char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar4 *, const __local uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short4 *, const __local short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort4 *, const __local ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int4 *, const __local int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint4 *, const __local uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long4 *, const __local long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong4 *, const __local ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float4 *, const __local float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char8 *, const __local char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar8 *, const __local uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short8 *, const __local short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort8 *, const __local ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int8 *, const __local int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint8 *, const __local uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long8 *, const __local long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong8 *, const __local ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float8 *, const __local float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char16 *, const __local char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar16 *, const __local uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short16 *, const __local short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort16 *, const __local ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int16 *, const __local int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint16 *, const __local uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long16 *, const __local long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong16 *, const __local ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float16 *, const __local float16 *, size_t, event_t);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_copy(__local double *, const __global double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double2 *, const __global double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double3 *, const __global double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double4 *, const __global double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double8 *, const __global double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double16 *, const __global double16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double *, const __local double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double2 *, const __local double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double3 *, const __local double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double4 *, const __local double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double8 *, const __local double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double16 *, const __local double16 *, size_t, event_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_copy(__local half *, const __global half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half2 *, const __global half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half3 *, const __global half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half4 *, const __global half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half8 *, const __global half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half16 *, const __global half16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half *, const __local half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half2 *, const __local half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half3 *, const __local half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half4 *, const __local half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half8 *, const __local half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half16 *, const __local half16 *, size_t, event_t);
+#endif //cl_khr_fp16
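+
+/*
+ * Illustrative sketch, not part of the header: every work-item in the group
+ * issues the same copy, then wait_group_events (declared later in this
+ * header) blocks until it completes and releases the event. Names here are
+ * hypothetical.
+ *
+ *   __kernel void stage(__global const float *src, __global float *dst,
+ *                       __local float *tmp) {
+ *     size_t n = get_local_size(0);
+ *     size_t base = get_group_id(0) * n;
+ *     event_t e = async_work_group_copy(tmp, src + base, n, 0);
+ *     wait_group_events(1, &e);  // same arguments in every work-item
+ *     dst[base + get_local_id(0)] = tmp[get_local_id(0)] * 2.0f;
+ *   }
+ */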
+
+/**
+ * Perform an async gather of num_elements
+ * gentype elements from src to dst. The
+ * src_stride is the stride in elements for each
+ * gentype element read from src. The dst_stride
+ * is the stride in elements for each gentype
+ * element written to dst. The async gather is
+ * performed by all work-items in a work-group.
+ * This built-in function must therefore be
+ * encountered by all work-items in a work-group
+ * executing the kernel with the same argument
+ * values; otherwise the results are undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the
+ * async_work_group_strided_copy with a
+ * previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event object
+ * supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_strided_copy(__local char *, const __global char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar *, const __global uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short *, const __global short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort *, const __global ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int *, const __global int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint *, const __global uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long *, const __global long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong *, const __global ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float *, const __global float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char2 *, const __global char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *, const __global uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short2 *, const __global short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *, const __global ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int2 *, const __global int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint2 *, const __global uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long2 *, const __global long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *, const __global ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float2 *, const __global float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char3 *, const __global char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *, const __global uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short3 *, const __global short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *, const __global ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int3 *, const __global int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint3 *, const __global uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long3 *, const __global long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *, const __global ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float3 *, const __global float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char4 *, const __global char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *, const __global uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short4 *, const __global short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *, const __global ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int4 *, const __global int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint4 *, const __global uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long4 *, const __global long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *, const __global ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float4 *, const __global float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char8 *, const __global char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *, const __global uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short8 *, const __global short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *, const __global ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int8 *, const __global int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint8 *, const __global uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long8 *, const __global long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *, const __global ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float8 *, const __global float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char16 *, const __global char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *, const __global uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short16 *, const __global short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *, const __global ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int16 *, const __global int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint16 *, const __global uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long16 *, const __global long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *, const __global ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float16 *, const __global float16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char *, const __local char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar *, const __local uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short *, const __local short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort *, const __local ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int *, const __local int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint *, const __local uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long *, const __local long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong *, const __local ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float *, const __local float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char2 *, const __local char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *, const __local uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short2 *, const __local short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *, const __local ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int2 *, const __local int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint2 *, const __local uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long2 *, const __local long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *, const __local ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float2 *, const __local float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char3 *, const __local char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *, const __local uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short3 *, const __local short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *, const __local ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int3 *, const __local int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint3 *, const __local uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long3 *, const __local long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *, const __local ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float3 *, const __local float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char4 *, const __local char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *, const __local uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short4 *, const __local short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *, const __local ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int4 *, const __local int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint4 *, const __local uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long4 *, const __local long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *, const __local ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float4 *, const __local float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char8 *, const __local char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *, const __local uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short8 *, const __local short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *, const __local ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int8 *, const __local int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint8 *, const __local uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long8 *, const __local long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *, const __local ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float8 *, const __local float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char16 *, const __local char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *, const __local uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short16 *, const __local short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *, const __local ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int16 *, const __local int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint16 *, const __local uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long16 *, const __local long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *, const __local ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float16 *, const __local float16 *, size_t, size_t, event_t);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_strided_copy(__local double *, const __global double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double2 *, const __global double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double3 *, const __global double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double4 *, const __global double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double8 *, const __global double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double16 *, const __global double16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double *, const __local double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double2 *, const __local double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double3 *, const __local double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double4 *, const __local double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double8 *, const __local double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double16 *, const __local double16 *, size_t, size_t, event_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_strided_copy(__local half *, const __global half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half2 *, const __global half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half3 *, const __global half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half4 *, const __global half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half8 *, const __global half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half16 *, const __global half16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half *, const __local half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half2 *, const __local half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half3 *, const __local half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half4 *, const __local half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half8 *, const __local half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half16 *, const __local half16 *, size_t, size_t, event_t);
+#endif //cl_khr_fp16
+
+/**
+ * Wait for events that identify the
+ * async_work_group_copy operations to
+ * complete. The event objects specified in
+ * event_list will be released after the wait is
+ * performed.
+ * This function must be encountered by all
+ * work-items in a work-group executing the kernel
+ * with the same num_events and event objects
+ * specified in event_list; otherwise the results
+ * are undefined.
+ */
+void __ovld wait_group_events(int, event_t *);
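+
+// Usage sketch (illustrative only, not part of the upstream header): stage a
+// strided run of floats into local memory with the copy builtins declared
+// above, then block until the transfer completes.
+//
+//   __kernel void gather(__global const float *src, __local float *tile,
+//                        uint stride) {
+//     event_t e = async_work_group_strided_copy(tile, src, get_local_size(0),
+//                                               (size_t)stride, 0);
+//     wait_group_events(1, &e);
+//     // tile is now safe for every work-item in the group to read.
+//   }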
+
+/**
+ * Prefetch num_elements * sizeof(gentype)
+ * bytes into the global cache. The prefetch
+ * instruction is applied to a work-item in a
+ * work-group and does not affect the functional
+ * behavior of the kernel.
+ */
+void __ovld prefetch(const __global char *, size_t);
+void __ovld prefetch(const __global uchar *, size_t);
+void __ovld prefetch(const __global short *, size_t);
+void __ovld prefetch(const __global ushort *, size_t);
+void __ovld prefetch(const __global int *, size_t);
+void __ovld prefetch(const __global uint *, size_t);
+void __ovld prefetch(const __global long *, size_t);
+void __ovld prefetch(const __global ulong *, size_t);
+void __ovld prefetch(const __global float *, size_t);
+void __ovld prefetch(const __global char2 *, size_t);
+void __ovld prefetch(const __global uchar2 *, size_t);
+void __ovld prefetch(const __global short2 *, size_t);
+void __ovld prefetch(const __global ushort2 *, size_t);
+void __ovld prefetch(const __global int2 *, size_t);
+void __ovld prefetch(const __global uint2 *, size_t);
+void __ovld prefetch(const __global long2 *, size_t);
+void __ovld prefetch(const __global ulong2 *, size_t);
+void __ovld prefetch(const __global float2 *, size_t);
+void __ovld prefetch(const __global char3 *, size_t);
+void __ovld prefetch(const __global uchar3 *, size_t);
+void __ovld prefetch(const __global short3 *, size_t);
+void __ovld prefetch(const __global ushort3 *, size_t);
+void __ovld prefetch(const __global int3 *, size_t);
+void __ovld prefetch(const __global uint3 *, size_t);
+void __ovld prefetch(const __global long3 *, size_t);
+void __ovld prefetch(const __global ulong3 *, size_t);
+void __ovld prefetch(const __global float3 *, size_t);
+void __ovld prefetch(const __global char4 *, size_t);
+void __ovld prefetch(const __global uchar4 *, size_t);
+void __ovld prefetch(const __global short4 *, size_t);
+void __ovld prefetch(const __global ushort4 *, size_t);
+void __ovld prefetch(const __global int4 *, size_t);
+void __ovld prefetch(const __global uint4 *, size_t);
+void __ovld prefetch(const __global long4 *, size_t);
+void __ovld prefetch(const __global ulong4 *, size_t);
+void __ovld prefetch(const __global float4 *, size_t);
+void __ovld prefetch(const __global char8 *, size_t);
+void __ovld prefetch(const __global uchar8 *, size_t);
+void __ovld prefetch(const __global short8 *, size_t);
+void __ovld prefetch(const __global ushort8 *, size_t);
+void __ovld prefetch(const __global int8 *, size_t);
+void __ovld prefetch(const __global uint8 *, size_t);
+void __ovld prefetch(const __global long8 *, size_t);
+void __ovld prefetch(const __global ulong8 *, size_t);
+void __ovld prefetch(const __global float8 *, size_t);
+void __ovld prefetch(const __global char16 *, size_t);
+void __ovld prefetch(const __global uchar16 *, size_t);
+void __ovld prefetch(const __global short16 *, size_t);
+void __ovld prefetch(const __global ushort16 *, size_t);
+void __ovld prefetch(const __global int16 *, size_t);
+void __ovld prefetch(const __global uint16 *, size_t);
+void __ovld prefetch(const __global long16 *, size_t);
+void __ovld prefetch(const __global ulong16 *, size_t);
+void __ovld prefetch(const __global float16 *, size_t);
+#ifdef cl_khr_fp64
+void __ovld prefetch(const __global double *, size_t);
+void __ovld prefetch(const __global double2 *, size_t);
+void __ovld prefetch(const __global double3 *, size_t);
+void __ovld prefetch(const __global double4 *, size_t);
+void __ovld prefetch(const __global double8 *, size_t);
+void __ovld prefetch(const __global double16 *, size_t);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld prefetch(const __global half *, size_t);
+void __ovld prefetch(const __global half2 *, size_t);
+void __ovld prefetch(const __global half3 *, size_t);
+void __ovld prefetch(const __global half4 *, size_t);
+void __ovld prefetch(const __global half8 *, size_t);
+void __ovld prefetch(const __global half16 *, size_t);
+#endif // cl_khr_fp16
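+
+// Usage sketch (illustrative): warm the cache ahead of a chunked pass; since
+// prefetch never changes results, a wrong hint only costs performance.
+//
+//   uint gid = get_global_id(0);
+//   prefetch(&in[gid * CHUNK], CHUNK);   // in: const __global float *,
+//                                        // CHUNK: hypothetical tile size
+//   for (uint i = 0; i < CHUNK; ++i)
+//     acc += in[gid * CHUNK + i];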
+
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old + val) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_add(volatile __global int *, int);
+uint __ovld atomic_add(volatile __global uint *, uint);
+int __ovld atomic_add(volatile __local int *, int);
+uint __ovld atomic_add(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_add(volatile int *, int);
+uint __ovld atomic_add(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_add(volatile __global int *, int);
+uint __ovld atom_add(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_add(volatile __local int *, int);
+uint __ovld atom_add(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_add(volatile __global long *, long);
+ulong __ovld atom_add(volatile __global ulong *, ulong);
+long __ovld atom_add(volatile __local long *, long);
+ulong __ovld atom_add(volatile __local ulong *, ulong);
+#endif
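+
+// Usage sketch (illustrative): a 256-bin histogram built with atomic_add; the
+// return value is the pre-add contents and is often discarded.
+//
+//   __kernel void hist(__global const uchar *pix,
+//                      volatile __global uint *bins) {
+//     atomic_add(&bins[pix[get_global_id(0)]], 1u);
+//   }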
+
+/**
+ * Read the 32-bit value (referred to as old) stored at the location pointed
+ * to by p. Compute (old - val) and store the result at the location pointed
+ * to by p. The function returns old.
+ */
+int __ovld atomic_sub(volatile __global int *, int);
+uint __ovld atomic_sub(volatile __global uint *, uint);
+int __ovld atomic_sub(volatile __local int *, int);
+uint __ovld atomic_sub(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_sub(volatile int *, int);
+uint __ovld atomic_sub(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_sub(volatile __global int *, int);
+uint __ovld atom_sub(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_sub(volatile __local int *, int);
+uint __ovld atom_sub(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_sub(volatile __global long *, long);
+ulong __ovld atom_sub(volatile __global ulong *, ulong);
+long __ovld atom_sub(volatile __local long *, long);
+ulong __ovld atom_sub(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Swaps the old value stored at location p
+ * with the new value given by val. Returns the
+ * old value.
+ */
+int __ovld atomic_xchg(volatile __global int *, int);
+uint __ovld atomic_xchg(volatile __global uint *, uint);
+int __ovld atomic_xchg(volatile __local int *, int);
+uint __ovld atomic_xchg(volatile __local uint *, uint);
+float __ovld atomic_xchg(volatile __global float *, float);
+float __ovld atomic_xchg(volatile __local float *, float);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xchg(volatile int *, int);
+uint __ovld atomic_xchg(volatile uint *, uint);
+float __ovld atomic_xchg(volatile float *, float);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_xchg(volatile __global int *, int);
+uint __ovld atom_xchg(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_xchg(volatile __local int *, int);
+uint __ovld atom_xchg(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_xchg(volatile __global long *, long);
+long __ovld atom_xchg(volatile __local long *, long);
+ulong __ovld atom_xchg(volatile __global ulong *, ulong);
+ulong __ovld atom_xchg(volatile __local ulong *, ulong);
+#endif
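+
+// Usage sketch (illustrative): atomic_xchg is the only legacy atomic with a
+// float overload, so it is handy for publishing a value while recovering the
+// one it replaced.
+//
+//   float prev = atomic_xchg(slot, val);   // slot: volatile __global float *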
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old + 1) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_inc(volatile __global int *);
+uint __ovld atomic_inc(volatile __global uint *);
+int __ovld atomic_inc(volatile __local int *);
+uint __ovld atomic_inc(volatile __local uint *);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_inc(volatile int *);
+uint __ovld atomic_inc(volatile uint *);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_inc(volatile __global int *);
+uint __ovld atom_inc(volatile __global uint *);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_inc(volatile __local int *);
+uint __ovld atom_inc(volatile __local uint *);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_inc(volatile __global long *);
+ulong __ovld atom_inc(volatile __global ulong *);
+long __ovld atom_inc(volatile __local long *);
+ulong __ovld atom_inc(volatile __local ulong *);
+#endif
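+
+// Usage sketch (illustrative): atomic_inc as a ticket dispenser for stream
+// compaction; each work-item that survives the filter claims a unique slot.
+//
+//   if (keep)
+//     out[atomic_inc(count)] = item;   // count: volatile __global uint *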
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old - 1) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_dec(volatile __global int *);
+uint __ovld atomic_dec(volatile __global uint *);
+int __ovld atomic_dec(volatile __local int *);
+uint __ovld atomic_dec(volatile __local uint *);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_dec(volatile int *);
+uint __ovld atomic_dec(volatile uint *);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_dec(volatile __global int *);
+uint __ovld atom_dec(volatile __global uint *);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_dec(volatile __local int *);
+uint __ovld atom_dec(volatile __local uint *);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_dec(volatile __global long *);
+ulong __ovld atom_dec(volatile __global ulong *);
+long __ovld atom_dec(volatile __local long *);
+ulong __ovld atom_dec(volatile __local ulong *);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old == cmp) ? val : old and store
+ * the result at the location pointed to by p.
+ * The function returns old.
+ */
+int __ovld atomic_cmpxchg(volatile __global int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __global uint *, uint, uint);
+int __ovld atomic_cmpxchg(volatile __local int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __local uint *, uint, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_cmpxchg(volatile int *, int, int);
+uint __ovld atomic_cmpxchg(volatile uint *, uint, uint);
+#endif
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __global int *, int, int);
+uint __ovld atom_cmpxchg(volatile __global uint *, uint, uint);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __local int *, int, int);
+uint __ovld atom_cmpxchg(volatile __local uint *, uint, uint);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_cmpxchg(volatile __global long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __global ulong *, ulong, ulong);
+long __ovld atom_cmpxchg(volatile __local long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __local ulong *, ulong, ulong);
+#endif
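+
+// Usage sketch (illustrative): the classic compare-and-swap loop, emulating a
+// float add with atomic_cmpxchg by round-tripping the bits through the
+// as_uint / as_float reinterpret builtins (atomic_fadd itself is a
+// hypothetical helper, not part of this header).
+//
+//   void atomic_fadd(volatile __global float *p, float v) {
+//     uint seen, want;
+//     do {
+//       seen = as_uint(*p);
+//       want = as_uint(as_float(seen) + v);
+//     } while (atomic_cmpxchg((volatile __global uint *)p, seen, want) != seen);
+//   }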
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute min(old, val) and store the minimum
+ * value at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_min(volatile __global int *, int);
+uint __ovld atomic_min(volatile __global uint *, uint);
+int __ovld atomic_min(volatile __local int *, int);
+uint __ovld atomic_min(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_min(volatile int *, int);
+uint __ovld atomic_min(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_min(volatile __global int *, int);
+uint __ovld atom_min(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_min(volatile __local int *, int);
+uint __ovld atom_min(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_min(volatile __global long *, long);
+ulong __ovld atom_min(volatile __global ulong *, ulong);
+long __ovld atom_min(volatile __local long *, long);
+ulong __ovld atom_min(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute max(old, val) and store the maximum
+ * value at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_max(volatile __global int *, int);
+uint __ovld atomic_max(volatile __global uint *, uint);
+int __ovld atomic_max(volatile __local int *, int);
+uint __ovld atomic_max(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_max(volatile int *, int);
+uint __ovld atomic_max(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_max(volatile __global int *, int);
+uint __ovld atom_max(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_max(volatile __local int *, int);
+uint __ovld atom_max(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_max(volatile __global long *, long);
+ulong __ovld atom_max(volatile __global ulong *, ulong);
+long __ovld atom_max(volatile __local long *, long);
+ulong __ovld atom_max(volatile __local ulong *, ulong);
+#endif
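+
+// Usage sketch (illustrative): reducing per-item scores to one global signed
+// maximum; atomic_min is the mirror image.
+//
+//   atomic_max(best, score);   // best: volatile __global int *, score: int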
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old & val) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_and(volatile __global int *, int);
+uint __ovld atomic_and(volatile __global uint *, uint);
+int __ovld atomic_and(volatile __local int *, int);
+uint __ovld atomic_and(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_and(volatile int *, int);
+uint __ovld atomic_and(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_and(volatile __global int *, int);
+uint __ovld atom_and(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_and(volatile __local int *, int);
+uint __ovld atom_and(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_and(volatile __global long *, long);
+ulong __ovld atom_and(volatile __global ulong *, ulong);
+long __ovld atom_and(volatile __local long *, long);
+ulong __ovld atom_and(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old | val) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_or(volatile __global int *, int);
+uint __ovld atomic_or(volatile __global uint *, uint);
+int __ovld atomic_or(volatile __local int *, int);
+uint __ovld atomic_or(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_or(volatile int *, int);
+uint __ovld atomic_or(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_or(volatile __global int *, int);
+uint __ovld atom_or(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_or(volatile __local int *, int);
+uint __ovld atom_or(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_or(volatile __global long *, long);
+ulong __ovld atom_or(volatile __global ulong *, ulong);
+long __ovld atom_or(volatile __local long *, long);
+ulong __ovld atom_or(volatile __local ulong *, ulong);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old ^ val) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_xor(volatile __global int *, int);
+uint __ovld atomic_xor(volatile __global uint *, uint);
+int __ovld atomic_xor(volatile __local int *, int);
+uint __ovld atomic_xor(volatile __local uint *, uint);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xor(volatile int *, int);
+uint __ovld atomic_xor(volatile uint *, uint);
+#endif
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_xor(volatile __global int *, int);
+uint __ovld atom_xor(volatile __global uint *, uint);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_xor(volatile __local int *, int);
+uint __ovld atom_xor(volatile __local uint *, uint);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_xor(volatile __global long *, long);
+ulong __ovld atom_xor(volatile __global ulong *, ulong);
+long __ovld atom_xor(volatile __local long *, long);
+ulong __ovld atom_xor(volatile __local ulong *, ulong);
+#endif
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
+#endif
+
+// OpenCL v2.0 s6.13.11 - Atomic Functions
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// Double atomics require both the cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics extensions.
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+
+// atomic_init()
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_init(volatile atomic_int *, int);
+void __ovld atomic_init(volatile atomic_uint *, uint);
+void __ovld atomic_init(volatile atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile atomic_long *, long);
+void __ovld atomic_init(volatile atomic_ulong *, ulong);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_init(volatile __global atomic_int *, int);
+void __ovld atomic_init(volatile __local atomic_int *, int);
+void __ovld atomic_init(volatile __global atomic_uint *, uint);
+void __ovld atomic_init(volatile __local atomic_uint *, uint);
+void __ovld atomic_init(volatile __global atomic_float *, float);
+void __ovld atomic_init(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile __global atomic_long *, long);
+void __ovld atomic_init(volatile __local atomic_long *, long);
+void __ovld atomic_init(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_init(volatile __local atomic_ulong *, ulong);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile __global atomic_double *, double);
+void __ovld atomic_init(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+#endif
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
+// atomic_work_item_fence()
+void __ovld atomic_work_item_fence(cl_mem_fence_flags, memory_order, memory_scope);
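+
+// Usage sketch (illustrative, OpenCL 2.0+): atomic_init is itself non-atomic,
+// so one work-item seeds the object and the group synchronizes before use; the
+// fence call demonstrates the flags/order/scope signature above (the barrier
+// alone would also suffice here).
+//
+//   __local atomic_int winner;
+//   if (get_local_id(0) == 0)
+//     atomic_init(&winner, -1);
+//   atomic_work_item_fence(CLK_LOCAL_MEM_FENCE, memory_order_release,
+//                          memory_scope_work_group);
+//   barrier(CLK_LOCAL_MEM_FENCE);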
+
+// atomic_fetch()
+// OpenCL v2.0 s6.13.11.7.5:
+// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add(volatile atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile atomic_uint *, uint);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_sub(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_or(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_xor(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_and(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_min(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_max(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *, ptrdiff_t);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_add(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_add(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_sub(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_sub(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_or(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_or(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_xor(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_xor(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_and(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_and(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_min(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_min(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_max(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_max(volatile __local atomic_uint *, uint);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_add(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_sub(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_sub(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_or(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_or(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_xor(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_xor(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_and(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_and(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_min(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_min(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_max(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_max(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_max(volatile __global atomic_uintptr_t *, uintptr_t);
+uintptr_t __ovld atomic_fetch_max(volatile __local atomic_uintptr_t *, uintptr_t);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif
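+
+// Usage sketch (illustrative): without the _explicit suffix, every fetch
+// operation above uses memory_order_seq_cst at memory_scope_device.
+//
+//   int before = atomic_fetch_add(counter, 1);
+//   // counter: volatile __global atomic_int *, initialized by the host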
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif
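+
+// Usage sketch (illustrative): a plain event counter needs no ordering, so a
+// relaxed fetch avoids paying for the seq_cst default.
+//
+//   atomic_fetch_add_explicit(counter, 1, memory_order_relaxed);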
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
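+
+// Usage sketch (illustrative): when only the work-group observes the object,
+// narrowing the scope lets the implementation keep the operation in faster,
+// group-local hardware.
+//
+//   atomic_fetch_or_explicit(flags, 1u << lane,
+//                            memory_order_relaxed, memory_scope_work_group);
+//   // flags: volatile __local atomic_uint *, lane: hypothetical bit index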
+
+// The functionality added by cl_ext_float_atomics extension
+#if defined(cl_ext_float_atomics)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
+void __ovld atomic_store(volatile __global atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+                                  half, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+                                  half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __global atomic_half *);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __global atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+                                     half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+                                     half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile __local atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+                                  half, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+                                  half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __local atomic_half *);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __local atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+                                     half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+                                     half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store) &&                   \
+    defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile atomic_half *, half);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+                                  memory_order);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+                                  memory_order, memory_scope);
+half __ovld atomic_load(volatile atomic_half *);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+                                 memory_order);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+                                 memory_order, memory_scope);
+half __ovld atomic_exchange(volatile atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+                                     memory_order);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+                                     memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
+       // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile atomic_half *, half);
+half __ovld atomic_fetch_max(volatile atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                \
+    defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile atomic_float *, float);
+float __ovld atomic_fetch_max(volatile atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                \
+    defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                      \
+    defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile atomic_double *, double);
+double __ovld atomic_fetch_max(volatile atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                \
+    defined(__opencl_c_ext_fp64_local_atomic_min_max)
+#endif // defined(cl_khr_int64_base_atomics) &&                                \
+    defined(cl_khr_int64_extended_atomics)
+
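+// Usage sketch (illustrative): with __opencl_c_ext_fp32_global_atomic_min_max
+// defined, a relaxed fetch-max can reduce a buffer to a running maximum; the
+// names 'in' and 'best' are hypothetical.
+//
+//   __kernel void running_max(__global const float *in,
+//                             volatile __global atomic_float *best) {
+//     atomic_fetch_max_explicit(best, in[get_global_id(0)],
+//                               memory_order_relaxed, memory_scope_device);
+//   }
+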
+#if defined(__opencl_c_ext_fp16_global_atomic_add)
+half __ovld atomic_fetch_add(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+                                      half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+                                      half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add) &&                    \
+    defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add)
+float __ovld atomic_fetch_add(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+                                       float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+                                       float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add) &&                    \
+    defined(__opencl_c_ext_fp32_local_atomic_add)
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(__opencl_c_ext_fp64_global_atomic_add)
+double __ovld atomic_fetch_add(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_add) &&                          \
+    defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+                                        double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+                                        double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add) &&                    \
+    defined(__opencl_c_ext_fp64_local_atomic_add)
+#endif // defined(cl_khr_int64_base_atomics) &&                                \
+    defined(cl_khr_int64_extended_atomics)
+
+#endif // cl_ext_float_atomics
+
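+// Usage sketch (illustrative): with __opencl_c_ext_fp32_global_atomic_add
+// defined (part of cl_ext_float_atomics), partial sums can be accumulated
+// into a single float without a separate reduction pass; 'acc' is a
+// hypothetical argument name.
+//
+//   __kernel void accumulate(__global const float *in,
+//                            volatile __global atomic_float *acc) {
+//     atomic_fetch_add_explicit(acc, in[get_global_id(0)],
+//                               memory_order_relaxed, memory_scope_device);
+//   }
+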
+// atomic_store()
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store(volatile atomic_int *, int);
+void __ovld atomic_store(volatile atomic_uint *, uint);
+void __ovld atomic_store(volatile atomic_float *, float);
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile atomic_long *, long);
+void __ovld atomic_store(volatile atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store(volatile __global atomic_int *, int);
+void __ovld atomic_store(volatile __local atomic_int *, int);
+void __ovld atomic_store(volatile __global atomic_uint *, uint);
+void __ovld atomic_store(volatile __local atomic_uint *, uint);
+void __ovld atomic_store(volatile __global atomic_float *, float);
+void __ovld atomic_store(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_double *, double);
+void __ovld atomic_store(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_long *, long);
+void __ovld atomic_store(volatile __local atomic_long *, long);
+void __ovld atomic_store(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_store(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
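+// Usage sketch (illustrative): the three overload families above differ only
+// in how much ordering information the caller spells out; 'flag' is a
+// hypothetical argument name.
+//
+//   __kernel void set_flag(volatile __global atomic_int *flag) {
+//     atomic_store(flag, 1);                                // seq_cst, device
+//     atomic_store_explicit(flag, 2, memory_order_release); // explicit order
+//     atomic_store_explicit(flag, 3, memory_order_release,
+//                           memory_scope_work_group);       // order and scope
+//   }
+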
+// atomic_load()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load(volatile atomic_int *);
+uint __ovld atomic_load(volatile atomic_uint *);
+float __ovld atomic_load(volatile atomic_float *);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile atomic_double *);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile atomic_long *);
+ulong __ovld atomic_load(volatile atomic_ulong *);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load(volatile __global atomic_int *);
+int __ovld atomic_load(volatile __local atomic_int *);
+uint __ovld atomic_load(volatile __global atomic_uint *);
+uint __ovld atomic_load(volatile __local atomic_uint *);
+float __ovld atomic_load(volatile __global atomic_float *);
+float __ovld atomic_load(volatile __local atomic_float *);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile __global atomic_double *);
+double __ovld atomic_load(volatile __local atomic_double *);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile __global atomic_long *);
+long __ovld atomic_load(volatile __local atomic_long *);
+ulong __ovld atomic_load(volatile __global atomic_ulong *);
+ulong __ovld atomic_load(volatile __local atomic_ulong *);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order, memory_scope);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order, memory_scope);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order, memory_scope);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
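+// Usage sketch (illustrative): an acquire load pairs with a release store to
+// make prior writes visible; spinning like this assumes the producing
+// work-item can make forward progress, and all names are hypothetical.
+//
+//   __kernel void consume(volatile __global atomic_int *flag,
+//                         __global const int *data, __global int *out) {
+//     while (atomic_load_explicit(flag, memory_order_acquire) == 0)
+//       ;                       // wait for a release-store of a nonzero flag
+//     *out = data[0];           // data written before that store is visible
+//   }
+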
+// atomic_exchange()
+
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange(volatile atomic_int *, int);
+uint __ovld atomic_exchange(volatile atomic_uint *, uint);
+float __ovld atomic_exchange(volatile atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile atomic_double *, double);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile atomic_long *, long);
+ulong __ovld atomic_exchange(volatile atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange(volatile __global atomic_int *, int);
+int __ovld atomic_exchange(volatile __local atomic_int *, int);
+uint __ovld atomic_exchange(volatile __global atomic_uint *, uint);
+uint __ovld atomic_exchange(volatile __local atomic_uint *, uint);
+float __ovld atomic_exchange(volatile __global atomic_float *, float);
+float __ovld atomic_exchange(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile __global atomic_double *, double);
+double __ovld atomic_exchange(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile __global atomic_long *, long);
+long __ovld atomic_exchange(volatile __local atomic_long *, long);
+ulong __ovld atomic_exchange(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_exchange(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
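+// Usage sketch (illustrative): atomic_exchange returns the previous value, so
+// it can act as a test-and-set; 'lock' and 'winner' are hypothetical names.
+//
+//   __kernel void try_claim(volatile __global atomic_int *lock,
+//                           __global int *winner) {
+//     int prev = atomic_exchange_explicit(lock, 1, memory_order_acq_rel);
+//     if (prev == 0)
+//       *winner = (int)get_global_id(0); // only the first exchanger sees 0
+//   }
+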
+// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *, float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *, float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *, double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *, double *, double);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *, ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *, ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __private float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __private double *, double);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __private ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
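+// Usage sketch (illustrative): the canonical compare-exchange retry loop; on
+// failure the builtin stores the currently held value back into 'expected',
+// so the loop re-tests against fresh data. 'ctr' and 'cap' are hypothetical
+// names.
+//
+//   __kernel void bounded_increment(volatile __global atomic_int *ctr,
+//                                   int cap) {
+//     int expected = atomic_load(ctr);
+//     while (expected < cap &&
+//            !atomic_compare_exchange_strong(ctr, &expected, expected + 1))
+//       ;  // 'expected' now holds the observed value; loop decides again
+//   }
+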
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
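+// Illustrative sketch (not part of the original header): a typical
+// compare-exchange retry loop over the declarations above. The helper name
+// atomic_fmax_device is hypothetical.
+//
+//   void atomic_fmax_device(volatile __global atomic_float *p, float v) {
+//     float expected = atomic_load_explicit(p, memory_order_relaxed,
+//                                           memory_scope_device);
+//     // On failure, the current value is stored back into expected, so
+//     // each retry tests against the freshest value.
+//     while (expected < v &&
+//            !atomic_compare_exchange_weak_explicit(
+//                p, &expected, v, memory_order_relaxed,
+//                memory_order_relaxed, memory_scope_device)) { }
+//   }
+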
+// atomic_flag_test_and_set() and atomic_flag_clear()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *);
+void __ovld atomic_flag_clear(volatile atomic_flag *);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *);
+bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *);
+void __ovld atomic_flag_clear(volatile __global atomic_flag *);
+void __ovld atomic_flag_clear(volatile __local atomic_flag *);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
+
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order, memory_scope);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
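+
+// Illustrative sketch (not part of the original header): atomic_flag as a
+// device-scope spin lock; lock is a hypothetical kernel argument of type
+// volatile __global atomic_flag *.
+//
+//   // Acquire: spin until the previous value was clear.
+//   while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire,
+//                                            memory_scope_device)) { }
+//   /* ... critical section ... */
+//   // Release: clear the flag so another work-item can take the lock.
+//   atomic_flag_clear_explicit(lock, memory_order_release,
+//                              memory_scope_device);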
+
+// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
+
+/**
+ * The shuffle and shuffle2 built-in functions construct
+ * a permutation of elements from one (shuffle) or two
+ * (shuffle2) input vectors of the same type, returning a
+ * vector with the same element type as the input and the
+ * same length as the shuffle mask. The size of each
+ * element in the mask must match the size of each
+ * element in the result. For shuffle, only the
+ * ilogb(2m-1) least significant bits of each mask
+ * element are considered, where m is the number of
+ * elements in the input vector. For shuffle2, only the
+ * ilogb(2m-1)+1 least significant bits of each mask
+ * element are considered. Other bits in the mask shall
+ * be ignored.
+ * The elements of the input vectors are numbered from
+ * left to right across one or both of the vectors. For
+ * this purpose, the number of elements in a vector is
+ * given by vec_step(gentypem). The shuffle mask operand
+ * specifies, for each element of the result vector,
+ * which element of the one or two input vectors the
+ * result element takes.
+ * Examples:
+ * uint4 mask = (uint4)(3, 2, 1, 0);
+ * float4 a;
+ * float4 r = shuffle(a, mask);
+ * // r.s0123 = a.wzyx
+ *
+ * uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
+ * float4 a, b;
+ * float8 r = shuffle2(a, b, mask);
+ * // r.s0123 = a.xyzw
+ * // r.s4567 = b.xyzw
+ *
+ * uint4 mask;
+ * float8 a;
+ * float4 b;
+ * b = shuffle(a, mask);
+ *
+ * Examples that are not valid:
+ * uint8 mask;
+ * short16 a;
+ * short8 b;
+ * b = shuffle(a, mask); // not valid: mask element size (uint)
+ *                       // does not match result element size (short)
+ */
+char2 __ovld __cnfn shuffle(char2, uchar2);
+char2 __ovld __cnfn shuffle(char4, uchar2);
+char2 __ovld __cnfn shuffle(char8, uchar2);
+char2 __ovld __cnfn shuffle(char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle(uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle(short2, ushort2);
+short2 __ovld __cnfn shuffle(short4, ushort2);
+short2 __ovld __cnfn shuffle(short8, ushort2);
+short2 __ovld __cnfn shuffle(short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle(ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle(int2, uint2);
+int2 __ovld __cnfn shuffle(int4, uint2);
+int2 __ovld __cnfn shuffle(int8, uint2);
+int2 __ovld __cnfn shuffle(int16, uint2);
+
+uint2 __ovld __cnfn shuffle(uint2, uint2);
+uint2 __ovld __cnfn shuffle(uint4, uint2);
+uint2 __ovld __cnfn shuffle(uint8, uint2);
+uint2 __ovld __cnfn shuffle(uint16, uint2);
+
+long2 __ovld __cnfn shuffle(long2, ulong2);
+long2 __ovld __cnfn shuffle(long4, ulong2);
+long2 __ovld __cnfn shuffle(long8, ulong2);
+long2 __ovld __cnfn shuffle(long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle(ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle(float2, uint2);
+float2 __ovld __cnfn shuffle(float4, uint2);
+float2 __ovld __cnfn shuffle(float8, uint2);
+float2 __ovld __cnfn shuffle(float16, uint2);
+
+char4 __ovld __cnfn shuffle(char2, uchar4);
+char4 __ovld __cnfn shuffle(char4, uchar4);
+char4 __ovld __cnfn shuffle(char8, uchar4);
+char4 __ovld __cnfn shuffle(char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle(uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle(short2, ushort4);
+short4 __ovld __cnfn shuffle(short4, ushort4);
+short4 __ovld __cnfn shuffle(short8, ushort4);
+short4 __ovld __cnfn shuffle(short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle(ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle(int2, uint4);
+int4 __ovld __cnfn shuffle(int4, uint4);
+int4 __ovld __cnfn shuffle(int8, uint4);
+int4 __ovld __cnfn shuffle(int16, uint4);
+
+uint4 __ovld __cnfn shuffle(uint2, uint4);
+uint4 __ovld __cnfn shuffle(uint4, uint4);
+uint4 __ovld __cnfn shuffle(uint8, uint4);
+uint4 __ovld __cnfn shuffle(uint16, uint4);
+
+long4 __ovld __cnfn shuffle(long2, ulong4);
+long4 __ovld __cnfn shuffle(long4, ulong4);
+long4 __ovld __cnfn shuffle(long8, ulong4);
+long4 __ovld __cnfn shuffle(long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle(ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle(float2, uint4);
+float4 __ovld __cnfn shuffle(float4, uint4);
+float4 __ovld __cnfn shuffle(float8, uint4);
+float4 __ovld __cnfn shuffle(float16, uint4);
+
+char8 __ovld __cnfn shuffle(char2, uchar8);
+char8 __ovld __cnfn shuffle(char4, uchar8);
+char8 __ovld __cnfn shuffle(char8, uchar8);
+char8 __ovld __cnfn shuffle(char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle(uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle(short2, ushort8);
+short8 __ovld __cnfn shuffle(short4, ushort8);
+short8 __ovld __cnfn shuffle(short8, ushort8);
+short8 __ovld __cnfn shuffle(short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle(ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle(int2, uint8);
+int8 __ovld __cnfn shuffle(int4, uint8);
+int8 __ovld __cnfn shuffle(int8, uint8);
+int8 __ovld __cnfn shuffle(int16, uint8);
+
+uint8 __ovld __cnfn shuffle(uint2, uint8);
+uint8 __ovld __cnfn shuffle(uint4, uint8);
+uint8 __ovld __cnfn shuffle(uint8, uint8);
+uint8 __ovld __cnfn shuffle(uint16, uint8);
+
+long8 __ovld __cnfn shuffle(long2, ulong8);
+long8 __ovld __cnfn shuffle(long4, ulong8);
+long8 __ovld __cnfn shuffle(long8, ulong8);
+long8 __ovld __cnfn shuffle(long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle(ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle(float2, uint8);
+float8 __ovld __cnfn shuffle(float4, uint8);
+float8 __ovld __cnfn shuffle(float8, uint8);
+float8 __ovld __cnfn shuffle(float16, uint8);
+
+char16 __ovld __cnfn shuffle(char2, uchar16);
+char16 __ovld __cnfn shuffle(char4, uchar16);
+char16 __ovld __cnfn shuffle(char8, uchar16);
+char16 __ovld __cnfn shuffle(char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle(uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle(short2, ushort16);
+short16 __ovld __cnfn shuffle(short4, ushort16);
+short16 __ovld __cnfn shuffle(short8, ushort16);
+short16 __ovld __cnfn shuffle(short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle(ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle(int2, uint16);
+int16 __ovld __cnfn shuffle(int4, uint16);
+int16 __ovld __cnfn shuffle(int8, uint16);
+int16 __ovld __cnfn shuffle(int16, uint16);
+
+uint16 __ovld __cnfn shuffle(uint2, uint16);
+uint16 __ovld __cnfn shuffle(uint4, uint16);
+uint16 __ovld __cnfn shuffle(uint8, uint16);
+uint16 __ovld __cnfn shuffle(uint16, uint16);
+
+long16 __ovld __cnfn shuffle(long2, ulong16);
+long16 __ovld __cnfn shuffle(long4, ulong16);
+long16 __ovld __cnfn shuffle(long8, ulong16);
+long16 __ovld __cnfn shuffle(long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle(ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle(float2, uint16);
+float16 __ovld __cnfn shuffle(float4, uint16);
+float16 __ovld __cnfn shuffle(float8, uint16);
+float16 __ovld __cnfn shuffle(float16, uint16);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle(double2, ulong2);
+double2 __ovld __cnfn shuffle(double4, ulong2);
+double2 __ovld __cnfn shuffle(double8, ulong2);
+double2 __ovld __cnfn shuffle(double16, ulong2);
+
+double4 __ovld __cnfn shuffle(double2, ulong4);
+double4 __ovld __cnfn shuffle(double4, ulong4);
+double4 __ovld __cnfn shuffle(double8, ulong4);
+double4 __ovld __cnfn shuffle(double16, ulong4);
+
+double8 __ovld __cnfn shuffle(double2, ulong8);
+double8 __ovld __cnfn shuffle(double4, ulong8);
+double8 __ovld __cnfn shuffle(double8, ulong8);
+double8 __ovld __cnfn shuffle(double16, ulong8);
+
+double16 __ovld __cnfn shuffle(double2, ulong16);
+double16 __ovld __cnfn shuffle(double4, ulong16);
+double16 __ovld __cnfn shuffle(double8, ulong16);
+double16 __ovld __cnfn shuffle(double16, ulong16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle(half2, ushort2);
+half2 __ovld __cnfn shuffle(half4, ushort2);
+half2 __ovld __cnfn shuffle(half8, ushort2);
+half2 __ovld __cnfn shuffle(half16, ushort2);
+
+half4 __ovld __cnfn shuffle(half2, ushort4);
+half4 __ovld __cnfn shuffle(half4, ushort4);
+half4 __ovld __cnfn shuffle(half8, ushort4);
+half4 __ovld __cnfn shuffle(half16, ushort4);
+
+half8 __ovld __cnfn shuffle(half2, ushort8);
+half8 __ovld __cnfn shuffle(half4, ushort8);
+half8 __ovld __cnfn shuffle(half8, ushort8);
+half8 __ovld __cnfn shuffle(half16, ushort8);
+
+half16 __ovld __cnfn shuffle(half2, ushort16);
+half16 __ovld __cnfn shuffle(half4, ushort16);
+half16 __ovld __cnfn shuffle(half8, ushort16);
+half16 __ovld __cnfn shuffle(half16, ushort16);
+#endif //cl_khr_fp16
+
+char2 __ovld __cnfn shuffle2(char2, char2, uchar2);
+char2 __ovld __cnfn shuffle2(char4, char4, uchar2);
+char2 __ovld __cnfn shuffle2(char8, char8, uchar2);
+char2 __ovld __cnfn shuffle2(char16, char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle2(uchar2, uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar4, uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar8, uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar16, uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle2(short2, short2, ushort2);
+short2 __ovld __cnfn shuffle2(short4, short4, ushort2);
+short2 __ovld __cnfn shuffle2(short8, short8, ushort2);
+short2 __ovld __cnfn shuffle2(short16, short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle2(ushort2, ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort4, ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort8, ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort16, ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle2(int2, int2, uint2);
+int2 __ovld __cnfn shuffle2(int4, int4, uint2);
+int2 __ovld __cnfn shuffle2(int8, int8, uint2);
+int2 __ovld __cnfn shuffle2(int16, int16, uint2);
+
+uint2 __ovld __cnfn shuffle2(uint2, uint2, uint2);
+uint2 __ovld __cnfn shuffle2(uint4, uint4, uint2);
+uint2 __ovld __cnfn shuffle2(uint8, uint8, uint2);
+uint2 __ovld __cnfn shuffle2(uint16, uint16, uint2);
+
+long2 __ovld __cnfn shuffle2(long2, long2, ulong2);
+long2 __ovld __cnfn shuffle2(long4, long4, ulong2);
+long2 __ovld __cnfn shuffle2(long8, long8, ulong2);
+long2 __ovld __cnfn shuffle2(long16, long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle2(ulong2, ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong4, ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong8, ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong16, ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle2(float2, float2, uint2);
+float2 __ovld __cnfn shuffle2(float4, float4, uint2);
+float2 __ovld __cnfn shuffle2(float8, float8, uint2);
+float2 __ovld __cnfn shuffle2(float16, float16, uint2);
+
+char4 __ovld __cnfn shuffle2(char2, char2, uchar4);
+char4 __ovld __cnfn shuffle2(char4, char4, uchar4);
+char4 __ovld __cnfn shuffle2(char8, char8, uchar4);
+char4 __ovld __cnfn shuffle2(char16, char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle2(uchar2, uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar4, uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar8, uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar16, uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle2(short2, short2, ushort4);
+short4 __ovld __cnfn shuffle2(short4, short4, ushort4);
+short4 __ovld __cnfn shuffle2(short8, short8, ushort4);
+short4 __ovld __cnfn shuffle2(short16, short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle2(ushort2, ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort4, ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort8, ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort16, ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle2(int2, int2, uint4);
+int4 __ovld __cnfn shuffle2(int4, int4, uint4);
+int4 __ovld __cnfn shuffle2(int8, int8, uint4);
+int4 __ovld __cnfn shuffle2(int16, int16, uint4);
+
+uint4 __ovld __cnfn shuffle2(uint2, uint2, uint4);
+uint4 __ovld __cnfn shuffle2(uint4, uint4, uint4);
+uint4 __ovld __cnfn shuffle2(uint8, uint8, uint4);
+uint4 __ovld __cnfn shuffle2(uint16, uint16, uint4);
+
+long4 __ovld __cnfn shuffle2(long2, long2, ulong4);
+long4 __ovld __cnfn shuffle2(long4, long4, ulong4);
+long4 __ovld __cnfn shuffle2(long8, long8, ulong4);
+long4 __ovld __cnfn shuffle2(long16, long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle2(ulong2, ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong4, ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong8, ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong16, ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle2(float2, float2, uint4);
+float4 __ovld __cnfn shuffle2(float4, float4, uint4);
+float4 __ovld __cnfn shuffle2(float8, float8, uint4);
+float4 __ovld __cnfn shuffle2(float16, float16, uint4);
+
+char8 __ovld __cnfn shuffle2(char2, char2, uchar8);
+char8 __ovld __cnfn shuffle2(char4, char4, uchar8);
+char8 __ovld __cnfn shuffle2(char8, char8, uchar8);
+char8 __ovld __cnfn shuffle2(char16, char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle2(uchar2, uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar4, uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar8, uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar16, uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle2(short2, short2, ushort8);
+short8 __ovld __cnfn shuffle2(short4, short4, ushort8);
+short8 __ovld __cnfn shuffle2(short8, short8, ushort8);
+short8 __ovld __cnfn shuffle2(short16, short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle2(ushort2, ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort4, ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort8, ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort16, ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle2(int2, int2, uint8);
+int8 __ovld __cnfn shuffle2(int4, int4, uint8);
+int8 __ovld __cnfn shuffle2(int8, int8, uint8);
+int8 __ovld __cnfn shuffle2(int16, int16, uint8);
+
+uint8 __ovld __cnfn shuffle2(uint2, uint2, uint8);
+uint8 __ovld __cnfn shuffle2(uint4, uint4, uint8);
+uint8 __ovld __cnfn shuffle2(uint8, uint8, uint8);
+uint8 __ovld __cnfn shuffle2(uint16, uint16, uint8);
+
+long8 __ovld __cnfn shuffle2(long2, long2, ulong8);
+long8 __ovld __cnfn shuffle2(long4, long4, ulong8);
+long8 __ovld __cnfn shuffle2(long8, long8, ulong8);
+long8 __ovld __cnfn shuffle2(long16, long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle2(ulong2, ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong4, ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong8, ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong16, ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle2(float2, float2, uint8);
+float8 __ovld __cnfn shuffle2(float4, float4, uint8);
+float8 __ovld __cnfn shuffle2(float8, float8, uint8);
+float8 __ovld __cnfn shuffle2(float16, float16, uint8);
+
+char16 __ovld __cnfn shuffle2(char2, char2, uchar16);
+char16 __ovld __cnfn shuffle2(char4, char4, uchar16);
+char16 __ovld __cnfn shuffle2(char8, char8, uchar16);
+char16 __ovld __cnfn shuffle2(char16, char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle2(uchar2, uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar4, uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar8, uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar16, uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle2(short2, short2, ushort16);
+short16 __ovld __cnfn shuffle2(short4, short4, ushort16);
+short16 __ovld __cnfn shuffle2(short8, short8, ushort16);
+short16 __ovld __cnfn shuffle2(short16, short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle2(ushort2, ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort4, ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort8, ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort16, ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle2(int2, int2, uint16);
+int16 __ovld __cnfn shuffle2(int4, int4, uint16);
+int16 __ovld __cnfn shuffle2(int8, int8, uint16);
+int16 __ovld __cnfn shuffle2(int16, int16, uint16);
+
+uint16 __ovld __cnfn shuffle2(uint2, uint2, uint16);
+uint16 __ovld __cnfn shuffle2(uint4, uint4, uint16);
+uint16 __ovld __cnfn shuffle2(uint8, uint8, uint16);
+uint16 __ovld __cnfn shuffle2(uint16, uint16, uint16);
+
+long16 __ovld __cnfn shuffle2(long2, long2, ulong16);
+long16 __ovld __cnfn shuffle2(long4, long4, ulong16);
+long16 __ovld __cnfn shuffle2(long8, long8, ulong16);
+long16 __ovld __cnfn shuffle2(long16, long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle2(ulong2, ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong4, ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong8, ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong16, ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle2(float2, float2, uint16);
+float16 __ovld __cnfn shuffle2(float4, float4, uint16);
+float16 __ovld __cnfn shuffle2(float8, float8, uint16);
+float16 __ovld __cnfn shuffle2(float16, float16, uint16);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle2(double2, double2, ulong2);
+double2 __ovld __cnfn shuffle2(double4, double4, ulong2);
+double2 __ovld __cnfn shuffle2(double8, double8, ulong2);
+double2 __ovld __cnfn shuffle2(double16, double16, ulong2);
+
+double4 __ovld __cnfn shuffle2(double2, double2, ulong4);
+double4 __ovld __cnfn shuffle2(double4, double4, ulong4);
+double4 __ovld __cnfn shuffle2(double8, double8, ulong4);
+double4 __ovld __cnfn shuffle2(double16, double16, ulong4);
+
+double8 __ovld __cnfn shuffle2(double2, double2, ulong8);
+double8 __ovld __cnfn shuffle2(double4, double4, ulong8);
+double8 __ovld __cnfn shuffle2(double8, double8, ulong8);
+double8 __ovld __cnfn shuffle2(double16, double16, ulong8);
+
+double16 __ovld __cnfn shuffle2(double2, double2, ulong16);
+double16 __ovld __cnfn shuffle2(double4, double4, ulong16);
+double16 __ovld __cnfn shuffle2(double8, double8, ulong16);
+double16 __ovld __cnfn shuffle2(double16, double16, ulong16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle2(half2, half2, ushort2);
+half2 __ovld __cnfn shuffle2(half4, half4, ushort2);
+half2 __ovld __cnfn shuffle2(half8, half8, ushort2);
+half2 __ovld __cnfn shuffle2(half16, half16, ushort2);
+
+half4 __ovld __cnfn shuffle2(half2, half2, ushort4);
+half4 __ovld __cnfn shuffle2(half4, half4, ushort4);
+half4 __ovld __cnfn shuffle2(half8, half8, ushort4);
+half4 __ovld __cnfn shuffle2(half16, half16, ushort4);
+
+half8 __ovld __cnfn shuffle2(half2, half2, ushort8);
+half8 __ovld __cnfn shuffle2(half4, half4, ushort8);
+half8 __ovld __cnfn shuffle2(half8, half8, ushort8);
+half8 __ovld __cnfn shuffle2(half16, half16, ushort8);
+
+half16 __ovld __cnfn shuffle2(half2, half2, ushort16);
+half16 __ovld __cnfn shuffle2(half4, half4, ushort16);
+half16 __ovld __cnfn shuffle2(half8, half8, ushort16);
+half16 __ovld __cnfn shuffle2(half16, half16, ushort16);
+#endif //cl_khr_fp16
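+
+// Illustrative sketch (not part of the original header): interleaving two
+// vectors with shuffle2, following the mask numbering documented above
+// (indices 0..3 select from the first vector, 4..7 from the second).
+//
+//   float4 lo = (float4)(0.0f, 1.0f, 2.0f, 3.0f);
+//   float4 hi = (float4)(4.0f, 5.0f, 6.0f, 7.0f);
+//   uint8 m = (uint8)(0, 4, 1, 5, 2, 6, 3, 7);
+//   float8 r = shuffle2(lo, hi, m);  // r = (0, 4, 1, 5, 2, 6, 3, 7)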
+
+// OpenCL v1.1 s6.11.13, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+#ifdef cl_khr_gl_msaa_sharing
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#endif //cl_khr_gl_msaa_sharing
+
+/**
+ * Use the coordinate (coord.xy) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (coord.x, coord.y, coord.z) to do
+ * an element lookup in the 3D image object specified
+ * by image. coord.w is ignored.
+ *
+ * Use the coordinate (coord.z) to index into the
+ * 2D image array object specified by image_array
+ * and (coord.x, coord.y) to do an element lookup in
+ * the selected 2D image layer.
+ *
+ * Use the coordinate (x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.y) to index into the
+ * 1D image array object specified by image_array
+ * and (coord.x) to do an element lookup in
+ * the selected 1D image layer.
+ *
+ * Use the coordinate (coord.xy) and sample to do an
+ * element lookup in the 2D multi-sample image specified
+ * by image.
+ *
+ * Use coord.xy and sample to do an element
+ * lookup in the 2D multi-sample image layer
+ * identified by index coord.z in the 2D multi-sample
+ * image array specified by image.
+ *
+ * For mipmap images, use the mip-level specified by
+ * the Level-of-Detail (lod) or use gradients for LOD
+ * computation.
+ *
+ * read_imagef returns floating-point values in the
+ * range [0.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to one of the predefined
+ * packed formats, CL_UNORM_INT8, or CL_UNORM_INT16.
+ *
+ * read_imagef returns floating-point values in the
+ * range [-1.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to CL_SNORM_INT8
+ * or CL_SNORM_INT16.
+ *
+ * read_imagef returns floating-point values for image
+ * objects created with image_channel_data_type set to
+ * CL_HALF_FLOAT or CL_FLOAT.
+ *
+ * read_imagei and read_imageui return
+ * unnormalized signed integer and unsigned integer
+ * values respectively. Each channel will be stored in a
+ * 32-bit integer.
+ *
+ * read_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imagei
+ * are undefined.
+ *
+ * read_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imageui
+ * are undefined.
+ *
+ * The read_image{i|ui} calls support a nearest filter
+ * only. The filter_mode specified in sampler
+ * must be set to CLK_FILTER_NEAREST; otherwise
+ * the values returned are undefined.
+ *
+ * The read_image{f|i|ui} calls that take
+ * integer coordinates must use a sampler with
+ * normalized coordinates set to
+ * CLK_NORMALIZED_COORDS_FALSE and
+ * addressing mode set to
+ * CLK_ADDRESS_CLAMP_TO_EDGE,
+ * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
+ * otherwise the values returned are undefined.
+ *
+ * Values returned by read_imagef for image objects
+ * with image_channel_data_type values not specified
+ * in the description above are undefined.
+ */
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2);
+
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2);
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4);
+
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4);
+
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, int);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float);
+
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2);
+
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, int2);
+
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, int4);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __ovld __purefn read_imagef(read_only image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_only image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_msaa_t, int2, int);
+
+float __ovld __purefn read_imagef(read_only image2d_msaa_depth_t, int2, int);
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_only image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_msaa_t, int4, int);
+
+float __ovld __purefn read_imagef(read_only image2d_array_msaa_depth_t, int4, int);
+#endif //cl_khr_gl_msaa_sharing
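+
+// Illustrative sketch (not part of the original header): a sampler-based
+// read_imagef call; kernel and argument names are hypothetical.
+//
+//   __constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
+//                              CLK_ADDRESS_CLAMP_TO_EDGE |
+//                              CLK_FILTER_NEAREST;
+//
+//   __kernel void copy_px(read_only image2d_t src, __global float4 *dst,
+//                         int width) {
+//     int2 pos = (int2)(get_global_id(0), get_global_id(1));
+//     // Integer coordinates require unnormalized coords and a CLAMP,
+//     // CLAMP_TO_EDGE or NONE addressing mode (see the comment above).
+//     dst[pos.y * width + pos.x] = read_imagef(src, smp, pos);
+//   }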
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef cl_khr_mipmap_image
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float, float);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float2, float2);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2);
+#endif // cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float4, float4);
+
+#endif //cl_khr_mipmap_image
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
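+
+// Illustrative sketch (not part of the original header): in the mipmap
+// overloads above, the trailing float argument is the explicit
+// level-of-detail; smp and img are hypothetical.
+//
+//   float4 px = read_imagef(img, smp, (float2)(x, y), 2.0f); // mip level 2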
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+/**
+ * Sampler-less Image Access
+ */
+
+float4 __ovld __purefn read_imagef(read_only image1d_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, int);
+
+float4 __ovld __purefn read_imagef(read_only image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_buffer_t, int);
+
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, int2);
+
+float4 __ovld __purefn read_imagef(read_only image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, int2);
+
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, int4);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, int4);
+#endif //cl_khr_depth_images
+
+float4 __ovld __purefn read_imagef(read_only image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, int4);
+
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
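+
+// Illustrative sketch (not part of the original header): sampler-less reads
+// behave as if the sampler were CLK_NORMALIZED_COORDS_FALSE |
+// CLK_ADDRESS_NONE | CLK_FILTER_NEAREST.
+//
+//   int2 pos = (int2)(get_global_id(0), get_global_id(1));
+//   float4 px = read_imagef(img, pos);  // img is a read_only image2d_t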
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, int);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, float);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, float4);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, float4);
+/**
+ * Sampler-less Image Access
+ */
+half4 __ovld __purefn read_imageh(read_only image1d_t, int);
+half4 __ovld __purefn read_imageh(read_only image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_buffer_t, int);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+#endif //cl_khr_fp16
+
+// Image read functions for read_write images
+#if defined(__opencl_c_read_write_images)
+float4 __ovld __purefn read_imagef(read_write image1d_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, int);
+
+float4 __ovld __purefn read_imagef(read_write image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_buffer_t, int);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, int2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, int2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, int4);
+
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_write image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, int4);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __ovld __purefn read_imagef(read_write image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_write image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_msaa_t, int2, int);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_write image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_msaa_t, int4, int);
+
+float __ovld __purefn read_imagef(read_write image2d_msaa_depth_t, int2, int);
+float __ovld __purefn read_imagef(read_write image2d_array_msaa_depth_t, int4, int);
+#endif //cl_khr_gl_msaa_sharing
+
+#ifdef cl_khr_mipmap_image
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float);
+
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float);
+
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float, float);
+
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float2, float2);
+
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float2, float2);
+
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float2, float2);
+
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2);
+
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4);
+
+#endif //cl_khr_mipmap_image
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __ovld __purefn read_imageh(read_write image1d_t, int);
+half4 __ovld __purefn read_imageh(read_write image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_write image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by image.
+ * (coord.x, coord.y) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, and 0
+ * ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by index
+ * (coord.z) of the 2D image array object image_array.
+ * (coord.x, coord.y) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width
+ * - 1 and 0 ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord) in the 1D image (buffer) object specified by image.
+ * coord is considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x) in the 1D image object specified by index
+ * (coord.y) of the 1D image array object image_array.
+ * coord.x is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
+ * coord.x, coord.y and coord.z are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * 0 ... image height - 1, and 0 ... image depth - 1.
+ *
+ * For mipmap images, use mip-level specified by lod.
+ *
+ * Appropriate data format conversion to the specified
+ * image format is done before writing the color value.
+ *
+ * write_imagef can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the pre-defined packed formats or set to
+ * CL_SNORM_INT8, CL_UNORM_INT8,
+ * CL_SNORM_INT16, CL_UNORM_INT16,
+ * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
+ * format conversion will be done to convert channel
+ * data from a floating-point value to actual data format
+ * in which the channels are stored.
+ *
+ * write_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ *
+ * write_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ *
+ * The behavior of write_imagef, write_imagei and
+ * write_imageui for image objects created with
+ * image_channel_data_type values not specified in
+ * the description above, or with (x, y) coordinate
+ * values that are not in the range (0 ... image width - 1,
+ * 0 ... image height - 1), is undefined.
+ */
+void __ovld write_imagef(write_only image2d_t, int2, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int4);
+void __ovld write_imageui(write_only image2d_t, int2, uint4);
+
+void __ovld write_imagef(write_only image2d_array_t, int4, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, uint4);
+
+void __ovld write_imagef(write_only image1d_t, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, uint4);
+
+void __ovld write_imagef(write_only image1d_buffer_t, int, float4);
+void __ovld write_imagei(write_only image1d_buffer_t, int, int4);
+void __ovld write_imageui(write_only image1d_buffer_t, int, uint4);
+
+void __ovld write_imagef(write_only image1d_array_t, int2, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, uint4);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(write_only image3d_t, int4, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int4);
+void __ovld write_imageui(write_only image3d_t, int4, uint4);
+#endif
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(write_only image2d_depth_t, int2, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, float);
+#endif //cl_khr_depth_images
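+
+// Illustrative usage sketch (not part of the upstream header): a hypothetical
+// kernel showing the write_imagef overload declared above. Coordinates are
+// unnormalized integers, per the comment block preceding these declarations.
+//
+//   __kernel void fill_red(write_only image2d_t dst) {
+//     int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//     write_imagef(dst, coord, (float4)(1.0f, 0.0f, 0.0f, 1.0f));
+//   }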
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(cl_khr_mipmap_image_writes)
+void __ovld write_imagef(write_only image1d_t, int, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, int, uint4);
+
+void __ovld write_imagef(write_only image1d_array_t, int2, int, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, int, uint4);
+
+void __ovld write_imagef(write_only image2d_t, int2, int, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int, int4);
+void __ovld write_imageui(write_only image2d_t, int2, int, uint4);
+
+void __ovld write_imagef(write_only image2d_array_t, int4, int, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, int, uint4);
+
+void __ovld write_imagef(write_only image2d_depth_t, int2, int, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, int, float);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(write_only image3d_t, int4, int, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int, int4);
+void __ovld write_imageui(write_only image3d_t, int4, int, uint4);
+#endif //cl_khr_3d_image_writes
+
+#endif //defined(cl_khr_mipmap_image_writes)
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
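+
+// Illustrative usage sketch (hypothetical): writing to a specific mip level
+// with the (coord, lod, color) overloads above, assuming the device supports
+// cl_khr_mipmap_image_writes.
+//
+//   __kernel void clear_level(write_only image2d_t dst, int lod) {
+//     int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//     write_imagef(dst, coord, lod, (float4)(0.0f));
+//   }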
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(write_only image1d_t, int, half4);
+void __ovld write_imageh(write_only image2d_t, int2, half4);
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imageh(write_only image3d_t, int4, half4);
+#endif
+void __ovld write_imageh(write_only image1d_array_t, int2, half4);
+void __ovld write_imageh(write_only image2d_array_t, int4, half4);
+void __ovld write_imageh(write_only image1d_buffer_t, int, half4);
+#endif //cl_khr_fp16
+
+// Image write functions for read_write images
+#if defined(__opencl_c_read_write_images)
+void __ovld write_imagef(read_write image2d_t, int2, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int4);
+void __ovld write_imageui(read_write image2d_t, int2, uint4);
+
+void __ovld write_imagef(read_write image2d_array_t, int4, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, uint4);
+
+void __ovld write_imagef(read_write image1d_t, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, uint4);
+
+void __ovld write_imagef(read_write image1d_buffer_t, int, float4);
+void __ovld write_imagei(read_write image1d_buffer_t, int, int4);
+void __ovld write_imageui(read_write image1d_buffer_t, int, uint4);
+
+void __ovld write_imagef(read_write image1d_array_t, int2, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, uint4);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(read_write image3d_t, int4, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int4);
+void __ovld write_imageui(read_write image3d_t, int4, uint4);
+#endif
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(read_write image2d_depth_t, int2, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, float);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_mipmap_image_writes)
+void __ovld write_imagef(read_write image1d_t, int, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, int, uint4);
+
+void __ovld write_imagef(read_write image1d_array_t, int2, int, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, int, uint4);
+
+void __ovld write_imagef(read_write image2d_t, int2, int, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int, int4);
+void __ovld write_imageui(read_write image2d_t, int2, int, uint4);
+
+void __ovld write_imagef(read_write image2d_array_t, int4, int, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, int, uint4);
+
+void __ovld write_imagef(read_write image2d_depth_t, int2, int, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, int, float);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(read_write image3d_t, int4, int, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int, int4);
+void __ovld write_imageui(read_write image3d_t, int4, int, uint4);
+#endif //cl_khr_3d_image_writes
+
+#endif //cl_khr_mipmap_image_writes
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(read_write image1d_t, int, half4);
+void __ovld write_imageh(read_write image2d_t, int2, half4);
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imageh(read_write image3d_t, int4, half4);
+#endif
+void __ovld write_imageh(read_write image1d_array_t, int2, half4);
+void __ovld write_imageh(read_write image2d_array_t, int4, half4);
+void __ovld write_imageh(read_write image1d_buffer_t, int, half4);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_read_write_images)
+
+// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin
+// functions does not have an access qualifier and is assumed to be read_only by
+// default. Image query builtin functions with a write_only image argument should
+// therefore also be declared.
+
+/**
+ * Return the image width in pixels.
+ */
+int __ovld __cnfn get_image_width(read_only image1d_t);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_only image2d_t);
+int __ovld __cnfn get_image_width(read_only image3d_t);
+int __ovld __cnfn get_image_width(read_only image1d_array_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_only image2d_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_width(write_only image1d_t);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_width(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_width(write_only image1d_array_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(write_only image2d_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_width(read_write image1d_t);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_write image2d_t);
+int __ovld __cnfn get_image_width(read_write image3d_t);
+int __ovld __cnfn get_image_width(read_write image1d_array_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_write image2d_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image height in pixels.
+ */
+int __ovld __cnfn get_image_height(read_only image2d_t);
+int __ovld __cnfn get_image_height(read_only image3d_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_only image2d_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_height(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_height(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_height(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(write_only image2d_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_height(read_write image2d_t);
+int __ovld __cnfn get_image_height(read_write image3d_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_write image2d_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image depth in pixels.
+ */
+int __ovld __cnfn get_image_depth(read_only image3d_t);
+
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_depth(write_only image3d_t);
+#endif
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_depth(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef cl_khr_mipmap_image
+/**
+ * Return the number of mip levels of the image.
+ */
+
+int __ovld get_image_num_mip_levels(read_only image1d_t);
+int __ovld get_image_num_mip_levels(read_only image2d_t);
+int __ovld get_image_num_mip_levels(read_only image3d_t);
+
+int __ovld get_image_num_mip_levels(write_only image1d_t);
+int __ovld get_image_num_mip_levels(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld get_image_num_mip_levels(write_only image3d_t);
+#endif
+
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_t);
+int __ovld get_image_num_mip_levels(read_write image2d_t);
+int __ovld get_image_num_mip_levels(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+
+int __ovld get_image_num_mip_levels(read_only image1d_array_t);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t);
+#endif // cl_khr_depth_images
+
+int __ovld get_image_num_mip_levels(write_only image1d_array_t);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t);
+#endif // cl_khr_depth_images
+
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_array_t);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t);
+#endif // cl_khr_depth_images
+#endif //defined(__opencl_c_read_write_images)
+
+#endif //cl_khr_mipmap_image
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+/**
+ * Return the channel data type. Valid values are:
+ * CLK_SNORM_INT8
+ * CLK_SNORM_INT16
+ * CLK_UNORM_INT8
+ * CLK_UNORM_INT16
+ * CLK_UNORM_SHORT_565
+ * CLK_UNORM_SHORT_555
+ * CLK_UNORM_SHORT_101010
+ * CLK_SIGNED_INT8
+ * CLK_SIGNED_INT16
+ * CLK_SIGNED_INT32
+ * CLK_UNSIGNED_INT8
+ * CLK_UNSIGNED_INT16
+ * CLK_UNSIGNED_INT32
+ * CLK_HALF_FLOAT
+ * CLK_FLOAT
+ */
+
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
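+
+// Illustrative usage sketch (hypothetical helper): branching on the queried
+// channel data type using the CLK_* values listed in the comment above.
+//
+//   bool reads_as_float(read_only image2d_t img) {
+//     int dt = get_image_channel_data_type(img);
+//     // A few of the formats that are read with read_imagef rather than
+//     // read_imagei/read_imageui.
+//     return dt == CLK_UNORM_INT8 || dt == CLK_HALF_FLOAT || dt == CLK_FLOAT;
+//   }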
+
+/**
+ * Return the image channel order. Valid values are:
+ * CLK_A
+ * CLK_R
+ * CLK_Rx
+ * CLK_RG
+ * CLK_RGx
+ * CLK_RA
+ * CLK_RGB
+ * CLK_RGBx
+ * CLK_RGBA
+ * CLK_ARGB
+ * CLK_BGRA
+ * CLK_INTENSITY
+ * CLK_LUMINANCE
+ */
+
+int __ovld __cnfn get_image_channel_order(read_only image1d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_order(write_only image1d_t);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_order(write_only image3d_t);
+#endif
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_order(read_write image1d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t);
+int __ovld __cnfn get_image_channel_order(read_write image3d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the 2D image width and height as an int2
+ * type. The width is returned in the x component, and
+ * the height in the y component.
+ */
+int2 __ovld __cnfn get_image_dim(read_only image2d_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+int2 __ovld __cnfn get_image_dim(write_only image2d_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+int2 __ovld __cnfn get_image_dim(read_write image2d_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
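+
+// Illustrative usage sketch (hypothetical): bounds-checking with
+// get_image_dim, which packs get_image_width and get_image_height into one
+// int2 as documented above.
+//
+//   __kernel void clear(write_only image2d_t dst) {
+//     int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//     int2 dim = get_image_dim(dst);
+//     if (coord.x < dim.x && coord.y < dim.y)
+//       write_imagef(dst, coord, (float4)(0.0f));
+//   }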
+
+/**
+ * Return the 3D image width, height, and depth as an
+ * int4 type. The width is returned in the x
+ * component, height in the y component, depth in the z
+ * component and the w component is 0.
+ */
+int4 __ovld __cnfn get_image_dim(read_only image3d_t);
+#ifdef cl_khr_3d_image_writes
+int4 __ovld __cnfn get_image_dim(write_only image3d_t);
+#endif
+#if defined(__opencl_c_read_write_images)
+int4 __ovld __cnfn get_image_dim(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the image array size.
+ */
+
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__opencl_c_read_write_images)
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__opencl_c_read_write_images)
+
+/**
+ * Return the number of samples associated with the image.
+ */
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_depth_t);
+
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_depth_t);
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_depth_t);
+#endif //defined(__opencl_c_read_write_images)
+#endif
+
+// OpenCL v2.0 s6.13.15 - Work-group Functions
+
+#if defined(__opencl_c_work_group_collective_functions)
+int __ovld __conv work_group_all(int predicate);
+int __ovld __conv work_group_any(int predicate);
+
+#ifdef cl_khr_fp16
+half __ovld __conv work_group_broadcast(half, size_t local_id);
+half __ovld __conv work_group_broadcast(half, size_t, size_t);
+half __ovld __conv work_group_broadcast(half, size_t, size_t, size_t);
+#endif
+int __ovld __conv work_group_broadcast(int, size_t local_id);
+int __ovld __conv work_group_broadcast(int, size_t, size_t);
+int __ovld __conv work_group_broadcast(int, size_t, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t local_id);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t local_id);
+long __ovld __conv work_group_broadcast(long, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t local_id);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t local_id);
+float __ovld __conv work_group_broadcast(float, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t, size_t, size_t);
+#ifdef cl_khr_fp64
+double __ovld __conv work_group_broadcast(double, size_t local_id);
+double __ovld __conv work_group_broadcast(double, size_t, size_t);
+double __ovld __conv work_group_broadcast(double, size_t, size_t, size_t);
+#endif //cl_khr_fp64
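+
+// Illustrative usage sketch (hypothetical): work-item 0 loads a value once
+// and broadcasts it to the whole work-group. work_group_broadcast is
+// convergent, so every work-item must call it with the same local id.
+//
+//   float shared_scale(__global const float* params) {
+//     float v = (get_local_id(0) == 0) ? params[get_group_id(0)] : 0.0f;
+//     return work_group_broadcast(v, (size_t)0);
+//   }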
+
+#ifdef cl_khr_fp16
+half __ovld __conv work_group_reduce_add(half);
+half __ovld __conv work_group_reduce_min(half);
+half __ovld __conv work_group_reduce_max(half);
+half __ovld __conv work_group_scan_exclusive_add(half);
+half __ovld __conv work_group_scan_exclusive_min(half);
+half __ovld __conv work_group_scan_exclusive_max(half);
+half __ovld __conv work_group_scan_inclusive_add(half);
+half __ovld __conv work_group_scan_inclusive_min(half);
+half __ovld __conv work_group_scan_inclusive_max(half);
+#endif
+int __ovld __conv work_group_reduce_add(int);
+int __ovld __conv work_group_reduce_min(int);
+int __ovld __conv work_group_reduce_max(int);
+int __ovld __conv work_group_scan_exclusive_add(int);
+int __ovld __conv work_group_scan_exclusive_min(int);
+int __ovld __conv work_group_scan_exclusive_max(int);
+int __ovld __conv work_group_scan_inclusive_add(int);
+int __ovld __conv work_group_scan_inclusive_min(int);
+int __ovld __conv work_group_scan_inclusive_max(int);
+uint __ovld __conv work_group_reduce_add(uint);
+uint __ovld __conv work_group_reduce_min(uint);
+uint __ovld __conv work_group_reduce_max(uint);
+uint __ovld __conv work_group_scan_exclusive_add(uint);
+uint __ovld __conv work_group_scan_exclusive_min(uint);
+uint __ovld __conv work_group_scan_exclusive_max(uint);
+uint __ovld __conv work_group_scan_inclusive_add(uint);
+uint __ovld __conv work_group_scan_inclusive_min(uint);
+uint __ovld __conv work_group_scan_inclusive_max(uint);
+long __ovld __conv work_group_reduce_add(long);
+long __ovld __conv work_group_reduce_min(long);
+long __ovld __conv work_group_reduce_max(long);
+long __ovld __conv work_group_scan_exclusive_add(long);
+long __ovld __conv work_group_scan_exclusive_min(long);
+long __ovld __conv work_group_scan_exclusive_max(long);
+long __ovld __conv work_group_scan_inclusive_add(long);
+long __ovld __conv work_group_scan_inclusive_min(long);
+long __ovld __conv work_group_scan_inclusive_max(long);
+ulong __ovld __conv work_group_reduce_add(ulong);
+ulong __ovld __conv work_group_reduce_min(ulong);
+ulong __ovld __conv work_group_reduce_max(ulong);
+ulong __ovld __conv work_group_scan_exclusive_add(ulong);
+ulong __ovld __conv work_group_scan_exclusive_min(ulong);
+ulong __ovld __conv work_group_scan_exclusive_max(ulong);
+ulong __ovld __conv work_group_scan_inclusive_add(ulong);
+ulong __ovld __conv work_group_scan_inclusive_min(ulong);
+ulong __ovld __conv work_group_scan_inclusive_max(ulong);
+float __ovld __conv work_group_reduce_add(float);
+float __ovld __conv work_group_reduce_min(float);
+float __ovld __conv work_group_reduce_max(float);
+float __ovld __conv work_group_scan_exclusive_add(float);
+float __ovld __conv work_group_scan_exclusive_min(float);
+float __ovld __conv work_group_scan_exclusive_max(float);
+float __ovld __conv work_group_scan_inclusive_add(float);
+float __ovld __conv work_group_scan_inclusive_min(float);
+float __ovld __conv work_group_scan_inclusive_max(float);
+#ifdef cl_khr_fp64
+double __ovld __conv work_group_reduce_add(double);
+double __ovld __conv work_group_reduce_min(double);
+double __ovld __conv work_group_reduce_max(double);
+double __ovld __conv work_group_scan_exclusive_add(double);
+double __ovld __conv work_group_scan_exclusive_min(double);
+double __ovld __conv work_group_scan_exclusive_max(double);
+double __ovld __conv work_group_scan_inclusive_add(double);
+double __ovld __conv work_group_scan_inclusive_min(double);
+double __ovld __conv work_group_scan_inclusive_max(double);
+#endif //cl_khr_fp64
+
+#endif //defined(__opencl_c_work_group_collective_functions)
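+
+// Illustrative usage sketch (hypothetical kernel): every work-item
+// contributes one element and all work-items receive the same work-group sum
+// from work_group_reduce_add, declared above.
+//
+//   __kernel void block_sum(__global const float* in, __global float* out) {
+//     float sum = work_group_reduce_add(in[get_global_id(0)]);
+//     if (get_local_id(0) == 0)
+//       out[get_group_id(0)] = sum;
+//   }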
+
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+#if defined(__opencl_c_pipes)
+bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
+#endif //defined(__opencl_c_pipes)
+
+
+// OpenCL v2.0 s6.13.17 - Enqueue Kernels
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+#ifdef __opencl_c_device_enqueue
+ndrange_t __ovld ndrange_1D(size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
+
+ndrange_t __ovld ndrange_2D(const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
+
+ndrange_t __ovld ndrange_3D(const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
+
+int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
+
+void __ovld retain_event(clk_event_t);
+
+void __ovld release_event(clk_event_t);
+
+clk_event_t __ovld create_user_event(void);
+
+void __ovld set_user_event_status(clk_event_t e, int state);
+
+bool __ovld is_valid_event(clk_event_t event);
+
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void*);
+
+queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
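+
+// Illustrative usage sketch (hypothetical): building an ndrange_t with the
+// 1D overload above and enqueuing a block on the default device queue.
+// enqueue_kernel and CLK_ENQUEUE_FLAGS_NO_WAIT are part of the OpenCL C 2.0
+// device-side enqueue feature and are not declared in this excerpt.
+//
+//   void enqueue_child(size_t n) {
+//     ndrange_t r = ndrange_1D(n, 64);  // global size n, work-group size 64
+//     enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_NO_WAIT, r,
+//                    ^{ /* child kernel body */ });
+//   }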
+
+// OpenCL Extension v2.0 s9.17 - Sub-groups
+
+#if defined(__opencl_subgroup_builtins)
+// Shared Sub-group Functions
+uint    __ovld get_sub_group_size(void);
+uint    __ovld get_max_sub_group_size(void);
+uint    __ovld get_num_sub_groups(void);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint    __ovld get_enqueued_num_sub_groups(void);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint    __ovld get_sub_group_id(void);
+uint    __ovld get_sub_group_local_id(void);
+
+void    __ovld __conv sub_group_barrier(cl_mem_fence_flags);
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void    __ovld __conv sub_group_barrier(cl_mem_fence_flags, memory_scope);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+int     __ovld __conv sub_group_all(int predicate);
+int     __ovld __conv sub_group_any(int predicate);
+
+int     __ovld __conv sub_group_broadcast(int  , uint sub_group_local_id);
+uint    __ovld __conv sub_group_broadcast(uint , uint sub_group_local_id);
+long    __ovld __conv sub_group_broadcast(long , uint sub_group_local_id);
+ulong   __ovld __conv sub_group_broadcast(ulong, uint sub_group_local_id);
+float   __ovld __conv sub_group_broadcast(float, uint sub_group_local_id);
+
+int     __ovld __conv sub_group_reduce_add(int  );
+uint    __ovld __conv sub_group_reduce_add(uint );
+long    __ovld __conv sub_group_reduce_add(long );
+ulong   __ovld __conv sub_group_reduce_add(ulong);
+float   __ovld __conv sub_group_reduce_add(float);
+int     __ovld __conv sub_group_reduce_min(int  );
+uint    __ovld __conv sub_group_reduce_min(uint );
+long    __ovld __conv sub_group_reduce_min(long );
+ulong   __ovld __conv sub_group_reduce_min(ulong);
+float   __ovld __conv sub_group_reduce_min(float);
+int     __ovld __conv sub_group_reduce_max(int  );
+uint    __ovld __conv sub_group_reduce_max(uint );
+long    __ovld __conv sub_group_reduce_max(long );
+ulong   __ovld __conv sub_group_reduce_max(ulong);
+float   __ovld __conv sub_group_reduce_max(float);
+
+int     __ovld __conv sub_group_scan_exclusive_add(int  );
+uint    __ovld __conv sub_group_scan_exclusive_add(uint );
+long    __ovld __conv sub_group_scan_exclusive_add(long );
+ulong   __ovld __conv sub_group_scan_exclusive_add(ulong);
+float   __ovld __conv sub_group_scan_exclusive_add(float);
+int     __ovld __conv sub_group_scan_exclusive_min(int  );
+uint    __ovld __conv sub_group_scan_exclusive_min(uint );
+long    __ovld __conv sub_group_scan_exclusive_min(long );
+ulong   __ovld __conv sub_group_scan_exclusive_min(ulong);
+float   __ovld __conv sub_group_scan_exclusive_min(float);
+int     __ovld __conv sub_group_scan_exclusive_max(int  );
+uint    __ovld __conv sub_group_scan_exclusive_max(uint );
+long    __ovld __conv sub_group_scan_exclusive_max(long );
+ulong   __ovld __conv sub_group_scan_exclusive_max(ulong);
+float   __ovld __conv sub_group_scan_exclusive_max(float);
+
+int     __ovld __conv sub_group_scan_inclusive_add(int  );
+uint    __ovld __conv sub_group_scan_inclusive_add(uint );
+long    __ovld __conv sub_group_scan_inclusive_add(long );
+ulong   __ovld __conv sub_group_scan_inclusive_add(ulong);
+float   __ovld __conv sub_group_scan_inclusive_add(float);
+int     __ovld __conv sub_group_scan_inclusive_min(int  );
+uint    __ovld __conv sub_group_scan_inclusive_min(uint );
+long    __ovld __conv sub_group_scan_inclusive_min(long );
+ulong   __ovld __conv sub_group_scan_inclusive_min(ulong);
+float   __ovld __conv sub_group_scan_inclusive_min(float);
+int     __ovld __conv sub_group_scan_inclusive_max(int  );
+uint    __ovld __conv sub_group_scan_inclusive_max(uint );
+long    __ovld __conv sub_group_scan_inclusive_max(long );
+ulong   __ovld __conv sub_group_scan_inclusive_max(ulong);
+float   __ovld __conv sub_group_scan_inclusive_max(float);
+
+#ifdef cl_khr_fp16
+half    __ovld __conv sub_group_broadcast(half, uint sub_group_local_id);
+half    __ovld __conv sub_group_reduce_add(half);
+half    __ovld __conv sub_group_reduce_min(half);
+half    __ovld __conv sub_group_reduce_max(half);
+half    __ovld __conv sub_group_scan_exclusive_add(half);
+half    __ovld __conv sub_group_scan_exclusive_min(half);
+half    __ovld __conv sub_group_scan_exclusive_max(half);
+half    __ovld __conv sub_group_scan_inclusive_add(half);
+half    __ovld __conv sub_group_scan_inclusive_min(half);
+half    __ovld __conv sub_group_scan_inclusive_max(half);
+#endif //cl_khr_fp16
+
+#ifdef cl_khr_fp64
+double  __ovld __conv sub_group_broadcast(double, uint sub_group_local_id);
+double  __ovld __conv sub_group_reduce_add(double);
+double  __ovld __conv sub_group_reduce_min(double);
+double  __ovld __conv sub_group_reduce_max(double);
+double  __ovld __conv sub_group_scan_exclusive_add(double);
+double  __ovld __conv sub_group_scan_exclusive_min(double);
+double  __ovld __conv sub_group_scan_exclusive_max(double);
+double  __ovld __conv sub_group_scan_inclusive_add(double);
+double  __ovld __conv sub_group_scan_inclusive_min(double);
+double  __ovld __conv sub_group_scan_inclusive_max(double);
+#endif //cl_khr_fp64
+
+#endif // __opencl_subgroup_builtins
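+
+// Illustrative usage sketch (hypothetical helper): an exclusive prefix sum
+// across the sub-group. Each work-item passes its element count and receives
+// the number of elements owned by lower-numbered work-items.
+//
+//   uint subgroup_offset(uint count) {
+//     return sub_group_scan_exclusive_add(count);
+//   }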
+
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif  // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif  // cl_khr_fp64
+
+#endif  // cl_khr_subgroup_extended_types
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int     __ovld sub_group_elect(void);
+int     __ovld sub_group_non_uniform_all( int predicate );
+int     __ovld sub_group_non_uniform_any( int predicate );
+
+int     __ovld sub_group_non_uniform_all_equal( char value );
+int     __ovld sub_group_non_uniform_all_equal( uchar value );
+int     __ovld sub_group_non_uniform_all_equal( short value );
+int     __ovld sub_group_non_uniform_all_equal( ushort value );
+int     __ovld sub_group_non_uniform_all_equal( int value );
+int     __ovld sub_group_non_uniform_all_equal( uint value );
+int     __ovld sub_group_non_uniform_all_equal( long value );
+int     __ovld sub_group_non_uniform_all_equal( ulong value );
+int     __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int     __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int     __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
+
+#if defined(cl_khr_subgroup_ballot)
+char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char    __ovld sub_group_broadcast_first( char value );
+uchar   __ovld sub_group_broadcast_first( uchar value );
+short   __ovld sub_group_broadcast_first( short value );
+ushort  __ovld sub_group_broadcast_first( ushort value );
+int     __ovld sub_group_broadcast_first( int value );
+uint    __ovld sub_group_broadcast_first( uint value );
+long    __ovld sub_group_broadcast_first( long value );
+ulong   __ovld sub_group_broadcast_first( ulong value );
+float   __ovld sub_group_broadcast_first( float value );
+
+uint4   __ovld sub_group_ballot( int predicate );
+int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_find_lsb( uint4 value );
+uint    __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4   __ovld __cnfn get_sub_group_eq_mask(void);
+uint4   __ovld __cnfn get_sub_group_ge_mask(void);
+uint4   __ovld __cnfn get_sub_group_gt_mask(void);
+uint4   __ovld __cnfn get_sub_group_le_mask(void);
+uint4   __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half    __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double   __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
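+
+// Illustrative usage sketch (hypothetical helper): counting how many
+// work-items in the sub-group satisfy a predicate with sub_group_ballot and
+// sub_group_ballot_bit_count, declared above.
+//
+//   uint count_active(int predicate) {
+//     uint4 mask = sub_group_ballot(predicate);
+//     return sub_group_ballot_bit_count(mask);
+//   }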
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char    __ovld sub_group_non_uniform_reduce_add( char value );
+uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
+short   __ovld sub_group_non_uniform_reduce_add( short value );
+ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
+int     __ovld sub_group_non_uniform_reduce_add( int value );
+uint    __ovld sub_group_non_uniform_reduce_add( uint value );
+long    __ovld sub_group_non_uniform_reduce_add( long value );
+ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
+float   __ovld sub_group_non_uniform_reduce_add( float value );
+
+char    __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short   __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int     __ovld sub_group_non_uniform_reduce_mul( int value );
+uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
+long    __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float   __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char    __ovld sub_group_non_uniform_reduce_min( char value );
+uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
+short   __ovld sub_group_non_uniform_reduce_min( short value );
+ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
+int     __ovld sub_group_non_uniform_reduce_min( int value );
+uint    __ovld sub_group_non_uniform_reduce_min( uint value );
+long    __ovld sub_group_non_uniform_reduce_min( long value );
+ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
+float   __ovld sub_group_non_uniform_reduce_min( float value );
+
+char    __ovld sub_group_non_uniform_reduce_max( char value );
+uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
+short   __ovld sub_group_non_uniform_reduce_max( short value );
+ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
+int     __ovld sub_group_non_uniform_reduce_max( int value );
+uint    __ovld sub_group_non_uniform_reduce_max( uint value );
+long    __ovld sub_group_non_uniform_reduce_max( long value );
+ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
+float   __ovld sub_group_non_uniform_reduce_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_reduce_and( char value );
+uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
+short   __ovld sub_group_non_uniform_reduce_and( short value );
+ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
+int     __ovld sub_group_non_uniform_reduce_and( int value );
+uint    __ovld sub_group_non_uniform_reduce_and( uint value );
+long    __ovld sub_group_non_uniform_reduce_and( long value );
+ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_or( char value );
+uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
+short   __ovld sub_group_non_uniform_reduce_or( short value );
+ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
+int     __ovld sub_group_non_uniform_reduce_or( int value );
+uint    __ovld sub_group_non_uniform_reduce_or( uint value );
+long    __ovld sub_group_non_uniform_reduce_or( long value );
+ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short   __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int     __ovld sub_group_non_uniform_reduce_xor( int value );
+uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
+long    __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_reduce_add( half value );
+half    __ovld sub_group_non_uniform_reduce_mul( half value );
+half    __ovld sub_group_non_uniform_reduce_min( half value );
+half    __ovld sub_group_non_uniform_reduce_max( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_non_uniform_reduce_add( double value );
+double  __ovld sub_group_non_uniform_reduce_mul( double value );
+double  __ovld sub_group_non_uniform_reduce_min( double value );
+double  __ovld sub_group_non_uniform_reduce_max( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
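+
+// Illustrative usage sketch (not part of the upstream declarations): a
+// stream-compaction step built from the non-uniform exclusive scan above.
+// The kernel name is hypothetical and, for brevity, it assumes a single
+// sub-group writes to the output.
+//
+//   kernel void compact_positive(global const int* in, global int* out) {
+//       int v = in[get_global_id(0)];
+//       if (v > 0) {
+//           // count how many active lanes precede this one
+//           int slot = sub_group_non_uniform_scan_exclusive_add(1);
+//           out[slot] = v;
+//       }
+//   }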
+
+#if defined(cl_khr_subgroup_shuffle)
+char    __ovld sub_group_shuffle( char value, uint index );
+uchar   __ovld sub_group_shuffle( uchar value, uint index );
+short   __ovld sub_group_shuffle( short value, uint index );
+ushort  __ovld sub_group_shuffle( ushort value, uint index );
+int     __ovld sub_group_shuffle( int value, uint index );
+uint    __ovld sub_group_shuffle( uint value, uint index );
+long    __ovld sub_group_shuffle( long value, uint index );
+ulong   __ovld sub_group_shuffle( ulong value, uint index );
+float   __ovld sub_group_shuffle( float value, uint index );
+
+char    __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short   __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int     __ovld sub_group_shuffle_xor( int value, uint mask );
+uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
+long    __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float   __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle( half value, uint index );
+half    __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle( double value, uint index );
+double  __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
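+
+// Illustrative usage sketch (not part of the upstream declarations): a
+// butterfly reduction built from sub_group_shuffle_xor. The helper name is
+// hypothetical and assumes a power-of-two sub-group size.
+//
+//   float sub_group_sum_xor(float v) {
+//       for (uint m = get_max_sub_group_size() / 2; m > 0; m /= 2)
+//           v += sub_group_shuffle_xor(v, m);
+//       return v;  // every lane now holds the full sub-group sum
+//   }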
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char    __ovld sub_group_shuffle_up( char value, uint delta );
+uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
+short   __ovld sub_group_shuffle_up( short value, uint delta );
+ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
+int     __ovld sub_group_shuffle_up( int value, uint delta );
+uint    __ovld sub_group_shuffle_up( uint value, uint delta );
+long    __ovld sub_group_shuffle_up( long value, uint delta );
+ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
+float   __ovld sub_group_shuffle_up( float value, uint delta );
+
+char    __ovld sub_group_shuffle_down( char value, uint delta );
+uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
+short   __ovld sub_group_shuffle_down( short value, uint delta );
+ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
+int     __ovld sub_group_shuffle_down( int value, uint delta );
+uint    __ovld sub_group_shuffle_down( uint value, uint delta );
+long    __ovld sub_group_shuffle_down( long value, uint delta );
+ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
+float   __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle_up( half value, uint delta );
+half    __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle_up( double value, uint delta );
+double  __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
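+
+// Illustrative usage sketch (not part of the upstream declarations): an
+// inclusive prefix sum built from sub_group_shuffle_up. The helper name is
+// hypothetical; lanes whose id is smaller than the shift distance receive
+// an undefined value, so their contribution is masked off.
+//
+//   float sub_group_inclusive_sum(float v) {
+//       for (uint d = 1; d < get_max_sub_group_size(); d *= 2) {
+//           float t = sub_group_shuffle_up(v, d);
+//           if (get_sub_group_local_id() >= d)
+//               v += t;
+//       }
+//       return v;
+//   }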
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_clustered_reduce
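+
+// Illustrative usage sketch (not part of the upstream declarations): the
+// clustered forms reduce within fixed-size groups of adjacent lanes, where
+// clustersize is a power of two no larger than the sub-group size. x below
+// is a hypothetical per-lane float.
+//
+//   // each cluster of 4 adjacent lanes receives the sum of its 4 values
+//   float quad_sum = sub_group_clustered_reduce_add(x, 4);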
+
+#if defined(cl_khr_extended_bit_ops)
+char __ovld __cnfn bitfield_insert(char, char, uint, uint);
+uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
+short __ovld __cnfn bitfield_insert(short, short, uint, uint);
+ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
+int __ovld __cnfn bitfield_insert(int, int, uint, uint);
+uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
+long __ovld __cnfn bitfield_insert(long, long, uint, uint);
+ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
+char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
+uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
+ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
+uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
+long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
+ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
+uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
+ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
+uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
+long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
+ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
+uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
+ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
+uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
+long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
+ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
+uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
+ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
+uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
+long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
+ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
+uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
+ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
+uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
+long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
+ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
+
+char __ovld __cnfn bit_reverse(char);
+uchar __ovld __cnfn bit_reverse(uchar);
+short __ovld __cnfn bit_reverse(short);
+ushort __ovld __cnfn bit_reverse(ushort);
+int __ovld __cnfn bit_reverse(int);
+uint __ovld __cnfn bit_reverse(uint);
+long __ovld __cnfn bit_reverse(long);
+ulong __ovld __cnfn bit_reverse(ulong);
+char2 __ovld __cnfn bit_reverse(char2);
+uchar2 __ovld __cnfn bit_reverse(uchar2);
+short2 __ovld __cnfn bit_reverse(short2);
+ushort2 __ovld __cnfn bit_reverse(ushort2);
+int2 __ovld __cnfn bit_reverse(int2);
+uint2 __ovld __cnfn bit_reverse(uint2);
+long2 __ovld __cnfn bit_reverse(long2);
+ulong2 __ovld __cnfn bit_reverse(ulong2);
+char3 __ovld __cnfn bit_reverse(char3);
+uchar3 __ovld __cnfn bit_reverse(uchar3);
+short3 __ovld __cnfn bit_reverse(short3);
+ushort3 __ovld __cnfn bit_reverse(ushort3);
+int3 __ovld __cnfn bit_reverse(int3);
+uint3 __ovld __cnfn bit_reverse(uint3);
+long3 __ovld __cnfn bit_reverse(long3);
+ulong3 __ovld __cnfn bit_reverse(ulong3);
+char4 __ovld __cnfn bit_reverse(char4);
+uchar4 __ovld __cnfn bit_reverse(uchar4);
+short4 __ovld __cnfn bit_reverse(short4);
+ushort4 __ovld __cnfn bit_reverse(ushort4);
+int4 __ovld __cnfn bit_reverse(int4);
+uint4 __ovld __cnfn bit_reverse(uint4);
+long4 __ovld __cnfn bit_reverse(long4);
+ulong4 __ovld __cnfn bit_reverse(ulong4);
+char8 __ovld __cnfn bit_reverse(char8);
+uchar8 __ovld __cnfn bit_reverse(uchar8);
+short8 __ovld __cnfn bit_reverse(short8);
+ushort8 __ovld __cnfn bit_reverse(ushort8);
+int8 __ovld __cnfn bit_reverse(int8);
+uint8 __ovld __cnfn bit_reverse(uint8);
+long8 __ovld __cnfn bit_reverse(long8);
+ulong8 __ovld __cnfn bit_reverse(ulong8);
+char16 __ovld __cnfn bit_reverse(char16);
+uchar16 __ovld __cnfn bit_reverse(uchar16);
+short16 __ovld __cnfn bit_reverse(short16);
+ushort16 __ovld __cnfn bit_reverse(ushort16);
+int16 __ovld __cnfn bit_reverse(int16);
+uint16 __ovld __cnfn bit_reverse(uint16);
+long16 __ovld __cnfn bit_reverse(long16);
+ulong16 __ovld __cnfn bit_reverse(ulong16);
+#endif // cl_khr_extended_bit_ops
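+
+// Illustrative usage sketch (not part of the upstream declarations):
+// bitfield_insert(base, insert, offset, count) replaces count bits of base,
+// starting at bit offset, with the low count bits of insert. Values below
+// are hypothetical.
+//
+//   uint base   = 0xFFFFFFFFu;
+//   uint field  = 0x5u;                                    // 0b101
+//   uint packed = bitfield_insert(base, field, 4, 3);      // 0xFFFFFFDFu
+//   uint back   = bitfield_extract_unsigned(packed, 4, 3); // 0x5u again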
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit)
+uint __ovld __cnfn dot(uchar4, uchar4);
+int __ovld __cnfn dot(char4, char4);
+int __ovld __cnfn dot(uchar4, char4);
+int __ovld __cnfn dot(char4, uchar4);
+
+uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
+int __ovld __cnfn dot_acc_sat(char4, char4, int);
+int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
+int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit
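+
+// Illustrative usage sketch (not part of the upstream declarations): dot()
+// on 4x8-bit vectors multiplies component-wise and sums, as used in
+// quantized int8 inner products. Values below are hypothetical.
+//
+//   uchar4 a = (uchar4)(1, 2, 3, 4);
+//   uchar4 b = (uchar4)(5, 6, 7, 8);
+//   uint d   = dot(a, b);               // 1*5 + 2*6 + 3*7 + 4*8 = 70
+//   uint acc = dot_acc_sat(a, b, 30u);  // 70 + 30 = 100, saturating on overflow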
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
+uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
+int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
+
+uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
+int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
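+
+// Illustrative usage sketch (not part of the upstream declarations): the
+// packed forms treat each uint operand as four 8-bit components, and the
+// uu/ss/us/su suffix selects the signedness of each operand.
+//
+//   uint a = 0x04030201u;                  // components 1, 2, 3, 4
+//   uint b = 0x08070605u;                  // components 5, 6, 7, 8
+//   uint d = dot_4x8packed_uu_uint(a, b);  // 1*5 + 2*6 + 3*7 + 4*8 = 70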
+
+#if defined(cl_khr_subgroup_rotate)
+char __ovld __conv sub_group_rotate(char, int);
+uchar __ovld __conv sub_group_rotate(uchar, int);
+short __ovld __conv sub_group_rotate(short, int);
+ushort __ovld __conv sub_group_rotate(ushort, int);
+int __ovld __conv sub_group_rotate(int, int);
+uint __ovld __conv sub_group_rotate(uint, int);
+long __ovld __conv sub_group_rotate(long, int);
+ulong __ovld __conv sub_group_rotate(ulong, int);
+float __ovld __conv sub_group_rotate(float, int);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_rotate(double, int);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_rotate(half, int);
+#endif // cl_khr_fp16
+
+char __ovld __conv sub_group_clustered_rotate(char, int, uint);
+uchar __ovld __conv sub_group_clustered_rotate(uchar, int, uint);
+short __ovld __conv sub_group_clustered_rotate(short, int, uint);
+ushort __ovld __conv sub_group_clustered_rotate(ushort, int, uint);
+int __ovld __conv sub_group_clustered_rotate(int, int, uint);
+uint __ovld __conv sub_group_clustered_rotate(uint, int, uint);
+long __ovld __conv sub_group_clustered_rotate(long, int, uint);
+ulong __ovld __conv sub_group_clustered_rotate(ulong, int, uint);
+float __ovld __conv sub_group_clustered_rotate(float, int, uint);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_clustered_rotate(double, int, uint);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_clustered_rotate(half, int, uint);
+#endif // cl_khr_fp16
+#endif // cl_khr_subgroup_rotate
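+
+// Illustrative usage sketch (not part of the upstream declarations):
+// sub_group_rotate moves values circularly across lanes, so a delta of 1
+// hands each lane the value of the next lane, wrapping at the end.
+//
+//   int mine = (int)get_sub_group_local_id();
+//   int next = sub_group_rotate(mine, 1);  // lane i receives lane (i+1) % size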
+
+#if defined(cl_intel_subgroups)
+// Intel-Specific Sub Group Functions
+float   __ovld __conv intel_sub_group_shuffle( float , uint );
+float2  __ovld __conv intel_sub_group_shuffle( float2, uint );
+float3  __ovld __conv intel_sub_group_shuffle( float3, uint );
+float4  __ovld __conv intel_sub_group_shuffle( float4, uint );
+float8  __ovld __conv intel_sub_group_shuffle( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle( float16, uint );
+
+int     __ovld __conv intel_sub_group_shuffle( int , uint );
+int2    __ovld __conv intel_sub_group_shuffle( int2, uint );
+int3    __ovld __conv intel_sub_group_shuffle( int3, uint );
+int4    __ovld __conv intel_sub_group_shuffle( int4, uint );
+int8    __ovld __conv intel_sub_group_shuffle( int8, uint );
+int16   __ovld __conv intel_sub_group_shuffle( int16, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle( uint , uint );
+uint2   __ovld __conv intel_sub_group_shuffle( uint2, uint );
+uint3   __ovld __conv intel_sub_group_shuffle( uint3, uint );
+uint4   __ovld __conv intel_sub_group_shuffle( uint4, uint );
+uint8   __ovld __conv intel_sub_group_shuffle( uint8, uint );
+uint16  __ovld __conv intel_sub_group_shuffle( uint16, uint );
+
+long    __ovld __conv intel_sub_group_shuffle( long, uint );
+ulong   __ovld __conv intel_sub_group_shuffle( ulong, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint );
+float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint );
+float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint );
+float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint );
+float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint );
+float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint );
+int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint );
+int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint );
+int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint );
+int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint );
+int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint );
+uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_down( long cur, long next, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_down( ulong cur, ulong next, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint );
+float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint );
+float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint );
+float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint );
+float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint );
+float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint );
+int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint );
+int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint );
+int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint );
+int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint );
+int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint );
+uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint );
+
+float   __ovld __conv intel_sub_group_shuffle_xor( float , uint );
+float2  __ovld __conv intel_sub_group_shuffle_xor( float2, uint );
+float3  __ovld __conv intel_sub_group_shuffle_xor( float3, uint );
+float4  __ovld __conv intel_sub_group_shuffle_xor( float4, uint );
+float8  __ovld __conv intel_sub_group_shuffle_xor( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle_xor( float16, uint );
+
+int     __ovld __conv intel_sub_group_shuffle_xor( int , uint );
+int2    __ovld __conv intel_sub_group_shuffle_xor( int2, uint );
+int3    __ovld __conv intel_sub_group_shuffle_xor( int3, uint );
+int4    __ovld __conv intel_sub_group_shuffle_xor( int4, uint );
+int8    __ovld __conv intel_sub_group_shuffle_xor( int8, uint );
+int16   __ovld __conv intel_sub_group_shuffle_xor( int16, uint );
+
+uint    __ovld __conv intel_sub_group_shuffle_xor( uint , uint );
+uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2, uint );
+uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3, uint );
+uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4, uint );
+uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8, uint );
+uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16, uint );
+
+long    __ovld __conv intel_sub_group_shuffle_xor( long, uint );
+ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong, uint );
+
+#if defined(__opencl_c_images)
+uint    __ovld __conv intel_sub_group_block_read(read_only image2d_t, int2);
+uint2   __ovld __conv intel_sub_group_block_read2(read_only image2d_t, int2);
+uint4   __ovld __conv intel_sub_group_block_read4(read_only image2d_t, int2);
+uint8   __ovld __conv intel_sub_group_block_read8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t, int2);
+uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t, int2);
+uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t, int2);
+uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
+uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
+uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
+uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
+
+#if defined(__opencl_c_images)
+void    __ovld __conv intel_sub_group_block_write(write_only image2d_t, int2, uint);
+void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t, int2, uint2);
+void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t, int2, uint4);
+void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t, int2, uint8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void    __ovld __conv intel_sub_group_block_write(read_write image2d_t, int2, uint);
+void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t, int2, uint2);
+void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t, int2, uint4);
+void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
+
+void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
+void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
+void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
+void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
+
+#if defined(cl_khr_fp16)
+half    __ovld __conv intel_sub_group_shuffle( half, uint );
+half    __ovld __conv intel_sub_group_shuffle_down( half cur, half next, uint );
+half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint );
+half    __ovld __conv intel_sub_group_shuffle_xor( half, uint );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld __conv intel_sub_group_shuffle( double, uint );
+double  __ovld __conv intel_sub_group_shuffle_down( double cur, double next, uint );
+double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint );
+double  __ovld __conv intel_sub_group_shuffle_xor( double, uint );
+#endif // cl_khr_fp64
+
+#endif // cl_intel_subgroups
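+
+// Illustrative usage sketch (not part of the upstream declarations): the
+// block reads load one uint per lane from consecutive addresses in a single
+// coalesced transaction; the pointer must be uniform across the sub-group.
+// p and q below are hypothetical __global uint* arguments.
+//
+//   uint v = intel_sub_group_block_read(p);  // sub_group_size uints from p
+//   intel_sub_group_block_write(q, v + 1u);  // one uint per lane back to q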
+
+#if defined(cl_intel_subgroups_short)
+short       __ovld __conv intel_sub_group_broadcast( short , uint sub_group_local_id );
+short2      __ovld __conv intel_sub_group_broadcast( short2, uint sub_group_local_id );
+short3      __ovld __conv intel_sub_group_broadcast( short3, uint sub_group_local_id );
+short4      __ovld __conv intel_sub_group_broadcast( short4, uint sub_group_local_id );
+short8      __ovld __conv intel_sub_group_broadcast( short8, uint sub_group_local_id );
+
+ushort      __ovld __conv intel_sub_group_broadcast( ushort , uint sub_group_local_id );
+ushort2     __ovld __conv intel_sub_group_broadcast( ushort2, uint sub_group_local_id );
+ushort3     __ovld __conv intel_sub_group_broadcast( ushort3, uint sub_group_local_id );
+ushort4     __ovld __conv intel_sub_group_broadcast( ushort4, uint sub_group_local_id );
+ushort8     __ovld __conv intel_sub_group_broadcast( ushort8, uint sub_group_local_id );
+
+short       __ovld __conv intel_sub_group_shuffle( short  , uint );
+short2      __ovld __conv intel_sub_group_shuffle( short2 , uint );
+short3      __ovld __conv intel_sub_group_shuffle( short3 , uint );
+short4      __ovld __conv intel_sub_group_shuffle( short4 , uint );
+short8      __ovld __conv intel_sub_group_shuffle( short8 , uint );
+short16     __ovld __conv intel_sub_group_shuffle( short16, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle( ushort  , uint );
+ushort2     __ovld __conv intel_sub_group_shuffle( ushort2 , uint );
+ushort3     __ovld __conv intel_sub_group_shuffle( ushort3 , uint );
+ushort4     __ovld __conv intel_sub_group_shuffle( ushort4 , uint );
+ushort8     __ovld __conv intel_sub_group_shuffle( ushort8 , uint );
+ushort16    __ovld __conv intel_sub_group_shuffle( ushort16, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint );
+short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint );
+short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint );
+short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint );
+short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint );
+short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_up( short   prev, short   cur, uint );
+short2      __ovld __conv intel_sub_group_shuffle_up( short2  prev, short2  cur, uint );
+short3      __ovld __conv intel_sub_group_shuffle_up( short3  prev, short3  cur, uint );
+short4      __ovld __conv intel_sub_group_shuffle_up( short4  prev, short4  cur, uint );
+short8      __ovld __conv intel_sub_group_shuffle_up( short8  prev, short8  cur, uint );
+short16     __ovld __conv intel_sub_group_shuffle_up( short16 prev, short16 cur, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   prev, ushort   cur, uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  prev, ushort2  cur, uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  prev, ushort3  cur, uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  prev, ushort4  cur, uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  prev, ushort8  cur, uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 prev, ushort16 cur, uint );
+
+short       __ovld __conv intel_sub_group_shuffle_xor( short  , uint );
+short2      __ovld __conv intel_sub_group_shuffle_xor( short2 , uint );
+short3      __ovld __conv intel_sub_group_shuffle_xor( short3 , uint );
+short4      __ovld __conv intel_sub_group_shuffle_xor( short4 , uint );
+short8      __ovld __conv intel_sub_group_shuffle_xor( short8 , uint );
+short16     __ovld __conv intel_sub_group_shuffle_xor( short16, uint );
+
+ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort  , uint );
+ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2 , uint );
+ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3 , uint );
+ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4 , uint );
+ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8 , uint );
+ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16, uint );
+
+short       __ovld __conv intel_sub_group_reduce_add( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
+short       __ovld __conv intel_sub_group_reduce_min( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
+short       __ovld __conv intel_sub_group_reduce_max( short   x );
+ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
+
+short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
+short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
+short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
+ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
+
+short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
+short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
+short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
+ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
+
+#if defined(__opencl_c_images)
+uint       __ovld __conv intel_sub_group_block_read_ui(read_only image2d_t, int2);
+uint2      __ovld __conv intel_sub_group_block_read_ui2(read_only image2d_t, int2);
+uint4      __ovld __conv intel_sub_group_block_read_ui4(read_only image2d_t, int2);
+uint8      __ovld __conv intel_sub_group_block_read_ui8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+uint       __ovld __conv intel_sub_group_block_read_ui(read_write image2d_t, int2);
+uint2      __ovld __conv intel_sub_group_block_read_ui2(read_write image2d_t, int2);
+uint4      __ovld __conv intel_sub_group_block_read_ui4(read_write image2d_t, int2);
+uint8      __ovld __conv intel_sub_group_block_read_ui8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
+uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
+uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
+uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
+
+#if defined(__opencl_c_images)
+void       __ovld __conv intel_sub_group_block_write_ui(write_only image2d_t, int2, uint);
+void       __ovld __conv intel_sub_group_block_write_ui2(write_only image2d_t, int2, uint2);
+void       __ovld __conv intel_sub_group_block_write_ui4(write_only image2d_t, int2, uint4);
+void       __ovld __conv intel_sub_group_block_write_ui8(write_only image2d_t, int2, uint8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void       __ovld __conv intel_sub_group_block_write_ui(read_write image2d_t, int2, uint);
+void       __ovld __conv intel_sub_group_block_write_ui2(read_write image2d_t, int2, uint2);
+void       __ovld __conv intel_sub_group_block_write_ui4(read_write image2d_t, int2, uint4);
+void       __ovld __conv intel_sub_group_block_write_ui8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
+
+void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
+void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
+void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
+void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
+
+#if defined(__opencl_c_images)
+ushort      __ovld __conv intel_sub_group_block_read_us(read_only image2d_t, int2);
+ushort2     __ovld __conv intel_sub_group_block_read_us2(read_only image2d_t, int2);
+ushort4     __ovld __conv intel_sub_group_block_read_us4(read_only image2d_t, int2);
+ushort8     __ovld __conv intel_sub_group_block_read_us8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t, int2);
+ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t, int2);
+ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t, int2);
+ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
+
+ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
+ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
+ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
+ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
+
+#if defined(__opencl_c_images)
+void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t, int2, ushort);
+void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t, int2, ushort2);
+void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t, int2, ushort4);
+void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_images)
+
+#if defined(__opencl_c_read_write_images)
+void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t, int2, ushort);
+void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t, int2, ushort2);
+void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t, int2, ushort4);
+void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_read_write_images)
+
+void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
+void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
+void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
+void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
+#endif // cl_intel_subgroups_short
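+
+// Illustrative usage sketch (not part of the upstream declarations): the
+// _us block accesses are the 16-bit analogue of the uint block accesses
+// above, e.g. for streaming half data stored as ushort. src and dst below
+// are hypothetical __global ushort* arguments.
+//
+//   ushort2 pair = intel_sub_group_block_read_us2(src);  // 2 ushorts per lane
+//   intel_sub_group_block_write_us2(dst, pair);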
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+// MCE built-in functions
+uchar __ovld
+intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
+    uchar slice_type, uchar qp);
+ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
+    uchar slice_type, uchar qp);
+uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
+    uchar slice_type, uchar qp);
+uint2 __ovld
+intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
+    uchar slice_type, uchar qp);
+
+uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
+uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
+uchar __ovld
+intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
+
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_shape_penalty(
+    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_ac_only_haar(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_mce_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
+    intel_sub_group_avc_mce_result_t result);
+uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld
+intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_mce_result_t result);
+
+// IME built-in functions
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_initialize(
+    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference(
+    short2 ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference(
+    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_max_motion_vector_count(
+    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_unidirectional_mix_disable(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_early_search_termination_threshold(
+    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_weighted_sad(
+    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
+
+__attribute__((deprecated("If you use the latest Intel driver, please use "
+                          "intel_sub_group_avc_ime_ref_window_size instead",
+                          "intel_sub_group_avc_ime_ref_window_size")))
+ushort2 __ovld
+intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
+ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
+    uchar search_window_config, char dual_ref);
+short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
+    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
+    ushort2 image_size);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif
+
+intel_sub_group_avc_ime_single_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_single_reference_streamin(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_dual_reference_streamin(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_single_reference_streamout(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_dual_reference_streamout(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+
+uchar __ovld intel_sub_group_avc_ime_get_border_reached(
+    uchar image_select, intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
+    intel_sub_group_avc_ime_result_t result);
+
+// REF built-in functions
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_fme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar sad_adjustment);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_bme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar bidirectional_weight, uchar sad_adjustment);
+
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bidirectional_mix_disable(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bilinear_filter_enable(
+    intel_sub_group_avc_ref_payload_t payload);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+#endif //defined(__opencl_c_images)
+
+// SIC built-in functions
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_initialize(
+    ushort2 src_coord);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_skc(
+    uint skip_block_partition_type, uint skip_motion_vector_mask,
+    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
+    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+uint __ovld
+intel_sub_group_avc_sic_get_motion_vector_mask(
+    uint skip_block_partition_type, uchar direction);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
+    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
+    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
+    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
+    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_forward_transform_enable(
+    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
+    uchar block_based_skip_type,
+    intel_sub_group_avc_sic_payload_t payload);
+
+#if defined(__opencl_c_images)
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_ipe(
+    read_only image2d_t src_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+#endif //defined(__opencl_c_images)
+
+uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
+    intel_sub_group_avc_sic_result_t result);
+uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
+    intel_sub_group_avc_sic_result_t result);
+uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
+    intel_sub_group_avc_sic_result_t result);
+
+// Wrappers
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_ac_only_haar(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_ac_only_haar(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_ac_only_haar(
+    intel_sub_group_avc_sic_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
+    intel_sub_group_avc_ime_result_t result);
+ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
+    intel_sub_group_avc_ref_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
+    intel_sub_group_avc_ref_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
+    intel_sub_group_avc_sic_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
+    intel_sub_group_avc_ref_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
+    intel_sub_group_avc_ref_result_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld
+intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ref_result_t result);
+
+// Type conversion functions
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_payload(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_payload(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_payload(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_result(
+    intel_sub_group_avc_ime_result_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_result(
+    intel_sub_group_avc_ref_result_t result);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_result(
+    intel_sub_group_avc_sic_result_t result);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_result(
+    intel_sub_group_avc_mce_result_t result);
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
+
+#ifdef cl_amd_media_ops
+uint __ovld amd_bitalign(uint, uint, uint);
+uint2 __ovld amd_bitalign(uint2, uint2, uint2);
+uint3 __ovld amd_bitalign(uint3, uint3, uint3);
+uint4 __ovld amd_bitalign(uint4, uint4, uint4);
+uint8 __ovld amd_bitalign(uint8, uint8, uint8);
+uint16 __ovld amd_bitalign(uint16, uint16, uint16);
+
+uint __ovld amd_bytealign(uint, uint, uint);
+uint2 __ovld amd_bytealign(uint2, uint2, uint2);
+uint3 __ovld amd_bytealign(uint3, uint3, uint3);
+uint4 __ovld amd_bytealign(uint4, uint4, uint4);
+uint8 __ovld amd_bytealign(uint8, uint8, uint8);
+uint16 __ovld amd_bytealign(uint16, uint16, uint16);
+
+uint __ovld amd_lerp(uint, uint, uint);
+uint2 __ovld amd_lerp(uint2, uint2, uint2);
+uint3 __ovld amd_lerp(uint3, uint3, uint3);
+uint4 __ovld amd_lerp(uint4, uint4, uint4);
+uint8 __ovld amd_lerp(uint8, uint8, uint8);
+uint16 __ovld amd_lerp(uint16, uint16, uint16);
+
+uint __ovld amd_pack(float4 v);
+
+uint __ovld amd_sad4(uint4, uint4, uint);
+
+uint __ovld amd_sadhi(uint, uint, uint);
+uint2 __ovld amd_sadhi(uint2, uint2, uint2);
+uint3 __ovld amd_sadhi(uint3, uint3, uint3);
+uint4 __ovld amd_sadhi(uint4, uint4, uint4);
+uint8 __ovld amd_sadhi(uint8, uint8, uint8);
+uint16 __ovld amd_sadhi(uint16, uint16, uint16);
+
+uint __ovld amd_sad(uint, uint, uint);
+uint2 __ovld amd_sad(uint2, uint2, uint2);
+uint3 __ovld amd_sad(uint3, uint3, uint3);
+uint4 __ovld amd_sad(uint4, uint4, uint4);
+uint8 __ovld amd_sad(uint8, uint8, uint8);
+uint16 __ovld amd_sad(uint16, uint16, uint16);
+
+float __ovld amd_unpack0(uint);
+float2 __ovld amd_unpack0(uint2);
+float3 __ovld amd_unpack0(uint3);
+float4 __ovld amd_unpack0(uint4);
+float8 __ovld amd_unpack0(uint8);
+float16 __ovld amd_unpack0(uint16);
+
+float __ovld amd_unpack1(uint);
+float2 __ovld amd_unpack1(uint2);
+float3 __ovld amd_unpack1(uint3);
+float4 __ovld amd_unpack1(uint4);
+float8 __ovld amd_unpack1(uint8);
+float16 __ovld amd_unpack1(uint16);
+
+float __ovld amd_unpack2(uint);
+float2 __ovld amd_unpack2(uint2);
+float3 __ovld amd_unpack2(uint3);
+float4 __ovld amd_unpack2(uint4);
+float8 __ovld amd_unpack2(uint8);
+float16 __ovld amd_unpack2(uint16);
+
+float __ovld amd_unpack3(uint);
+float2 __ovld amd_unpack3(uint2);
+float3 __ovld amd_unpack3(uint3);
+float4 __ovld amd_unpack3(uint4);
+float8 __ovld amd_unpack3(uint8);
+float16 __ovld amd_unpack3(uint16);
+#endif // cl_amd_media_ops
+
+#ifdef cl_amd_media_ops2
+int __ovld amd_bfe(int src0, uint src1, uint src2);
+int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
+int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
+int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
+int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
+int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfe(uint src0, uint src1, uint src2);
+uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfm(uint src0, uint src1);
+uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
+uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
+uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
+uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
+uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
+
+float __ovld amd_max3(float src0, float src1, float src2);
+float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_max3(int src0, int src1, int src2);
+int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_max3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_median3(float src0, float src1, float src2);
+float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_median3(int src0, int src1, int src2);
+int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_median3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_min3(float src0, float src1, float src2);
+float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_min3(int src0, int src1, int src2);
+int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_min3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
+
+ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+uint __ovld amd_msad(uint src0, uint src1, uint src2);
+uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadd(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadw(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
+#endif // cl_amd_media_ops2
+
+#if defined(cl_arm_integer_dot_product_int8)
+uint __ovld arm_dot(uchar4, uchar4);
+int __ovld arm_dot(char4, char4);
+#endif // defined(cl_arm_integer_dot_product_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int8)
+uint __ovld arm_dot_acc(uchar4, uchar4, uint);
+int __ovld arm_dot_acc(char4, char4, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int16)
+uint __ovld arm_dot_acc(ushort2, ushort2, uint);
+int __ovld arm_dot_acc(short2, short2, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
+
+#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+uint __ovld arm_dot_acc_sat(uchar4, uchar4, uint);
+int __ovld arm_dot_acc_sat(char4, char4, int);
+#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#undef __opencl_c_named_address_space_builtins
+
+#undef __cnfn
+#undef __ovld
+#endif //_OPENCL_H_
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/cmath
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/cmath
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/complex_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/math.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/math.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/new b/linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/new
rename to linux-x86/lib64/clang/16.0.2/include/openmp_wrappers/new
diff --git a/linux-x86/lib64/clang/16.0.2/include/orc/c_api.h b/linux-x86/lib64/clang/16.0.2/include/orc/c_api.h
new file mode 100644
index 0000000..96d01df
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/orc/c_api.h
@@ -0,0 +1,205 @@
+/*===- c_api.h - C API for the ORC runtime ------------------------*- C -*-===*\
+|*                                                                            *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
+|* Exceptions.                                                                *|
+|* See https://llvm.org/LICENSE.txt for license information.                  *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file defines the C API for the ORC runtime                            *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef ORC_RT_C_API_H
+#define ORC_RT_C_API_H
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Helper to suppress strict prototype warnings. */
+#ifdef __clang__
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN                                       \
+  _Pragma("clang diagnostic push")                                             \
+      _Pragma("clang diagnostic error \"-Wstrict-prototypes\"")
+#define ORC_RT_C_STRICT_PROTOTYPES_END _Pragma("clang diagnostic pop")
+#else
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+/* Helper to wrap C code for C++ */
+#ifdef __cplusplus
+#define ORC_RT_C_EXTERN_C_BEGIN                                                \
+  extern "C" {                                                                 \
+  ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END                                                  \
+  ORC_RT_C_STRICT_PROTOTYPES_END                                               \
+  }
+#else
+#define ORC_RT_C_EXTERN_C_BEGIN ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+ORC_RT_C_EXTERN_C_BEGIN
+
+typedef union {
+  char *ValuePtr;
+  char Value[sizeof(char *)];
+} __orc_rt_CWrapperFunctionResultDataUnion;
+
+/**
+ * __orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
+ * out-of-band error state.
+ *
+ * If Size == 0 and Data.ValuePtr is non-zero then the value is in the
+ * 'out-of-band error' state, and Data.ValuePtr points at a malloc-allocated,
+ * null-terminated string error message.
+ *
+ * If Size <= sizeof(__orc_rt_CWrapperFunctionResultDataUnion) then the value
+ * is in the 'small' state and the content is held in the first Size bytes of
+ * Data.Value.
+ *
+ * If Size > sizeof(__orc_rt_CWrapperFunctionResultDataUnion) then the value
+ * is in the 'large' state and the content is held in the first Size bytes of the
+ * memory pointed to by Data.ValuePtr. This memory must have been allocated by
+ * malloc, and will be freed with free when this value is destroyed.
+ */
+typedef struct {
+  __orc_rt_CWrapperFunctionResultDataUnion Data;
+  size_t Size;
+} __orc_rt_CWrapperFunctionResult;
+
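+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * classify a result value according to the three states documented above.
+ */
+static inline const char *__orc_rt_CWrapperFunctionResultStateName(
+    const __orc_rt_CWrapperFunctionResult *R) {
+  if (R->Size == 0 && R->Data.ValuePtr)
+    return "out-of-band error"; /* ValuePtr holds a malloc'd error string. */
+  if (R->Size <= sizeof(R->Data.Value))
+    return "small";             /* Content held inline in Data.Value. */
+  return "large";               /* Content held in malloc'd Data.ValuePtr. */
+}
+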
+typedef struct __orc_rt_CSharedOpaqueJITProcessControl
+    *__orc_rt_SharedJITProcessControlRef;
+
+/**
+ * Zero-initialize an __orc_rt_CWrapperFunctionResult.
+ */
+static inline void
+__orc_rt_CWrapperFunctionResultInit(__orc_rt_CWrapperFunctionResult *R) {
+  R->Size = 0;
+  R->Data.ValuePtr = 0;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult with an uninitialized buffer of
+ * size Size.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CWrapperFunctionResultAllocate(size_t Size) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = Size;
+  // If Size is 0 ValuePtr must be 0 or it is considered an out-of-band error.
+  R.Data.ValuePtr = 0;
+  if (Size > sizeof(R.Data.Value))
+    R.Data.ValuePtr = (char *)malloc(Size);
+  return R;
+}
+
+/**
+ * Create an __orc_rt_WrapperFunctionResult from the given data range.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = Size;
+  if (R.Size > sizeof(R.Data.Value)) {
+    char *Tmp = (char *)malloc(Size);
+    memcpy(Tmp, Data, Size);
+    R.Data.ValuePtr = Tmp;
+  } else
+    memcpy(R.Data.Value, Data, Size);
+  return R;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult by copying the given string,
+ * including the null-terminator.
+ *
+ * This function copies the input string. The client is responsible for freeing
+ * the Source arg.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
+  return __orc_rt_CreateCWrapperFunctionResultFromRange(Source,
+                                                        strlen(Source) + 1);
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult representing an out-of-band
+ * error.
+ *
+ * This function copies the input string. The client is responsible for freeing
+ * the ErrMsg arg.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
+  __orc_rt_CWrapperFunctionResult R;
+  R.Size = 0;
+  char *Tmp = (char *)malloc(strlen(ErrMsg) + 1);
+  strcpy(Tmp, ErrMsg);
+  R.Data.ValuePtr = Tmp;
+  return R;
+}
+
+/**
+ * This should be called to destroy __orc_rt_CWrapperFunctionResult values
+ * regardless of their state.
+ */
+static inline void
+__orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
+  if (R->Size > sizeof(R->Data.Value) ||
+      (R->Size == 0 && R->Data.ValuePtr))
+    free(R->Data.ValuePtr);
+}
+
+/**
+ * Get a pointer to the data contained in the given
+ * __orc_rt_CWrapperFunctionResult.
+ */
+static inline char *
+__orc_rt_CWrapperFunctionResultData(__orc_rt_CWrapperFunctionResult *R) {
+  assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
+         "Cannot get data for out-of-band error value");
+  return R->Size > sizeof(R->Data.Value) ? R->Data.ValuePtr : R->Data.Value;
+}
+
+/**
+ * Safely get the size of the given __orc_rt_CWrapperFunctionResult.
+ *
+ * Asserts that we're not trying to access the size of an error value.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
+  assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
+         "Cannot get size for out-of-band error value");
+  return R->Size;
+}
+
+/**
+ * Returns 1 if this value is equivalent to a value just initialized by
+ * __orc_rt_CWrapperFunctionResultInit, 0 otherwise.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultEmpty(const __orc_rt_CWrapperFunctionResult *R) {
+  return R->Size == 0 && R->Data.ValuePtr == 0;
+}
+
+/**
+ * Returns a pointer to the out-of-band error string for this
+ * __orc_rt_CWrapperFunctionResult, or null if there is no error.
+ *
+ * The __orc_rt_CWrapperFunctionResult retains ownership of the error
+ * string, so it should be copied if the caller wishes to preserve it.
+ */
+static inline const char *__orc_rt_CWrapperFunctionResultGetOutOfBandError(
+    const __orc_rt_CWrapperFunctionResult *R) {
+  return R->Size == 0 ? R->Data.ValuePtr : 0;
+}
+
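+/* Illustrative usage sketch (editor's addition, not part of the upstream
+ * header): a round trip through the API declared above.
+ */
+static inline void __orc_rt_CWrapperFunctionResultUsageSketch(void) {
+  __orc_rt_CWrapperFunctionResult R =
+      __orc_rt_CreateCWrapperFunctionResultFromString("hello");
+  if (!__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R)) {
+    /* Size is 6: "hello" plus the copied null terminator. */
+    char *Data = __orc_rt_CWrapperFunctionResultData(&R);
+    size_t Size = __orc_rt_CWrapperFunctionResultSize(&R);
+    (void)Data;
+    (void)Size;
+  }
+  __orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+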
+ORC_RT_C_EXTERN_C_END
+
+#endif /* ORC_RT_C_API_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/pconfigintrin.h b/linux-x86/lib64/clang/16.0.2/include/pconfigintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/pconfigintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/pconfigintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/pkuintrin.h b/linux-x86/lib64/clang/16.0.2/include/pkuintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/pkuintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/pmmintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/pmmintrin.h
index eda8356..ee660e9 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/pmmintrin.h
@@ -35,7 +35,7 @@
 ///    A pointer to a 128-bit integer vector containing integer values.
 /// \returns A 128-bit vector containing the moved values.
 static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_lddqu_si128(__m128i const *__p)
+_mm_lddqu_si128(__m128i_u const *__p)
 {
   return (__m128i)__builtin_ia32_lddqu((char const *)__p);
 }
diff --git a/linux-x86/lib64/clang/14.0.2/include/popcntintrin.h b/linux-x86/lib64/clang/16.0.2/include/popcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/popcntintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/popcntintrin.h
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h
new file mode 100644
index 0000000..0dc0d14
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmi2intrin.h
@@ -0,0 +1,134 @@
+/*===---- bmi2intrin.h - Implementation of BMI2 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmi2intrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMI2INTRIN_H_
+#define BMI2INTRIN_H_
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bzhi_u32(unsigned int __X, unsigned int __Y) {
+  return ((__X << (32 - __Y)) >> (32 - __Y));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) {
+  unsigned long long __res = (unsigned long long)__X * __Y;
+  *__P = (unsigned int)(__res >> 32);
+  return (unsigned int)__res;
+}
+
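+/* Worked examples (editor's illustration, not upstream code):
+   _bzhi_u32(0xFF, 4) == 0x0F -- zeroes all bits at index 4 and above;
+   _mulx_u32(0x80000000u, 2, &__hi) returns 0 and sets __hi to 1, splitting
+   the 64-bit product 0x100000000 across the two 32-bit halves.  */
+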
+#ifdef __PPC64__
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bzhi_u64(unsigned long long __X, unsigned long long __Y) {
+  return ((__X << (64 - __Y)) >> (64 - __Y));
+}
+
+/* __int128 requires a 64-bit base architecture.  */
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mulx_u64(unsigned long long __X, unsigned long long __Y,
+              unsigned long long *__P) {
+  unsigned __int128 __res = (unsigned __int128)__X * __Y;
+  *__P = (unsigned long long)(__res >> 64);
+  return (unsigned long long)__res;
+}
+
+#ifdef _ARCH_PWR7
+/* popcount and bpermd require power7 minimum.  */
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pdep_u64(unsigned long long __X, unsigned long long __M) {
+  unsigned long __result = 0x0UL;
+  const unsigned long __mask = 0x8000000000000000UL;
+  unsigned long __m = __M;
+  unsigned long __c, __t;
+  unsigned long __p;
+
+  /* The pop-count of the mask gives the number of bits from the
+   source to process.  This is also needed to shift bits from the
+   source into the correct position for the result.  */
+  __p = 64 - __builtin_popcountl(__M);
+
+  /* The loop is for the number of '1' bits in the mask and clearing
+   each mask bit as it is processed.  */
+  while (__m != 0) {
+    __c = __builtin_clzl(__m);
+    __t = __X << (__p - __c);
+    __m ^= (__mask >> __c);
+    __result |= (__t & (__mask >> __c));
+    __p++;
+  }
+  return __result;
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pext_u64(unsigned long long __X, unsigned long long __M) {
+  unsigned long __p = 0x4040404040404040UL; // initial bit permute control
+  const unsigned long __mask = 0x8000000000000000UL;
+  unsigned long __m = __M;
+  unsigned long __c;
+  unsigned long __result;
+
+  /* If the mask is constant and selects 8 bits or fewer, we can use
+   the Power8 bit-permute instruction.  */
+  if (__builtin_constant_p(__M) && (__builtin_popcountl(__M) <= 8)) {
+    /* Also, if the pext mask is constant, then the popcount is
+     constant and we can evaluate the following loop at compile
+     time and use a constant bit-permute vector.  */
+    long __i;
+    for (__i = 0; __i < __builtin_popcountl(__M); __i++) {
+      __c = __builtin_clzl(__m);
+      __p = (__p << 8) | __c;
+      __m ^= (__mask >> __c);
+    }
+    __result = __builtin_bpermd(__p, __X);
+  } else {
+    __p = 64 - __builtin_popcountl(__M);
+    __result = 0;
+    /* We could use a for loop here, but that combined with
+     -funroll-loops can expand to a lot of code.  The while
+     loop avoids unrolling and the compiler commons the xor
+     from clearing the mask bit with the (m != 0) test.  The
+     result is a more compact loop setup and body.  */
+    while (__m != 0) {
+      unsigned long __t;
+      __c = __builtin_clzl(__m);
+      __t = (__X & (__mask >> __c)) >> (__p - __c);
+      __m ^= (__mask >> __c);
+      __result |= (__t);
+      __p++;
+    }
+  }
+  return __result;
+}
+
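+/* Worked example (editor's illustration, not part of the upstream header):
+   with mask 0b11010 the set bit positions are 1, 3 and 4, so _pdep_u64
+   scatters the low-order source bits to those positions and _pext_u64
+   gathers them back.  */
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __pdep_pext_round_trip_demo(void) {
+  unsigned long long __m = 0x1AULL;             /* mask 0b11010 */
+  unsigned long long __d = _pdep_u64(0x5, __m); /* 0b101 -> 0b10010 (0x12) */
+  return _pext_u64(__d, __m);                   /* 0b10010 -> 0b101 (0x5) */
+}
+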
+/* these 32-bit implementations depend on 64-bit pdep/pext
+   which depend on _ARCH_PWR7.  */
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pdep_u32(unsigned int __X, unsigned int __Y) {
+  return _pdep_u64(__X, __Y);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _pext_u32(unsigned int __X, unsigned int __Y) {
+  return _pext_u64(__X, __Y);
+}
+#endif /* _ARCH_PWR7  */
+#endif /* __PPC64__  */
+
+#endif /* BMI2INTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h
new file mode 100644
index 0000000..7d33159
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/bmiintrin.h
@@ -0,0 +1,165 @@
+/*===---- bmiintrin.h - Implementation of BMI intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmiintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMIINTRIN_H_
+#define BMIINTRIN_H_
+
+extern __inline unsigned short
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u16(unsigned short __X) {
+  return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __andn_u32(unsigned int __X, unsigned int __Y) {
+  return (~__X & __Y);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bextr_u32(unsigned int __X, unsigned int __P, unsigned int __L) {
+  return ((__X << (32 - (__L + __P))) >> (32 - __L));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __bextr_u32(unsigned int __X, unsigned int __Y) {
+  unsigned int __P, __L;
+  __P = __Y & 0xFF;
+  __L = (__Y >> 8) & 0xFF;
+  return (_bextr_u32(__X, __P, __L));
+}
+
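+/* Worked example (editor's illustration): _bextr_u32(0xABCD1234, 8, 8)
+   extracts the 8 bits starting at bit 8, yielding 0x12; __bextr_u32 packs
+   the same start and length into one control word as (length << 8) | start. */
+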
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsi_u32(unsigned int __X) {
+  return (__X & -__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsi_u32(unsigned int __X) {
+  return __blsi_u32(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsmsk_u32(unsigned int __X) {
+  return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsmsk_u32(unsigned int __X) {
+  return __blsmsk_u32(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsr_u32(unsigned int __X) {
+  return (__X & (__X - 1));
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsr_u32(unsigned int __X) {
+  return __blsr_u32(__X);
+}
+
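+/* Worked example (editor's illustration) for __X = 0xB4 (0b10110100):
+   __blsi_u32(__X)   == 0x04  -- isolate the lowest set bit;
+   __blsmsk_u32(__X) == 0x07  -- mask up to and including the lowest set bit;
+   __blsr_u32(__X)   == 0xB0  -- clear the lowest set bit.  */
+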
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u32(unsigned int __X) {
+  return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _tzcnt_u32(unsigned int __X) {
+  return __builtin_ctz(__X);
+}
+
+/* use the 64-bit shift, rotate, and count leading zeros instructions
+   for long long.  */
+#ifdef __PPC64__
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __andn_u64(unsigned long long __X, unsigned long long __Y) {
+  return (~__X & __Y);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _bextr_u64(unsigned long long __X, unsigned int __P, unsigned int __L) {
+  return ((__X << (64 - (__L + __P))) >> (64 - __L));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __bextr_u64(unsigned long long __X, unsigned long long __Y) {
+  unsigned int __P, __L;
+  __P = __Y & 0xFF;
+  __L = (__Y & 0xFF00) >> 8;
+  return (_bextr_u64(__X, __P, __L));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsi_u64(unsigned long long __X) {
+  return __X & -__X;
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsi_u64(unsigned long long __X) {
+  return __blsi_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsmsk_u64(unsigned long long __X) {
+  return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsmsk_u64(unsigned long long __X) {
+  return __blsmsk_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __blsr_u64(unsigned long long __X) {
+  return (__X & (__X - 1));
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _blsr_u64(unsigned long long __X) {
+  return __blsr_u64(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __tzcnt_u64(unsigned long long __X) {
+  return __builtin_ctzll(__X);
+}
+
+extern __inline unsigned long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _tzcnt_u64(unsigned long long __X) {
+  return __builtin_ctzll(__X);
+}
+#endif /* __PPC64__  */
+
+#endif /* BMIINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h
new file mode 100644
index 0000000..a4c458a
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/emmintrin.h
@@ -0,0 +1,2268 @@
+/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the x86 SSE2 intrinsics mainly handle the __m128i and __m128d
+   types, the PowerPC VMX/VSX ISA is a good match for vector float SIMD
+   operations.  However, scalar float operations in vector (XMM)
+   registers require the POWER8 VSX ISA (2.07) level.  There are also
+   differences in the data format and placement of float scalars in the
+   vector register, which require extra steps to match SSE2 scalar
+   float semantics on POWER.
+
+   It should be noted that x86_64's MXCSR and PowerISA's FPSCR/VSCR
+   registers differ substantially.  It is recommended to use the
+   portable <fenv.h> interface instead of accessing the MXCSR directly.
+
+   Most SSE2 scalar float intrinsic operations can be performed more
+   efficiently as C language float scalar operations or optimized to
+   use vector SIMD operations.  We recommend this for new applications.
+*/
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
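+
+/* Illustrative usage (the file name is hypothetical; adjust the target
+   triple for your toolchain):
+     clang -target powerpc64le-linux-gnu -DNO_WARN_X86_INTRINSICS -c port.c
+   This acknowledges the caveats above and suppresses the #error.  */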
+
+#ifndef EMMINTRIN_H_
+#define EMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files.  */
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef __vector double __v2df;
+typedef __vector long long __v2di;
+typedef __vector unsigned long long __v2du;
+typedef __vector int __v4si;
+typedef __vector unsigned int __v4su;
+typedef __vector short __v8hi;
+typedef __vector unsigned short __v8hu;
+typedef __vector signed char __v16qi;
+typedef __vector unsigned char __v16qu;
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components.  */
+typedef long long __m128i __attribute__((__vector_size__(16), __may_alias__));
+typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
+
+/* Unaligned versions of the same types.  */
+typedef long long __m128i_u
+    __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef double __m128d_u
+    __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+
+/* Define a two-value permute mask.  */
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
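+/* For example, _MM_SHUFFLE2(1, 0) evaluates to 2; used with
+   _mm_shuffle_pd it selects element 0 of __A for the low half and
+   element 1 of __B for the high half.  */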
+
+/* Create a vector with element 0 as F and the rest zero.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_sd(double __F) {
+  return __extension__(__m128d){__F, 0.0};
+}
+
+/* Create a vector with both elements equal to F.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pd(double __F) {
+  return __extension__(__m128d){__F, __F};
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pd1(double __F) {
+  return _mm_set1_pd(__F);
+}
+
+/* Create a vector with the lower value X and upper value W.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pd(double __W, double __X) {
+  return __extension__(__m128d){__X, __W};
+}
+
+/* Create a vector with the lower value W and upper value X.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_pd(double __W, double __X) {
+  return __extension__(__m128d){__W, __X};
+}
+
+/* Create an undefined vector.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_pd(void) {
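+  /* The self-reference deliberately leaves __Y uninitialized (the value
+     is undefined by design) while avoiding an uninitialized-use
+     warning.  */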
+  __m128d __Y = __Y;
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_pd(void) {
+  return (__m128d)vec_splats(0);
+}
+
+/* Sets the low DPFP value of A from the low value of B.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_sd(__m128d __A, __m128d __B) {
+  __v2df __result = (__v2df)__A;
+  __result[0] = ((__v2df)__B)[0];
+  return (__m128d)__result;
+}
+
+/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_pd(double const *__P) {
+  return ((__m128d)vec_ld(0, (__v16qu *)__P));
+}
+
+/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_pd(double const *__P) {
+  return (vec_vsx_ld(0, __P));
+}
+
+/* Create a vector with both elements equal to *P.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load1_pd(double const *__P) {
+  return (vec_splats(*__P));
+}
+
+/* Create a vector with element 0 as *P and the rest zero.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_sd(double const *__P) {
+  return _mm_set_sd(*__P);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_pd1(double const *__P) {
+  return _mm_load1_pd(__P);
+}
+
+/* Load two DPFP values in reverse order.  The address must be aligned.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadr_pd(double const *__P) {
+  __v2df __tmp = _mm_load_pd(__P);
+  return (__m128d)vec_xxpermdi(__tmp, __tmp, 2);
+}
+
+/* Store two DPFP values.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_pd(double *__P, __m128d __A) {
+  vec_st((__v16qu)__A, 0, (__v16qu *)__P);
+}
+
+/* Store two DPFP values.  The address need not be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_pd(double *__P, __m128d __A) {
+  *(__m128d_u *)__P = __A;
+}
+
+/* Stores the lower DPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_sd(double *__P, __m128d __A) {
+  *__P = ((__v2df)__A)[0];
+}
+
+extern __inline double
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_f64(__m128d __A) {
+  return ((__v2df)__A)[0];
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_pd(double *__P, __m128d __A) {
+  _mm_store_sd(__P, __A);
+}
+
+/* Stores the upper DPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeh_pd(double *__P, __m128d __A) {
+  *__P = ((__v2df)__A)[1];
+}
+/* Store the lower DPFP value across two words.
+   The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store1_pd(double *__P, __m128d __A) {
+  _mm_store_pd(__P, vec_splat(__A, 0));
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_pd1(double *__P, __m128d __A) {
+  _mm_store1_pd(__P, __A);
+}
+
+/* Store two DPFP values in reverse order.  The address must be aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storer_pd(double *__P, __m128d __A) {
+  _mm_store_pd(__P, vec_xxpermdi(__A, __A, 2));
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si64(__m128i __A) {
+  return ((__v2di)__A)[0];
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si64x(__m128i __A) {
+  return ((__v2di)__A)[0];
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A + (__v2df)__B);
+}
+
+/* Add the lower double-precision (64-bit) floating-point element in
+   a and b, store the result in the lower element of dst, and copy
+   the upper element from a to the upper element of dst. */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] + __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A - (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] - __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A * (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] * __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_pd(__m128d __A, __m128d __B) {
+  return (__m128d)((__v2df)__A / (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_sd(__m128d __A, __m128d __B) {
+  __A[0] = __A[0] / __B[0];
+  return (__A);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_pd(__m128d __A) {
+  return (vec_sqrt(__A));
+}
+
+/* Return pair {sqrt (B[0]), A[1]}.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_sd(__m128d __A, __m128d __B) {
+  __v2df __c;
+  __c = vec_sqrt((__v2df)_mm_set1_pd(__B[0]));
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pd(__m128d __A, __m128d __B) {
+  return (vec_min(__A, __B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = vec_min(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pd(__m128d __A, __m128d __B) {
+  return (vec_max(__A, __B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = vec_max(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpeq((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_pd(__m128d __A, __m128d __B) {
+  __v2df __temp = (__v2df)vec_cmpeq((__v2df)__A, (__v2df)__B);
+  return ((__m128d)vec_nor(__temp, __temp));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_pd(__m128d __A, __m128d __B) {
+  return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_pd(__m128d __A, __m128d __B) {
+  __v2du __c, __d;
+  /* Comparing a value against itself returns false (0's) if it is NAN.  */
+  __c = (__v2du)vec_cmpeq(__A, __A);
+  __d = (__v2du)vec_cmpeq(__B, __B);
+  /* A != NAN and B != NAN.  */
+  return ((__m128d)vec_and(__c, __d));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_pd(__m128d __A, __m128d __B) {
+#if _ARCH_PWR8
+  __v2du __c, __d;
+  /* Comparing a value against itself returns false (0's) if it is NAN.  */
+  __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+  __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
+  /* A == NAN OR B == NAN converts to:
+     NOT (A != NAN) OR NOT (B != NAN).  */
+  __c = vec_nor(__c, __c);
+  return ((__m128d)vec_orc(__c, __d));
+#else
+  __v2du __c, __d;
+  /* Comparing a value against itself returns false (0's) if it is NAN.  */
+  __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+  __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
+  /* Invert so that true ('1's) indicates NAN.  */
+  __c = vec_nor(__c, __c);
+  __d = vec_nor(__d, __d);
+  return ((__m128d)vec_or(__c, __d));
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  /* PowerISA VSX does not allow partial (for just the lower double)
+     results.  So to ensure we don't generate spurious exceptions
+     (from the upper double values) we splat the lower double
+     before we do the operation.  */
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpeq(__a, __b);
+  /* Then we merge the lower double result with the original upper
+     double from __A.  */
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmplt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmple(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpgt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpge(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  __c = (__v2df)vec_cmpeq(__a, __b);
+  __c = vec_nor(__c, __c);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not less than is just greater than or equal.  */
+  __c = (__v2df)vec_cmpge(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not less than or equal is just greater than.  */
+  __c = (__v2df)vec_cmpgt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not greater than is just less than or equal.  */
+  __c = (__v2df)vec_cmple(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_sd(__m128d __A, __m128d __B) {
+  __v2df __a, __b, __c;
+  __a = vec_splats(__A[0]);
+  __b = vec_splats(__B[0]);
+  /* Not greater than or equal is just less than.  */
+  __c = (__v2df)vec_cmplt(__a, __b);
+  return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_sd(__m128d __A, __m128d __B) {
+  __v2df __r;
+  __r = (__v2df)_mm_cmpord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+  return (__m128d)_mm_setr_pd(__r[0], ((__v2df)__A)[1]);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_sd(__m128d __A, __m128d __B) {
+  __v2df __r;
+  __r = _mm_cmpunord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+  return (__m128d)_mm_setr_pd(__r[0], __A[1]);
+}
+
+/* FIXME
+   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
+   exactly the same because GCC for PowerPC only generates unordered
+   compares (scalar and vector).
+   Technically _mm_comieq_sd et al. should be using the ordered
+   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
+   be OK.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comieq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comilt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comile_sd(__m128d __A, __m128d __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comigt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comige_sd(__m128d __A, __m128d __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comineq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] != __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomieq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomilt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomile_sd(__m128d __A, __m128d __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomigt_sd(__m128d __A, __m128d __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomige_sd(__m128d __A, __m128d __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomineq_sd(__m128d __A, __m128d __B) {
+  return (__A[0] != __B[0]);
+}
+
+/* Create a vector of Qi, where i is the element number.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi64x(long long __q1, long long __q0) {
+  return __extension__(__m128i)(__v2di){__q0, __q1};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi64(__m64 __q1, __m64 __q0) {
+  return _mm_set_epi64x((long long)__q1, (long long)__q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi32(int __q3, int __q2, int __q1, int __q0) {
+  return __extension__(__m128i)(__v4si){__q0, __q1, __q2, __q3};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi16(short __q7, short __q6, short __q5, short __q4, short __q3,
+                  short __q2, short __q1, short __q0) {
+  return __extension__(__m128i)(__v8hi){__q0, __q1, __q2, __q3,
+                                        __q4, __q5, __q6, __q7};
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_epi8(char __q15, char __q14, char __q13, char __q12, char __q11,
+                 char __q10, char __q09, char __q08, char __q07, char __q06,
+                 char __q05, char __q04, char __q03, char __q02, char __q01,
+                 char __q00) {
+  return __extension__(__m128i)(__v16qi){
+      __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+      __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15};
+}
+
+/* Set all of the elements of the vector to A.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi64x(long long __A) {
+  return _mm_set_epi64x(__A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi64(__m64 __A) {
+  return _mm_set_epi64(__A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi32(int __A) {
+  return _mm_set_epi32(__A, __A, __A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi16(short __A) {
+  return _mm_set_epi16(__A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_epi8(char __A) {
+  return _mm_set_epi8(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A,
+                      __A, __A, __A, __A, __A);
+}
+
+/* Create a vector of Qi, where i is the element number.
+   The parameter order is reversed from the _mm_set_epi* functions.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi64(__m64 __q0, __m64 __q1) {
+  return _mm_set_epi64(__q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi32(int __q0, int __q1, int __q2, int __q3) {
+  return _mm_set_epi32(__q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi16(short __q0, short __q1, short __q2, short __q3, short __q4,
+                   short __q5, short __q6, short __q7) {
+  return _mm_set_epi16(__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_epi8(char __q00, char __q01, char __q02, char __q03, char __q04,
+                  char __q05, char __q06, char __q07, char __q08, char __q09,
+                  char __q10, char __q11, char __q12, char __q13, char __q14,
+                  char __q15) {
+  return _mm_set_epi8(__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+                      __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
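+
+/* For example, _mm_setr_epi32 (0, 1, 2, 3) and _mm_set_epi32 (3, 2, 1, 0)
+   both produce a vector whose element [i] equals i.  */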
+
+/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_si128(__m128i const *__P) {
+  return *__P;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_si128(__m128i_u const *__P) {
+  return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_epi64(__m128i_u const *__P) {
+  return _mm_set_epi64((__m64)0LL, *(__m64 *)__P);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_si128(__m128i *__P, __m128i __B) {
+  vec_st((__v16qu)__B, 0, (__v16qu *)__P);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_si128(__m128i_u *__P, __m128i __B) {
+  *__P = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_epi64(__m128i_u *__P, __m128i __B) {
+  *(long long *)__P = ((__v2di)__B)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movepi64_pi64(__m128i_u __B) {
+  return (__m64)((__v2di)__B)[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movpi64_epi64(__m64 __A) {
+  return _mm_set_epi64((__m64)0LL, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_epi64(__m128i __A) {
+  return _mm_set_epi64((__m64)0LL, (__m64)__A[0]);
+}
+
+/* Create an undefined vector.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_si128(void) {
+  __m128i __Y = __Y;
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_si128(void) {
+  return __extension__(__m128i)(__v4si){0, 0, 0, 0};
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_pd(__m128i __A) {
+  __v2di __val;
+  /* For LE we need Vector Unpack Low Signed Word, which is
+     generated from unpackh.  */
+  __val = (__v2di)vec_unpackh((__v4si)__A);
+
+  return (__m128d)vec_ctf(__val, 0);
+}
+#endif
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_ps(__m128i __A) {
+  return ((__m128)vec_ctf((__v4si)__A, 0));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_epi32(__m128d __A) {
+  __v2df __rounded = vec_rint(__A);
+  __v4si __result, __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  /* VSX Vector truncate Double-Precision to integer and Convert to
+   Signed Integer Word format with Saturate.  */
+  __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__rounded) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+  return (__m128i)__result;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_pi32(__m128d __A) {
+  __m128i __result = _mm_cvtpd_epi32(__A);
+
+  return (__m64)__result[0];
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpd_ps(__m128d __A) {
+  __v4sf __result;
+  __v4si __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  __asm__("xvcvdpsp %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4sf)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4sf)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+  return ((__m128)__result);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttpd_epi32(__m128d __A) {
+  __v4si __result;
+  __v4si __temp;
+  const __v4si __vzero = {0, 0, 0, 0};
+
+  /* VSX Vector truncate Double-Precision to integer and Convert to
+   Signed Integer Word format with Saturate.  */
+  __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
+
+#ifdef _ARCH_PWR8
+#ifdef __LITTLE_ENDIAN__
+  __temp = vec_mergeo(__temp, __temp);
+#else
+  __temp = vec_mergee(__temp, __temp);
+#endif
+  __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+                                 (__vector long long)__vzero);
+#else
+  {
+    const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                              0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+    __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
+  }
+#endif
+
+  return ((__m128i)__result);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttpd_pi32(__m128d __A) {
+  __m128i __result = _mm_cvttpd_epi32(__A);
+
+  return (__m64)__result[0];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi128_si32(__m128i __A) {
+  return ((__v4si)__A)[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32_pd(__m64 __A) {
+  __v4si __temp;
+  __v2di __tmp2;
+  __v2df __result;
+
+  __temp = (__v4si)vec_splats(__A);
+  __tmp2 = (__v2di)vec_unpackl(__temp);
+  __result = vec_ctf((__vector signed long long)__tmp2, 0);
+  return (__m128d)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_epi32(__m128 __A) {
+  __v4sf __rounded;
+  __v4si __result;
+
+  __rounded = vec_rint((__v4sf)__A);
+  __result = vec_cts(__rounded, 0);
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttps_epi32(__m128 __A) {
+  __v4si __result;
+
+  __result = vec_cts((__v4sf)__A, 0);
+  return (__m128i)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pd(__m128 __A) {
+  /* Check if vec_doubleh is defined by <altivec.h>.  If so, use that.  */
+#ifdef vec_doubleh
+  return (__m128d)vec_doubleh((__v4sf)__A);
+#else
+  /* Otherwise the compiler is not current, so we need to generate the
+     equivalent code.  */
+  __v4sf __a = (__v4sf)__A;
+  __v4sf __temp;
+  __v2df __result;
+#ifdef __LITTLE_ENDIAN__
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[1], [3]}, so we use two
+     Shift Left Double by Word Immediate operations to get the
+     elements lined up.  */
+  __temp = __builtin_vsx_xxsldwi(__a, __a, 3);
+  __temp = __builtin_vsx_xxsldwi(__a, __temp, 2);
+#else
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[0], [2]}, so we merge the
+     vector with itself to get the elements lined up.  */
+  __temp = vec_vmrghw(__a, __a);
+#endif
+  __asm__(" xvcvspdp %x0,%x1" : "=wa"(__result) : "wa"(__temp) :);
+  return (__m128d)__result;
+#endif
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si32(__m128d __A) {
+  __v2df __rounded = vec_rint((__v2df)__A);
+  int __result = ((__v2df)__rounded)[0];
+
+  return __result;
+}
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si64(__m128d __A) {
+  __v2df __rounded = vec_rint((__v2df)__A);
+  long long __result = ((__v2df)__rounded)[0];
+
+  return __result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_si64x(__m128d __A) {
+  return _mm_cvtsd_si64((__v2df)__A);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si32(__m128d __A) {
+  int __result = ((__v2df)__A)[0];
+
+  return __result;
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si64(__m128d __A) {
+  long long __result = ((__v2df)__A)[0];
+
+  return __result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttsd_si64x(__m128d __A) {
+  return _mm_cvttsd_si64(__A);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsd_ss(__m128 __A, __m128d __B) {
+  __v4sf __result = (__v4sf)__A;
+
+#ifdef __LITTLE_ENDIAN__
+  __v4sf __temp_s;
+  /* Copy double element[0] to element [1] for conversion.  */
+  __v2df __temp_b = vec_splat((__v2df)__B, 0);
+
+  /* Pre-rotate __A left 3 (logically right 1) elements.  */
+  __result = __builtin_vsx_xxsldwi(__result, __result, 3);
+  /* Convert double to single float scalar in a vector.  */
+  __asm__("xscvdpsp %x0,%x1" : "=wa"(__temp_s) : "wa"(__temp_b) :);
+  /* Shift the resulting scalar into vector element [0].  */
+  __result = __builtin_vsx_xxsldwi(__result, __temp_s, 1);
+#else
+  __result[0] = ((__v2df)__B)[0];
+#endif
+  return (__m128)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_sd(__m128d __A, int __B) {
+  __v2df __result = (__v2df)__A;
+  double __db = __B;
+  __result[0] = __db;
+  return (__m128d)__result;
+}
+
+/* Intel intrinsic.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_sd(__m128d __A, long long __B) {
+  __v2df __result = (__v2df)__A;
+  double __db = __B;
+  __result[0] = __db;
+  return (__m128d)__result;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_sd(__m128d __A, long long __B) {
+  return _mm_cvtsi64_sd(__A, __B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_sd(__m128d __A, __m128 __B) {
+#ifdef __LITTLE_ENDIAN__
+  /* Use splat to move element [0] into position for the convert. */
+  __v4sf __temp = vec_splat((__v4sf)__B, 0);
+  __v2df __res;
+  /* Convert single float scalar to double in a vector.  */
+  __asm__("xscvspdp %x0,%x1" : "=wa"(__res) : "wa"(__temp) :);
+  return (__m128d)vec_mergel(__res, (__v2df)__A);
+#else
+  __v2df __res = (__v2df)__A;
+  __res[0] = ((__v4sf)__B)[0];
+  return (__m128d)__res;
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) {
+  __vector double __result;
+  const int __litmsk = __mask & 0x3;
+
+  if (__litmsk == 0)
+    __result = vec_mergeh(__A, __B);
+#if __GNUC__ < 6
+  else if (__litmsk == 1)
+    __result = vec_xxpermdi(__B, __A, 2);
+  else if (__litmsk == 2)
+    __result = vec_xxpermdi(__B, __A, 1);
+#else
+  else if (__litmsk == 1)
+    __result = vec_xxpermdi(__A, __B, 2);
+  else if (__litmsk == 2)
+    __result = vec_xxpermdi(__A, __B, 1);
+#endif
+  else
+    __result = vec_mergel(__A, __B);
+
+  return __result;
+}
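+
+/* Illustrative example (not part of the header): with __mask = 1 the
+   result is {__A[1], __B[0]}; with __mask = 3 it is {__A[1], __B[1]}
+   (vec_mergel).  */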
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_pd(__m128d __A, __m128d __B) {
+  return (__m128d)vec_mergel((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_pd(__m128d __A, __m128d __B) {
+  return (__m128d)vec_mergeh((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadh_pd(__m128d __A, double const *__B) {
+  __v2df __result = (__v2df)__A;
+  __result[1] = *__B;
+  return (__m128d)__result;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_pd(__m128d __A, double const *__B) {
+  __v2df __result = (__v2df)__A;
+  __result[0] = *__B;
+  return (__m128d)__result;
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Creates a 2-bit mask from the most significant bits of the DPFP values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_pd(__m128d __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__v2du)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned int __perm_mask = {
+#ifdef __LITTLE_ENDIAN__
+      0x80800040, 0x80808080, 0x80808080, 0x80808080
+#else
+      0x80808080, 0x80808080, 0x80808080, 0x80804000
+#endif
+  };
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
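+
+/* Illustrative example (not part of the header): for
+   __A = {-2.0, 1.0}, only element [0] has its sign bit set, so
+   _mm_movemask_pd returns 0x1.  */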
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packs((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packs((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packus_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_packsu((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__v4su)__A, (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergel((__vector long long)__A, (__vector long long)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)vec_mergeh((__vector long long)__A, (__vector long long)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)((__v16qu)__A + (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hu)__A + (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)((__v4su)__A + (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)((__v2du)__A + (__v2du)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_adds((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)((__v16qu)__A - (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hu)__A - (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)((__v4su)__A - (__v4su)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_epi64(__m128i __A, __m128i __B) {
+  return (__m128i)((__v2du)__A - (__v2du)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_subs((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_madd_epi16(__m128i __A, __m128i __B) {
+  __vector signed int __zero = {0, 0, 0, 0};
+
+  return (__m128i)vec_vmsumshm((__v8hi)__A, (__v8hi)__B, __zero);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_epi16(__m128i __A, __m128i __B) {
+  __vector signed int __w0, __w1;
+
+  __vector unsigned char __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+      0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+  };
+
+  __w0 = vec_vmulesh((__v8hi)__A, (__v8hi)__B);
+  __w1 = vec_vmulosh((__v8hi)__A, (__v8hi)__B);
+  return (__m128i)vec_perm(__w0, __w1, __xform1);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mullo_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)((__v8hi)__A * (__v8hi)__B);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_su32(__m64 __A, __m64 __B) {
+  unsigned int __a = __A;
+  unsigned int __b = __B;
+
+  return ((__m64)__a * (__m64)__b);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_epu32(__m128i __A, __m128i __B) {
+#if __GNUC__ < 8
+  __v2du __result;
+
+#ifdef __LITTLE_ENDIAN__
+  /* VMX Vector Multiply Odd Unsigned Word.  */
+  __asm__("vmulouw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
+#else
+  /* VMX Vector Multiply Even Unsigned Word.  */
+  __asm__("vmuleuw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
+#endif
+  return (__m128i)__result;
+#else
+  return (__m128i)vec_mule((__v4su)__A, (__v4su)__B);
+#endif
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi16(__m128i __A, int __B) {
+  __v8hu __lshift;
+  __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__B >= 0 && __B < 16) {
+    if (__builtin_constant_p(__B))
+      __lshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __lshift = vec_splats((unsigned short)__B);
+
+    __result = vec_sl((__v8hi)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
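+
+/* Note (illustrative): as with the Intel intrinsic, a shift count
+   outside 0..15 leaves __result at its zero initializer, so e.g.
+   _mm_slli_epi16 (__A, 20) returns a vector of zeros.  */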
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi32(__m128i __A, int __B) {
+  __v4su __lshift;
+  __v4si __result = {0, 0, 0, 0};
+
+  if (__B >= 0 && __B < 32) {
+    if (__builtin_constant_p(__B) && __B < 16)
+      __lshift = (__v4su)vec_splat_s32(__B);
+    else
+      __lshift = vec_splats((unsigned int)__B);
+
+    __result = vec_sl((__v4si)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_epi64(__m128i __A, int __B) {
+  __v2du __lshift;
+  __v2di __result = {0, 0};
+
+  if (__B >= 0 && __B < 64) {
+    if (__builtin_constant_p(__B) && __B < 16)
+      __lshift = (__v2du)vec_splat_s32(__B);
+    else
+      __lshift = (__v2du)vec_splats((unsigned int)__B);
+
+    __result = vec_sl((__v2di)__A, __lshift);
+  }
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_epi16(__m128i __A, int __B) {
+  __v8hu __rshift = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hi __result;
+
+  if (__B < 16) {
+    if (__builtin_constant_p(__B))
+      __rshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __rshift = vec_splats((unsigned short)__B);
+  }
+  __result = vec_sra((__v8hi)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_epi32(__m128i __A, int __B) {
+  __v4su __rshift = {31, 31, 31, 31};
+  __v4si __result;
+
+  if (__B < 32) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v4su)vec_splat_s32(__B);
+      else
+        __rshift = (__v4su)vec_splats((unsigned int)__B);
+    } else
+      __rshift = vec_splats((unsigned int)__B);
+  }
+  __result = vec_sra((__v4si)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_bslli_si128(__m128i __A, const int __N) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__N < 16)
+    __result = vec_sld((__v16qu)__A, __zeros, __N);
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_bsrli_si128(__m128i __A, const int __N) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__N < 16)
+#ifdef __LITTLE_ENDIAN__
+    if (__builtin_constant_p(__N))
+      /* We would like to use Vector Shift Left Double by Octet
+         Immediate here, to use the immediate form and avoid loading
+         the __N * 8 value into a separate VR.  */
+      __result = vec_sld(__zeros, (__v16qu)__A, (16 - __N));
+    else
+#endif
+    {
+      __v16qu __shift = vec_splats((unsigned char)(__N * 8));
+#ifdef __LITTLE_ENDIAN__
+      __result = vec_sro((__v16qu)__A, __shift);
+#else
+      __result = vec_slo((__v16qu)__A, __shift);
+#endif
+    }
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_si128(__m128i __A, const int __N) {
+  return _mm_bsrli_si128(__A, __N);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_si128(__m128i __A, const int _imm5) {
+  __v16qu __result;
+  const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (_imm5 < 16)
+#ifdef __LITTLE_ENDIAN__
+    __result = vec_sld((__v16qu)__A, __zeros, _imm5);
+#else
+    __result = vec_sld(__zeros, (__v16qu)__A, (16 - _imm5));
+#endif
+  else
+    __result = __zeros;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi16(__m128i __A, int __B) {
+  __v8hu __rshift;
+  __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
+
+  if (__B < 16) {
+    if (__builtin_constant_p(__B))
+      __rshift = (__v8hu)vec_splat_s16(__B);
+    else
+      __rshift = vec_splats((unsigned short)__B);
+
+    __result = vec_sr((__v8hi)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi32(__m128i __A, int __B) {
+  __v4su __rshift;
+  __v4si __result = {0, 0, 0, 0};
+
+  if (__B < 32) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v4su)vec_splat_s32(__B);
+      else
+        __rshift = (__v4su)vec_splats((unsigned int)__B);
+    } else
+      __rshift = vec_splats((unsigned int)__B);
+
+    __result = vec_sr((__v4si)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi64(__m128i __A, int __B) {
+  __v2du __rshift;
+  __v2di __result = {0, 0};
+
+  if (__B < 64) {
+    if (__builtin_constant_p(__B)) {
+      if (__B < 16)
+        __rshift = (__v2du)vec_splat_s32(__B);
+      else
+        __rshift = (__v2du)vec_splats((unsigned long long)__B);
+    } else
+      __rshift = (__v2du)vec_splats((unsigned int)__B);
+
+    __result = vec_sr((__v2di)__A, __rshift);
+  }
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi16(__m128i __A, __m128i __B) {
+  __v8hu __lshift;
+  __vector __bool short __shmask;
+  const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __lshift = vec_splat((__v8hu)__B, 0);
+#else
+  __lshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __shmask = vec_cmple(__lshift, __shmax);
+  __result = vec_sl((__v8hu)__A, __lshift);
+  __result = vec_sel((__v8hu)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi32(__m128i __A, __m128i __B) {
+  __v4su __lshift;
+  __vector __bool int __shmask;
+  const __v4su __shmax = {32, 32, 32, 32};
+  __v4su __result;
+#ifdef __LITTLE_ENDIAN__
+  __lshift = vec_splat((__v4su)__B, 0);
+#else
+  __lshift = vec_splat((__v4su)__B, 1);
+#endif
+  __shmask = vec_cmplt(__lshift, __shmax);
+  __result = vec_sl((__v4su)__A, __lshift);
+  __result = vec_sel((__v4su)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_epi64(__m128i __A, __m128i __B) {
+  __v2du __lshift;
+  __vector __bool long long __shmask;
+  const __v2du __shmax = {64, 64};
+  __v2du __result;
+
+  __lshift = vec_splat((__v2du)__B, 0);
+  __shmask = vec_cmplt(__lshift, __shmax);
+  __result = vec_sl((__v2du)__A, __lshift);
+  __result = vec_sel((__v2du)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_epi16(__m128i __A, __m128i __B) {
+  const __v8hu __rshmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __rshift;
+  __v8hi __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v8hu)__B, 0);
+#else
+  __rshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __rshift = vec_min(__rshift, __rshmax);
+  __result = vec_sra((__v8hi)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_epi32(__m128i __A, __m128i __B) {
+  const __v4su __rshmax = {31, 31, 31, 31};
+  __v4su __rshift;
+  __v4si __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v4su)__B, 0);
+#else
+  __rshift = vec_splat((__v4su)__B, 1);
+#endif
+  __rshift = vec_min(__rshift, __rshmax);
+  __result = vec_sra((__v4si)__A, __rshift);
+
+  return (__m128i)__result;
+}
+
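+/* For illustration (arbitrary values): the vec_min clamp above encodes
+   the Intel rule that an oversized arithmetic shift count fills every
+   element with its sign bit:
+     __m128i __v = _mm_set1_epi32(-8);
+     _mm_sra_epi32(__v, _mm_cvtsi32_si128(64));  // every element == -1
+*/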
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi16(__m128i __A, __m128i __B) {
+  __v8hu __rshift;
+  __vector __bool short __shmask;
+  const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+  __v8hu __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v8hu)__B, 0);
+#else
+  __rshift = vec_splat((__v8hu)__B, 3);
+#endif
+  __shmask = vec_cmple(__rshift, __shmax);
+  __result = vec_sr((__v8hu)__A, __rshift);
+  __result = vec_sel((__v8hu)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi32(__m128i __A, __m128i __B) {
+  __v4su __rshift;
+  __vector __bool int __shmask;
+  const __v4su __shmax = {32, 32, 32, 32};
+  __v4su __result;
+
+#ifdef __LITTLE_ENDIAN__
+  __rshift = vec_splat((__v4su)__B, 0);
+#else
+  __rshift = vec_splat((__v4su)__B, 1);
+#endif
+  __shmask = vec_cmplt(__rshift, __shmax);
+  __result = vec_sr((__v4su)__A, __rshift);
+  __result = vec_sel((__v4su)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_epi64(__m128i __A, __m128i __B) {
+  __v2du __rshift;
+  __vector __bool long long __shmask;
+  const __v2du __shmax = {64, 64};
+  __v2du __result;
+
+  __rshift = vec_splat((__v2du)__B, 0);
+  __shmask = vec_cmplt(__rshift, __shmax);
+  __result = vec_sr((__v2du)__A, __rshift);
+  __result = vec_sel((__v2du)__shmask, __result, __shmask);
+
+  return (__m128i)__result;
+}
+#endif
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_pd(__m128d __A, __m128d __B) {
+  return (vec_and((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_pd(__m128d __A, __m128d __B) {
+  return (vec_andc((__v2df)__B, (__v2df)__A));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_pd(__m128d __A, __m128d __B) {
+  return (vec_or((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_pd(__m128d __A, __m128d __B) {
+  return (vec_xor((__v2df)__A, (__v2df)__B));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_and((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_andc((__v2di)__B, (__v2di)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_or((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_si128(__m128i __A, __m128i __B) {
+  return (__m128i)vec_xor((__v2di)__A, (__v2di)__B);
+}
+
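+/* For illustration: note the operand order of the andnot forms, where
+   _mm_andnot_si128(__A, __B) computes (~__A) & __B, hence the swapped
+   arguments passed to vec_andc above (arbitrary values):
+     __m128i __m = _mm_set1_epi32(0x0000FFFF);
+     __m128i __v = _mm_set1_epi32(0x12345678);
+     _mm_andnot_si128(__m, __v);  // each element == 0x12340000
+*/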
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpeq((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmplt((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi32(__m128i __A, __m128i __B) {
+  return (__m128i)vec_cmpgt((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi16(__m128i const __A, int const __N) {
+  return (unsigned short)((__v8hi)__A)[__N & 7];
+}
+
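+/* For illustration (arbitrary values): the index is reduced modulo 8
+   and the extracted halfword is zero-extended, so a negative lane
+   comes back as its unsigned 16-bit pattern:
+     __m128i __v = _mm_set_epi16(7, 6, 5, 4, 3, 2, -1, 0);
+     _mm_extract_epi16(__v, 1);  // == 65535, not -1
+     _mm_extract_epi16(__v, 9);  // same lane, since 9 & 7 == 1
+*/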
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi16(__m128i const __A, int const __D, int const __N) {
+  __v8hi __result = (__v8hi)__A;
+
+  __result[(__N & 7)] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_max((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_max((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_min((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_min((__v16qu)__A, (__v16qu)__B);
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require at least PowerISA 2.07.  */
+
+/* Return a mask created from the most significant bit of each 8-bit
+   element in A.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_epi8(__m128i __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__v16qu)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned char __perm_mask = {
+      0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+      0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00};
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
+
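+/* For illustration (arbitrary values): bit __i of the movemask result
+   is the sign bit of byte __i of __A, with byte 0 mapping to bit 0:
+     __m128i __v = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+                                0, 0, 0, 0, 0, 0, 0, -128);
+     _mm_movemask_epi8(__v);  // == 0x0001
+*/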
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_epu16(__m128i __A, __m128i __B) {
+  __v4su __w0, __w1;
+  __v16qu __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+      0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+  };
+
+  __w0 = vec_vmuleuh((__v8hu)__A, (__v8hu)__B);
+  __w1 = vec_vmulouh((__v8hu)__A, (__v8hu)__B);
+  return (__m128i)vec_perm(__w0, __w1, __xform1);
+}
+
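+/* For illustration: vec_vmuleuh and vec_vmulouh above produce eight
+   32-bit products split across two vectors, and the __xform1 permute
+   gathers the high halfword of each product back into lane order
+   (arbitrary values):
+     _mm_mulhi_epu16(_mm_set1_epi16(-1), _mm_set1_epi16(-1));
+     // 0xFFFF * 0xFFFF == 0xFFFE0001, so every lane == 0xFFFE
+*/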
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shufflehi_epi16(__m128i __A, const int __mask) {
+  unsigned long __element_selector_98 = __mask & 0x03;
+  unsigned long __element_selector_BA = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_DC = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_FE = (__mask >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+      0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+#endif
+  };
+  __v2du __pmask =
+#ifdef __LITTLE_ENDIAN__
+      {0x1716151413121110UL, 0UL};
+#else
+      {0x1011121314151617UL, 0UL};
+#endif
+  __m64_union __t;
+  __v2du __a, __r;
+
+  __t.as_short[0] = __permute_selectors[__element_selector_98];
+  __t.as_short[1] = __permute_selectors[__element_selector_BA];
+  __t.as_short[2] = __permute_selectors[__element_selector_DC];
+  __t.as_short[3] = __permute_selectors[__element_selector_FE];
+  __pmask[1] = __t.as_m64;
+  __a = (__v2du)__A;
+  __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+  return (__m128i)__r;
+}
+
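+/* For illustration (arbitrary values): each 2-bit field of __mask picks
+   one of the four high halfwords; the low quadword passes through
+   unchanged.  Mask 0x1B (0b00011011) reverses the high four lanes:
+     __m128i __v = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
+     _mm_shufflehi_epi16(__v, 0x1B);
+     // == _mm_set_epi16(4, 5, 6, 7, 3, 2, 1, 0)
+*/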
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shufflelo_epi16(__m128i __A, const int __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0100, 0x0302, 0x0504, 0x0706
+#else
+      0x0001, 0x0203, 0x0405, 0x0607
+#endif
+  };
+  __v2du __pmask =
+#ifdef __LITTLE_ENDIAN__
+      {0UL, 0x1f1e1d1c1b1a1918UL};
+#else
+      {0UL, 0x18191a1b1c1d1e1fUL};
+#endif
+  __m64_union __t;
+  __v2du __a, __r;
+  __t.as_short[0] = __permute_selectors[__element_selector_10];
+  __t.as_short[1] = __permute_selectors[__element_selector_32];
+  __t.as_short[2] = __permute_selectors[__element_selector_54];
+  __t.as_short[3] = __permute_selectors[__element_selector_76];
+  __pmask[0] = __t.as_m64;
+  __a = (__v2du)__A;
+  __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+  return (__m128i)__r;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_epi32(__m128i __A, const int __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned int __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+  };
+  __v4su __t;
+
+  __t[0] = __permute_selectors[__element_selector_10];
+  __t[1] = __permute_selectors[__element_selector_32];
+  __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+  __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+  return (__m128i)vec_perm((__v4si)__A, (__v4si)__A,
+                           (__vector unsigned char)__t);
+}
+
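+/* For illustration, assuming the usual _MM_SHUFFLE(w3,w2,w1,w0) helper
+   from <xmmintrin.h> (arbitrary values): broadcasting element 0,
+     __m128i __v = _mm_set_epi32(3, 2, 1, 9);
+     _mm_shuffle_epi32(__v, _MM_SHUFFLE(0, 0, 0, 0));
+     // == _mm_set1_epi32(9)
+*/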
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maskmoveu_si128(__m128i __A, __m128i __B, char *__C) {
+  __v2du __hibit = {0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+  __v16qu __mask, __tmp;
+  __m128i_u *__p = (__m128i_u *)__C;
+
+  __tmp = (__v16qu)_mm_loadu_si128(__p);
+  __mask = (__v16qu)vec_cmpgt((__v16qu)__B, (__v16qu)__hibit);
+  __tmp = vec_sel(__tmp, (__v16qu)__A, __mask);
+  _mm_storeu_si128(__p, (__m128i)__tmp);
+}
+
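+/* For illustration: this implementation is a full 16-byte
+   read-modify-write of the destination (load, per-byte select on the
+   mask sign bit, store), so __C must point to 16 readable and writable
+   bytes even for lanes whose mask bit is clear.  Arbitrary example:
+     char __buf[16] = {0};
+     __m128i __data = _mm_set1_epi8(0x55);
+     __m128i __mask = _mm_set_epi8(-1, 0, 0, 0, 0, 0, 0, 0,
+                                   0, 0, 0, 0, 0, 0, 0, -1);
+     _mm_maskmoveu_si128(__data, __mask, __buf);  // writes bytes 0 and 15
+*/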
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_epu8(__m128i __A, __m128i __B) {
+  return (__m128i)vec_avg((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_epu16(__m128i __A, __m128i __B) {
+  return (__m128i)vec_avg((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sad_epu8(__m128i __A, __m128i __B) {
+  __v16qu __a, __b;
+  __v16qu __vabsdiff;
+  __v4si __vsum;
+  const __v4su __zero = {0, 0, 0, 0};
+  __v4si __result;
+
+  __a = (__v16qu)__A;
+  __b = (__v16qu)__B;
+#ifndef _ARCH_PWR9
+  __v16qu __vmin = vec_min(__a, __b);
+  __v16qu __vmax = vec_max(__a, __b);
+  __vabsdiff = vec_sub(__vmax, __vmin);
+#else
+  __vabsdiff = vec_absd(__a, __b);
+#endif
+  /* Sum four groups of bytes into integers.  */
+  __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
+#ifdef __LITTLE_ENDIAN__
+  /* Sum across four integers with two integer results.  */
+  __asm__("vsum2sws %0,%1,%2" : "=v"(__result) : "v"(__vsum), "v"(__zero));
+  /* Note: vec_sum2s could be used here, but on little-endian it adds
+     vector shifts that this use case does not need.  vec_sum2s would
+     shift the 32-bit results (currently at [0] and [2]) into [1] and
+     [3], and they would then have to be shifted back, since the
+     desired results are two 64-bit integers ([1]|[0] and [3]|[2]).
+     Issuing vsum2sws directly avoids both shifts.  */
+#else
+  /* Sum across four integers with two integer results.  */
+  __result = vec_sum2s(__vsum, (__vector signed int)__zero);
+  /* Rotate the sums into the correct position.  */
+  __result = vec_sld(__result, __result, 6);
+#endif
+  return (__m128i)__result;
+}
+
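+/* For illustration (arbitrary values): each 64-bit half of the result
+   holds the sum of absolute differences of its eight byte pairs:
+     _mm_sad_epu8(_mm_set1_epi8(10), _mm_set1_epi8(3));
+     // both 64-bit lanes == 8 * 7 == 56
+*/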
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si32(int *__A, int __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si64(long long int *__A, long long int __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_si128(__m128i *__A, __m128i __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *__A = __B;
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_pd(double *__A, __m128d __B) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+  *(__m128d *)__A = __B;
+}
+
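+/* For illustration: on this target the "non-temporal" behavior is only
+   the dcbtstt cache hint; the store itself is an ordinary store with
+   normal PowerPC ordering, so no extra fencing is implied.  E.g.:
+     static int __sink;
+     _mm_stream_si32(&__sink, 42);  // hint, then a plain store of 42
+*/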
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_clflush(void const *__A) {
+  /* Use the data cache block flush.  */
+  __asm__("dcbf 0,%0" : : "b"(__A) : "memory");
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_lfence(void) {
+  /* Use a lightweight sync (lwsync) for load-to-load ordering.  */
+  __atomic_thread_fence(__ATOMIC_RELEASE);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mfence(void) {
+  /* Use a heavyweight sync (hwsync) for any-to-any ordering.  */
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
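+/* For illustration (names are illustrative): a typical publish pattern,
+   with the fence ordering the payload store before the flag store:
+     __payload = 42;  // ordinary store
+     _mm_mfence();    // full barrier: payload is made visible first
+     __ready = 1;     // consumers that see this also see the payload
+*/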
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_si128(int __A) {
+  return _mm_set_epi32(0, 0, 0, __A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_si128(long long __A) {
+  return __extension__(__m128i)(__v2di){__A, 0LL};
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_si128(long long __A) {
+  return __extension__(__m128i)(__v2di){__A, 0LL};
+}
+
+/* Casts between various SP, DP, INT vector types.  Note that these
+   perform no conversion of values; they simply reinterpret the type.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_ps(__m128d __A) {
+  return (__m128)__A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_si128(__m128d __A) {
+  return (__m128i)__A;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_pd(__m128 __A) {
+  return (__m128d)__A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_si128(__m128 __A) {
+  return (__m128i)__A;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_ps(__m128i __A) {
+  return (__m128)__A;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_pd(__m128i __A) {
+  return (__m128d)__A;
+}
+
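+/* For illustration (arbitrary value): the casts round-trip bit
+   patterns exactly:
+     __m128d __d = _mm_set1_pd(1.0);
+     __m128i __i = _mm_castpd_si128(__d);
+     // each 64-bit lane of __i == 0x3FF0000000000000, the bits of 1.0
+*/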
+#else
+#include_next <emmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* EMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h
new file mode 100644
index 0000000..c1ada98
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/immintrin.h
@@ -0,0 +1,27 @@
+/*===---- immintrin.h - Implementation of Intel intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef IMMINTRIN_H_
+#define IMMINTRIN_H_
+
+#include <x86gprintrin.h>
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#endif /* IMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h
new file mode 100644
index 0000000..6592091
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mm_malloc.h
@@ -0,0 +1,45 @@
+/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <stdlib.h>
+
+/* We can't depend on <stdlib.h> since the prototype of posix_memalign
+   may not be visible.  */
+#ifndef __cplusplus
+extern int posix_memalign(void **, size_t, size_t);
+#else
+extern "C" int posix_memalign(void **, size_t, size_t);
+#endif
+
+static __inline void *_mm_malloc(size_t __size, size_t __alignment) {
+  /* PowerPC64 ELF V2 ABI requires quadword alignment.  */
+  size_t __vec_align = sizeof(__vector float);
+  void *__ptr;
+
+  if (__alignment < __vec_align)
+    __alignment = __vec_align;
+  if (posix_memalign(&__ptr, __alignment, __size) == 0)
+    return __ptr;
+  else
+    return NULL;
+}
+
+static __inline void _mm_free(void *__ptr) { free(__ptr); }
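+
+/* For illustration (arbitrary sizes): memory from _mm_malloc must be
+   released with _mm_free, and the requested alignment is raised to at
+   least the 16-byte vector alignment:
+     float *__p = (float *)_mm_malloc(64 * sizeof(float), 8);
+     if (__p)
+       _mm_free(__p);  // __p was in fact 16-byte aligned
+*/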
+
+#else
+#include_next <mm_malloc.h>
+#endif
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
similarity index 65%
copy from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
index 54e4ee9..70e8b81 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/mmintrin.h
@@ -35,7 +35,8 @@
 #ifndef _MMINTRIN_H_INCLUDED
 #define _MMINTRIN_H_INCLUDED
 
-#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
 
 #include <altivec.h>
 /* The Intel API is flexible enough that we must allow aliasing with other
@@ -149,17 +150,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short vm1;
-  __vector signed char vresult;
+  __vector signed short __vm1;
+  __vector signed char __vresult;
 
-  vm1 = (__vector signed short)(__vector unsigned long long)
+  __vm1 = (__vector signed short)(__vector unsigned long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
+  __vresult = vec_packs(__vm1, __vm1);
+  return (__m64)((__vector long long)__vresult)[0];
 }
 
 extern __inline __m64
@@ -174,17 +175,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pi32(__m64 __m1, __m64 __m2) {
-  __vector signed int vm1;
-  __vector signed short vresult;
+  __vector signed int __vm1;
+  __vector signed short __vresult;
 
-  vm1 = (__vector signed int)(__vector unsigned long long)
+  __vm1 = (__vector signed int)(__vector unsigned long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
-  vresult = vec_packs(vm1, vm1);
-  return (__m64)((__vector long long)vresult)[0];
+  __vresult = vec_packs(__vm1, __vm1);
+  return (__m64)((__vector long long)__vresult)[0];
 }
 
 extern __inline __m64
@@ -199,19 +200,20 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_packs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned char r;
-  __vector signed short vm1 = (__vector signed short)(__vector long long)
+  __vector unsigned char __r;
+  __vector signed short __vm1 = (__vector signed short)(__vector long long)
 #ifdef __LITTLE_ENDIAN__
       {__m1, __m2};
 #else
       {__m2, __m1};
 #endif
   const __vector signed short __zero = {0};
-  __vector __bool short __select = vec_cmplt(vm1, __zero);
-  r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
-  __vector __bool char packsel = vec_pack(__select, __select);
-  r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
-  return (__m64)((__vector long long)r)[0];
+  __vector __bool short __select = vec_cmplt(__vm1, __zero);
+  __r =
+      vec_packs((__vector unsigned short)__vm1, (__vector unsigned short)__vm1);
+  __vector __bool char __packsel = vec_pack(__select, __select);
+  __r = vec_sel(__r, (const __vector unsigned char)__zero, __packsel);
+  return (__m64)((__vector long long)__r)[0];
 }
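+
+/* For illustration (arbitrary values): the vec_cmplt/vec_sel pair above
+   implements the unsigned-saturation rule that negative inputs clamp
+   to zero:
+     _mm_packs_pu16(_mm_set_pi16(300, -5, 0, 7), _mm_set_pi16(1, 2, 3, 4));
+     // result bytes, low to high: 7, 0, 0 (from -5), 255 (from 300),
+     //                            4, 3, 2, 1
+*/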
 
 extern __inline __m64
@@ -227,28 +229,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[1];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_mergel(__a, __b);
+  return (__m64)((__vector long long)__c)[1];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[4];
-  res.as_char[1] = m2.as_char[4];
-  res.as_char[2] = m1.as_char[5];
-  res.as_char[3] = m2.as_char[5];
-  res.as_char[4] = m1.as_char[6];
-  res.as_char[5] = m2.as_char[6];
-  res.as_char[6] = m1.as_char[7];
-  res.as_char[7] = m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[4];
+  __res.as_char[1] = __mu2.as_char[4];
+  __res.as_char[2] = __mu1.as_char[5];
+  __res.as_char[3] = __mu2.as_char[5];
+  __res.as_char[4] = __mu1.as_char[6];
+  __res.as_char[5] = __mu2.as_char[6];
+  __res.as_char[6] = __mu1.as_char[7];
+  __res.as_char[7] = __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -263,17 +265,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[2];
-  res.as_short[1] = m2.as_short[2];
-  res.as_short[2] = m1.as_short[3];
-  res.as_short[3] = m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[2];
+  __res.as_short[1] = __mu2.as_short[2];
+  __res.as_short[2] = __mu1.as_short[3];
+  __res.as_short[3] = __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -286,15 +288,15 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[1];
-  res.as_int[1] = m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[1];
+  __res.as_int[1] = __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -308,28 +310,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_mergel(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_mergel(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0];
-  res.as_char[1] = m2.as_char[0];
-  res.as_char[2] = m1.as_char[1];
-  res.as_char[3] = m2.as_char[1];
-  res.as_char[4] = m1.as_char[2];
-  res.as_char[5] = m2.as_char[2];
-  res.as_char[6] = m1.as_char[3];
-  res.as_char[7] = m2.as_char[3];
+  __res.as_char[0] = __mu1.as_char[0];
+  __res.as_char[1] = __mu2.as_char[0];
+  __res.as_char[2] = __mu1.as_char[1];
+  __res.as_char[3] = __mu2.as_char[1];
+  __res.as_char[4] = __mu1.as_char[2];
+  __res.as_char[5] = __mu2.as_char[2];
+  __res.as_char[6] = __mu1.as_char[3];
+  __res.as_char[7] = __mu2.as_char[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -343,17 +345,17 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0];
-  res.as_short[1] = m2.as_short[0];
-  res.as_short[2] = m1.as_short[1];
-  res.as_short[3] = m2.as_short[1];
+  __res.as_short[0] = __mu1.as_short[0];
+  __res.as_short[1] = __mu2.as_short[0];
+  __res.as_short[2] = __mu1.as_short[1];
+  __res.as_short[3] = __mu2.as_short[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -367,15 +369,15 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0];
-  res.as_int[1] = m2.as_int[0];
+  __res.as_int[0] = __mu1.as_int[0];
+  __res.as_int[1] = __mu2.as_int[0];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 }
 
 extern __inline __m64
@@ -389,28 +391,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0] + m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] + m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] + m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] + m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] + m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] + m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] + m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[0] + __mu2.as_char[0];
+  __res.as_char[1] = __mu1.as_char[1] + __mu2.as_char[1];
+  __res.as_char[2] = __mu1.as_char[2] + __mu2.as_char[2];
+  __res.as_char[3] = __mu1.as_char[3] + __mu2.as_char[3];
+  __res.as_char[4] = __mu1.as_char[4] + __mu2.as_char[4];
+  __res.as_char[5] = __mu1.as_char[5] + __mu2.as_char[5];
+  __res.as_char[6] = __mu1.as_char[6] + __mu2.as_char[6];
+  __res.as_char[7] = __mu1.as_char[7] + __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -425,24 +427,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0] + m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] + m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] + m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[0] + __mu2.as_short[0];
+  __res.as_short[1] = __mu1.as_short[1] + __mu2.as_short[1];
+  __res.as_short[2] = __mu1.as_short[2] + __mu2.as_short[2];
+  __res.as_short[3] = __mu1.as_short[3] + __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -457,22 +459,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_add_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_add(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = vec_add(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0] + m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[0] + __mu2.as_int[0];
+  __res.as_int[1] = __mu1.as_int[1] + __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -487,28 +489,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = m1.as_char[0] - m2.as_char[0];
-  res.as_char[1] = m1.as_char[1] - m2.as_char[1];
-  res.as_char[2] = m1.as_char[2] - m2.as_char[2];
-  res.as_char[3] = m1.as_char[3] - m2.as_char[3];
-  res.as_char[4] = m1.as_char[4] - m2.as_char[4];
-  res.as_char[5] = m1.as_char[5] - m2.as_char[5];
-  res.as_char[6] = m1.as_char[6] - m2.as_char[6];
-  res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+  __res.as_char[0] = __mu1.as_char[0] - __mu2.as_char[0];
+  __res.as_char[1] = __mu1.as_char[1] - __mu2.as_char[1];
+  __res.as_char[2] = __mu1.as_char[2] - __mu2.as_char[2];
+  __res.as_char[3] = __mu1.as_char[3] - __mu2.as_char[3];
+  __res.as_char[4] = __mu1.as_char[4] - __mu2.as_char[4];
+  __res.as_char[5] = __mu1.as_char[5] - __mu2.as_char[5];
+  __res.as_char[6] = __mu1.as_char[6] - __mu2.as_char[6];
+  __res.as_char[7] = __mu1.as_char[7] - __mu2.as_char[7];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -523,24 +525,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = m1.as_short[0] - m2.as_short[0];
-  res.as_short[1] = m1.as_short[1] - m2.as_short[1];
-  res.as_short[2] = m1.as_short[2] - m2.as_short[2];
-  res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+  __res.as_short[0] = __mu1.as_short[0] - __mu2.as_short[0];
+  __res.as_short[1] = __mu1.as_short[1] - __mu2.as_short[1];
+  __res.as_short[2] = __mu1.as_short[2] - __mu2.as_short[2];
+  __res.as_short[3] = __mu1.as_short[3] - __mu2.as_short[3];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -555,22 +557,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sub_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = vec_sub(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = vec_sub(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = m1.as_int[0] - m2.as_int[0];
-  res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+  __res.as_int[0] = __mu1.as_int[0] - __mu2.as_int[0];
+  __res.as_int[1] = __mu1.as_int[1] - __mu2.as_int[1];
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -708,25 +710,25 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
 #if defined(_ARCH_PWR6) && defined(__powerpc64__)
-  __m64 res;
-  __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
-  return (res);
+  __m64 __res;
+  __asm__("cmpb %0,%1,%2;\n" : "=r"(__res) : "r"(__m1), "r"(__m2) :);
+  return (__res);
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+  __res.as_char[0] = (__mu1.as_char[0] == __mu2.as_char[0]) ? -1 : 0;
+  __res.as_char[1] = (__mu1.as_char[1] == __mu2.as_char[1]) ? -1 : 0;
+  __res.as_char[2] = (__mu1.as_char[2] == __mu2.as_char[2]) ? -1 : 0;
+  __res.as_char[3] = (__mu1.as_char[3] == __mu2.as_char[3]) ? -1 : 0;
+  __res.as_char[4] = (__mu1.as_char[4] == __mu2.as_char[4]) ? -1 : 0;
+  __res.as_char[5] = (__mu1.as_char[5] == __mu2.as_char[5]) ? -1 : 0;
+  __res.as_char[6] = (__mu1.as_char[6] == __mu2.as_char[6]) ? -1 : 0;
+  __res.as_char[7] = (__mu1.as_char[7] == __mu2.as_char[7]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -740,28 +742,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = (__vector signed char)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = (__vector signed char)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
-  res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
-  res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
-  res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
-  res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
-  res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
-  res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
-  res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+  __res.as_char[0] = (__mu1.as_char[0] > __mu2.as_char[0]) ? -1 : 0;
+  __res.as_char[1] = (__mu1.as_char[1] > __mu2.as_char[1]) ? -1 : 0;
+  __res.as_char[2] = (__mu1.as_char[2] > __mu2.as_char[2]) ? -1 : 0;
+  __res.as_char[3] = (__mu1.as_char[3] > __mu2.as_char[3]) ? -1 : 0;
+  __res.as_char[4] = (__mu1.as_char[4] > __mu2.as_char[4]) ? -1 : 0;
+  __res.as_char[5] = (__mu1.as_char[5] > __mu2.as_char[5]) ? -1 : 0;
+  __res.as_char[6] = (__mu1.as_char[6] > __mu2.as_char[6]) ? -1 : 0;
+  __res.as_char[7] = (__mu1.as_char[7] > __mu2.as_char[7]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -777,24 +779,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = (__vector signed short)vec_cmpeq(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+  __res.as_short[0] = (__mu1.as_short[0] == __mu2.as_short[0]) ? -1 : 0;
+  __res.as_short[1] = (__mu1.as_short[1] == __mu2.as_short[1]) ? -1 : 0;
+  __res.as_short[2] = (__mu1.as_short[2] == __mu2.as_short[2]) ? -1 : 0;
+  __res.as_short[3] = (__mu1.as_short[3] == __mu2.as_short[3]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -808,24 +810,24 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR8
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = (__vector signed short)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = (__vector signed short)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
-  res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
-  res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
-  res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+  __res.as_short[0] = (__mu1.as_short[0] > __mu2.as_short[0]) ? -1 : 0;
+  __res.as_short[1] = (__mu1.as_short[1] > __mu2.as_short[1]) ? -1 : 0;
+  __res.as_short[2] = (__mu1.as_short[2] > __mu2.as_short[2]) ? -1 : 0;
+  __res.as_short[3] = (__mu1.as_short[3] > __mu2.as_short[3]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -841,22 +843,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpeq(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = (__vector signed int)vec_cmpeq(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+  __res.as_int[0] = (__mu1.as_int[0] == __mu2.as_int[0]) ? -1 : 0;
+  __res.as_int[1] = (__mu1.as_int[1] == __mu2.as_int[1]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -870,22 +872,22 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
 #if _ARCH_PWR9
-  __vector signed int a, b, c;
+  __vector signed int __a, __b, __c;
 
-  a = (__vector signed int)vec_splats(__m1);
-  b = (__vector signed int)vec_splats(__m2);
-  c = (__vector signed int)vec_cmpgt(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed int)vec_splats(__m1);
+  __b = (__vector signed int)vec_splats(__m2);
+  __c = (__vector signed int)vec_cmpgt(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 #else
-  __m64_union m1, m2, res;
+  __m64_union __mu1, __mu2, __res;
 
-  m1.as_m64 = __m1;
-  m2.as_m64 = __m2;
+  __mu1.as_m64 = __m1;
+  __mu2.as_m64 = __m2;
 
-  res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
-  res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+  __res.as_int[0] = (__mu1.as_int[0] > __mu2.as_int[0]) ? -1 : 0;
+  __res.as_int[1] = (__mu1.as_int[1] > __mu2.as_int[1]) ? -1 : 0;
 
-  return (__m64)res.as_m64;
+  return (__m64)__res.as_m64;
 #endif
 }
 
@@ -901,12 +903,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -919,12 +921,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -937,12 +939,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -956,12 +958,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_adds_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
+  __vector unsigned short __a, __b, __c;
 
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_adds(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned short)vec_splats(__m1);
+  __b = (__vector unsigned short)vec_splats(__m2);
+  __c = vec_adds(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -975,12 +977,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pi8(__m64 __m1, __m64 __m2) {
-  __vector signed char a, b, c;
+  __vector signed char __a, __b, __c;
 
-  a = (__vector signed char)vec_splats(__m1);
-  b = (__vector signed char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed char)vec_splats(__m1);
+  __b = (__vector signed char)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -994,12 +996,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1013,12 +1015,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pu8(__m64 __m1, __m64 __m2) {
-  __vector unsigned char a, b, c;
+  __vector unsigned char __a, __b, __c;
 
-  a = (__vector unsigned char)vec_splats(__m1);
-  b = (__vector unsigned char)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned char)vec_splats(__m1);
+  __b = (__vector unsigned char)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1032,12 +1034,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_subs_pu16(__m64 __m1, __m64 __m2) {
-  __vector unsigned short a, b, c;
+  __vector unsigned short __a, __b, __c;
 
-  a = (__vector unsigned short)vec_splats(__m1);
-  b = (__vector unsigned short)vec_splats(__m2);
-  c = vec_subs(a, b);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector unsigned short)vec_splats(__m1);
+  __b = (__vector unsigned short)vec_splats(__m2);
+  __c = vec_subs(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1052,14 +1054,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_madd_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed int c;
-  __vector signed int zero = {0, 0, 0, 0};
+  __vector signed short __a, __b;
+  __vector signed int __c;
+  __vector signed int __zero = {0, 0, 0, 0};
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = vec_vmsumshm(a, b, zero);
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = vec_vmsumshm(__a, __b, __zero);
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1072,10 +1074,10 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b;
-  __vector signed short c;
-  __vector signed int w0, w1;
-  __vector unsigned char xform1 = {
+  __vector signed short __a, __b;
+  __vector signed short __c;
+  __vector signed int __w0, __w1;
+  __vector unsigned char __xform1 = {
 #ifdef __LITTLE_ENDIAN__
       0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
       0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
@@ -1085,14 +1087,14 @@
 #endif
   };
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
 
-  w0 = vec_vmulesh(a, b);
-  w1 = vec_vmulosh(a, b);
-  c = (__vector signed short)vec_perm(w0, w1, xform1);
+  __w0 = vec_vmulesh(__a, __b);
+  __w1 = vec_vmulosh(__a, __b);
+  __c = (__vector signed short)vec_perm(__w0, __w1, __xform1);
 
-  return (__m64)((__vector long long)c)[0];
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1106,12 +1108,12 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
-  __vector signed short a, b, c;
+  __vector signed short __a, __b, __c;
 
-  a = (__vector signed short)vec_splats(__m1);
-  b = (__vector signed short)vec_splats(__m2);
-  c = a * b;
-  return (__m64)((__vector long long)c)[0];
+  __a = (__vector signed short)vec_splats(__m1);
+  __b = (__vector signed short)vec_splats(__m2);
+  __c = __a * __b;
+  return (__m64)((__vector long long)__c)[0];
 }
 
 extern __inline __m64
@@ -1124,14 +1126,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sll_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
+  __vector signed short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sl(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector signed short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sl(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1159,13 +1161,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sll_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = m.as_int[0] << __count;
-  res.as_int[1] = m.as_int[1] << __count;
-  return (res.as_m64);
+  __res.as_int[0] = __res.as_int[0] << __count;
+  __res.as_int[1] = __res.as_int[1] << __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1191,14 +1193,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sra_pi16(__m64 __m, __m64 __count) {
-  __vector signed short m, r;
-  __vector unsigned short c;
+  __vector signed short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector signed short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sra(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector signed short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sra(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1226,13 +1228,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_sra_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = m.as_int[0] >> __count;
-  res.as_int[1] = m.as_int[1] >> __count;
-  return (res.as_m64);
+  __res.as_int[0] = __res.as_int[0] >> __count;
+  __res.as_int[1] = __res.as_int[1] >> __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1258,14 +1260,14 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_srl_pi16(__m64 __m, __m64 __count) {
-  __vector unsigned short m, r;
-  __vector unsigned short c;
+  __vector unsigned short __r;
+  __vector unsigned short __c;
 
   if (__count <= 15) {
-    m = (__vector unsigned short)vec_splats(__m);
-    c = (__vector unsigned short)vec_splats((unsigned short)__count);
-    r = vec_sr(m, (__vector unsigned short)c);
-    return (__m64)((__vector long long)r)[0];
+    __r = (__vector unsigned short)vec_splats(__m);
+    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    __r = vec_sr(__r, (__vector unsigned short)__c);
+    return (__m64)((__vector long long)__r)[0];
   } else
     return (0);
 }
@@ -1293,13 +1295,13 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_srl_pi32(__m64 __m, __m64 __count) {
-  __m64_union m, res;
+  __m64_union __res;
 
-  m.as_m64 = __m;
+  __res.as_m64 = __m;
 
-  res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
-  res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
-  return (res.as_m64);
+  __res.as_int[0] = (unsigned int)__res.as_int[0] >> __count;
+  __res.as_int[1] = (unsigned int)__res.as_int[1] >> __count;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1326,24 +1328,24 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi32(int __i1, int __i0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
+  __res.as_int[0] = __i0;
+  __res.as_int[1] = __i1;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of four 16-bit values; W0 is least significant.  */
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_short[0] = __w0;
-  res.as_short[1] = __w1;
-  res.as_short[2] = __w2;
-  res.as_short[3] = __w3;
-  return (res.as_m64);
+  __res.as_short[0] = __w0;
+  __res.as_short[1] = __w1;
+  __res.as_short[2] = __w2;
+  __res.as_short[3] = __w3;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of eight 8-bit values; B0 is least significant.  */
@@ -1351,28 +1353,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
                 char __b2, char __b1, char __b0) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_char[0] = __b0;
-  res.as_char[1] = __b1;
-  res.as_char[2] = __b2;
-  res.as_char[3] = __b3;
-  res.as_char[4] = __b4;
-  res.as_char[5] = __b5;
-  res.as_char[6] = __b6;
-  res.as_char[7] = __b7;
-  return (res.as_m64);
+  __res.as_char[0] = __b0;
+  __res.as_char[1] = __b1;
+  __res.as_char[2] = __b2;
+  __res.as_char[3] = __b3;
+  __res.as_char[4] = __b4;
+  __res.as_char[5] = __b5;
+  __res.as_char[6] = __b6;
+  __res.as_char[7] = __b7;
+  return (__res.as_m64);
 }
 
 /* Similar, but with the arguments in reverse order.  */
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_setr_pi32(int __i0, int __i1) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i0;
-  res.as_int[1] = __i1;
-  return (res.as_m64);
+  __res.as_int[0] = __i0;
+  __res.as_int[1] = __i1;
+  return (__res.as_m64);
 }
 
 extern __inline __m64
@@ -1392,11 +1394,11 @@
 extern __inline __m64
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set1_pi32(int __i) {
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_int[0] = __i;
-  res.as_int[1] = __i;
-  return (res.as_m64);
+  __res.as_int[0] = __i;
+  __res.as_int[1] = __i;
+  return (__res.as_m64);
 }
 
 /* Creates a vector of four 16-bit values, all elements containing W.  */
@@ -1409,13 +1411,13 @@
   w = (__vector signed short)vec_splats(__w);
   return (__m64)((__vector long long)w)[0];
 #else
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_short[0] = __w;
-  res.as_short[1] = __w;
-  res.as_short[2] = __w;
-  res.as_short[3] = __w;
-  return (res.as_m64);
+  __res.as_short[0] = __w;
+  __res.as_short[1] = __w;
+  __res.as_short[2] = __w;
+  __res.as_short[3] = __w;
+  return (__res.as_m64);
 #endif
 }
 
@@ -1424,28 +1426,28 @@
     __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     _mm_set1_pi8(signed char __b) {
 #if _ARCH_PWR8
-  __vector signed char b;
+  __vector signed char __res;
 
-  b = (__vector signed char)vec_splats(__b);
-  return (__m64)((__vector long long)b)[0];
+  __res = (__vector signed char)vec_splats(__b);
+  return (__m64)((__vector long long)__res)[0];
 #else
-  __m64_union res;
+  __m64_union __res;
 
-  res.as_char[0] = __b;
-  res.as_char[1] = __b;
-  res.as_char[2] = __b;
-  res.as_char[3] = __b;
-  res.as_char[4] = __b;
-  res.as_char[5] = __b;
-  res.as_char[6] = __b;
-  res.as_char[7] = __b;
-  return (res.as_m64);
+  __res.as_char[0] = __b;
+  __res.as_char[1] = __b;
+  __res.as_char[2] = __b;
+  __res.as_char[3] = __b;
+  __res.as_char[4] = __b;
+  __res.as_char[5] = __b;
+  __res.as_char[6] = __b;
+  __res.as_char[7] = __b;
+  return (__res.as_m64);
 #endif
 }
 
 #else
 #include_next <mmintrin.h>
-#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))   \
-        */
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
 
 #endif /* _MMINTRIN_H_INCLUDED */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h
new file mode 100644
index 0000000..fda39ed
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/pmmintrin.h
@@ -0,0 +1,145 @@
+/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.
+
+   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
+   is a good match for most SIMD operations.  However, the horizontal
+   add/sub operations require the data pairs to be permuted into
+   separate registers with vertical even/odd alignment before the
+   operation, and the addsub operation requires that the sign of only
+   the even-numbered elements be flipped (XORed with -0.0).
+   For larger blocks of code using these intrinsic implementations,
+   the compiler should be able to schedule instructions to avoid
+   additional latency.
+
+   In the specific case of the monitor and mwait instructions, there is
+   no direct equivalent in the PowerISA at this time, so those
+   intrinsics are not implemented.  */
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#endif
+
+#ifndef PMMINTRIN_H_
+#define PMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+/* We need definitions from the SSE2 and SSE header files.  */
+#include <emmintrin.h>
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_addsub_ps(__m128 __X, __m128 __Y) {
+  const __v4sf __even_n0 = {-0.0, 0.0, -0.0, 0.0};
+  __v4sf __even_neg_Y = vec_xor(__Y, __even_n0);
+  return (__m128)vec_add(__X, __even_neg_Y);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_addsub_pd(__m128d __X, __m128d __Y) {
+  const __v2df __even_n0 = {-0.0, 0.0};
+  __v2df __even_neg_Y = vec_xor(__Y, __even_n0);
+  return (__m128d)vec_add(__X, __even_neg_Y);
+}
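
For reference, a minimal usage sketch of the two addsub helpers above (a
hypothetical standalone test, assuming a ppc64 Linux target compiled with
-DNO_WARN_X86_INTRINSICS; not part of the header):

/* Even lanes subtract, odd lanes add. */
#include <stdio.h>
#include <pmmintrin.h>

int main(void) {
  __m128 x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);     /* lanes {1,2,3,4} */
  __m128 y = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f); /* lanes {10,20,30,40} */
  float out[4];
  _mm_storeu_ps(out, _mm_addsub_ps(x, y));
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* -9 22 -27 44 */
  return 0;
}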
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_ps(__m128 __X, __m128 __Y) {
+  __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+                                     0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+                                     0x18, 0x19, 0x1A, 0x1B};
+  __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+                                     0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+                                     0x1C, 0x1D, 0x1E, 0x1F};
+  return (__m128)vec_add(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+                         vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
+}
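
A scalar sketch of the permute-based technique above (illustrative only;
__xform2 gathers {x0, x2, y0, y2} and __xform1 gathers {x1, x3, y1, y3}):

/* Reference model for _mm_hadd_ps. */
void hadd_ps_ref(const float x[4], const float y[4], float r[4]) {
  r[0] = x[0] + x[1]; /* adjacent pairs from __X */
  r[1] = x[2] + x[3];
  r[2] = y[0] + y[1]; /* adjacent pairs from __Y */
  r[3] = y[2] + y[3];
}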
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_ps(__m128 __X, __m128 __Y) {
+  __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+                                     0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+                                     0x18, 0x19, 0x1A, 0x1B};
+  __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+                                     0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+                                     0x1C, 0x1D, 0x1E, 0x1F};
+  return (__m128)vec_sub(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+                         vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pd(__m128d __X, __m128d __Y) {
+  return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y),
+                          vec_mergel((__v2df)__X, (__v2df)__Y));
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pd(__m128d __X, __m128d __Y) {
+  return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y),
+                          vec_mergel((__v2df)__X, (__v2df)__Y));
+}
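
In scalar terms (an illustrative note, assuming the little-endian element
order used throughout these wrappers):

/* vec_mergeh yields {x0, y0} and vec_mergel yields {x1, y1}, so:
     _mm_hadd_pd(X, Y) == { x0 + x1, y0 + y1 }
     _mm_hsub_pd(X, Y) == { x0 - x1, y0 - y1 } */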
+
+#ifdef _ARCH_PWR8
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movehdup_ps(__m128 __X) {
+  return (__m128)vec_mergeo((__v4su)__X, (__v4su)__X);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_moveldup_ps(__m128 __X) {
+  return (__m128)vec_mergee((__v4su)__X, (__v4su)__X);
+}
+#endif
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loaddup_pd(double const *__P) {
+  return (__m128d)vec_splats(*__P);
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movedup_pd(__m128d __X) {
+  return _mm_shuffle_pd(__X, __X, _MM_SHUFFLE2(0, 0));
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_lddqu_si128(__m128i const *__P) {
+  return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
+}
+
+/* POWER8 / POWER9 have no equivalent for _mm_monitor nor _mm_wait.  */
+
+#else
+#include_next <pmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* PMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h
new file mode 100644
index 0000000..6fe6c8a
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/smmintrin.h
@@ -0,0 +1,663 @@
+/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.
+
+   NOTE: This is NOT a complete implementation of the SSE4 intrinsics!  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef SMMINTRIN_H_
+#define SMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+#include <tmmintrin.h>
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_ZERO 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_NEG_INF 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_pd(__m128d __A, int __rounding) {
+  __v2df __r;
+  union {
+    double __fr;
+    long long __fpscr;
+  } __enables_save, __fpscr_save;
+
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Save enabled exceptions, disable all exceptions,
+       and preserve the rounding mode.  */
+#ifdef _ARCH_PWR9
+    __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+    __fpscr_save.__fr = __builtin_mffs();
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+    __fpscr_save.__fpscr &= ~0xf8;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+  }
+
+  switch (__rounding) {
+  case _MM_FROUND_TO_NEAREST_INT:
+    __fpscr_save.__fr = __builtin_mffsl();
+    __attribute__((fallthrough));
+  case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+    __builtin_set_fpscr_rn(0b00);
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+
+    __r = vec_rint((__v2df)__A);
+
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+    break;
+  case _MM_FROUND_TO_NEG_INF:
+  case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+    __r = vec_floor((__v2df)__A);
+    break;
+  case _MM_FROUND_TO_POS_INF:
+  case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+    __r = vec_ceil((__v2df)__A);
+    break;
+  case _MM_FROUND_TO_ZERO:
+  case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+    __r = vec_trunc((__v2df)__A);
+    break;
+  case _MM_FROUND_CUR_DIRECTION:
+    __r = vec_rint((__v2df)__A);
+    break;
+  }
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    /* Restore enabled exceptions.  */
+    __fpscr_save.__fr = __builtin_mffsl();
+    __fpscr_save.__fpscr |= __enables_save.__fpscr;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+  }
+  return (__m128d)__r;
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_sd(__m128d __A, __m128d __B, int __rounding) {
+  __B = _mm_round_pd(__B, __rounding);
+  __v2df __r = {((__v2df)__B)[0], ((__v2df)__A)[1]};
+  return (__m128d)__r;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_ps(__m128 __A, int __rounding) {
+  __v4sf __r;
+  union {
+    double __fr;
+    long long __fpscr;
+  } __enables_save, __fpscr_save;
+
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Save enabled exceptions, disable all exceptions,
+       and preserve the rounding mode.  */
+#ifdef _ARCH_PWR9
+    __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+    __fpscr_save.__fr = __builtin_mffs();
+    __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+    __fpscr_save.__fpscr &= ~0xf8;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+  }
+
+  switch (__rounding) {
+  case _MM_FROUND_TO_NEAREST_INT:
+    __fpscr_save.__fr = __builtin_mffsl();
+    __attribute__((fallthrough));
+  case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+    __builtin_set_fpscr_rn(0b00);
+    /* Insert an artificial "read/write" reference to the variable
+       read below, to ensure the compiler does not schedule
+       a read/use of the variable before the FPSCR is modified, above.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : "+wa"(__A));
+
+    __r = vec_rint((__v4sf)__A);
+
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+    break;
+  case _MM_FROUND_TO_NEG_INF:
+  case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+    __r = vec_floor((__v4sf)__A);
+    break;
+  case _MM_FROUND_TO_POS_INF:
+  case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+    __r = vec_ceil((__v4sf)__A);
+    break;
+  case _MM_FROUND_TO_ZERO:
+  case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+    __r = vec_trunc((__v4sf)__A);
+    break;
+  case _MM_FROUND_CUR_DIRECTION:
+    __r = vec_rint((__v4sf)__A);
+    break;
+  }
+  if (__rounding & _MM_FROUND_NO_EXC) {
+    /* Insert an artificial "read" reference to the variable written
+       above, to ensure the compiler does not schedule the computation
+       of the value after the manipulation of the FPSCR, below.
+       This can be removed if and when GCC PR102783 is fixed.
+     */
+    __asm__("" : : "wa"(__r));
+    /* Restore enabled exceptions.  */
+    __fpscr_save.__fr = __builtin_mffsl();
+    __fpscr_save.__fpscr |= __enables_save.__fpscr;
+    __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+  }
+  return (__m128)__r;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_round_ss(__m128 __A, __m128 __B, int __rounding) {
+  __B = _mm_round_ps(__B, __rounding);
+  __v4sf __r = (__v4sf)__A;
+  __r[0] = ((__v4sf)__B)[0];
+  return (__m128)__r;
+}
+
+#define _mm_ceil_pd(V) _mm_round_pd((V), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_FLOOR)
+
+#define _mm_ceil_ps(V) _mm_round_ps((V), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(V) _mm_round_ps((V), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_FLOOR)
+
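A usage sketch of the rounding helpers above (hypothetical fragment; the
tie cases follow IEEE-754 round-to-nearest-even; not part of the header):

void round_demo(void) {
  __m128d v = _mm_set_pd(2.5, -2.5); /* lanes {-2.5, 2.5} */
  __m128d f = _mm_floor_pd(v);       /* {-3.0, 2.0} */
  __m128d n = _mm_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  /* Ties round to even: {-2.0, 2.0}, without raising FP exceptions. */
  (void)f;
  (void)n;
}
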
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+  __v16qi __result = (__v16qi)__A;
+
+  __result[__N & 0xf] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+  __v4si __result = (__v4si)__A;
+
+  __result[__N & 3] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+  __v2di __result = (__v2di)__A;
+
+  __result[__N & 1] = __D;
+
+  return (__m128i)__result;
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi8(__m128i __X, const int __N) {
+  return (unsigned char)((__v16qi)__X)[__N & 15];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi32(__m128i __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi64(__m128i __X, const int __N) {
+  return ((__v2di)__X)[__N & 1];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_ps(__m128 __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
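
A quick roundtrip through the element accessors above (hypothetical
fragment, not part of the header):

void insert_extract_demo(void) {
  __m128i a = _mm_set1_epi32(0);   /* from emmintrin.h */
  a = _mm_insert_epi32(a, 42, 2);  /* write lane 2 */
  int v = _mm_extract_epi32(a, 2); /* v == 42 */
  (void)v;
}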
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
+  __v16qi __charmask = vec_splats((signed char)__imm8);
+  __charmask = vec_gb(__charmask);
+  __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+#ifdef __BIG_ENDIAN__
+  __shortmask = vec_reve(__shortmask);
+#endif
+  return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128i)vec_blendv((__v16qi)__A, (__v16qi)__B, (__v16qu)__mask);
+#else
+  const __v16qu __seven = vec_splats((unsigned char)0x07);
+  __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
+  return (__m128i)vec_sel((__v16qi)__A, (__v16qi)__B, __lmask);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_ps(__m128 __A, __m128 __B, const int __imm8) {
+  __v16qu __pcv[] = {
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+      {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+      {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+      {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+      {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+  };
+  __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+  return (__m128)__r;
+}
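
The four low bits of __imm8 index the table above; bit i set means lane i
comes from __B. A usage sketch (hypothetical, not part of the header):

void blend_demo(__m128 a, __m128 b, __m128 *r) {
  /* 0x5 == 0b0101: lanes 0 and 2 from b, lanes 1 and 3 from a,
     i.e. __pcv[5] above. */
  *r = _mm_blend_ps(a, b, 0x5); /* {b0, a1, b2, a3} */
}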
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_ps(__m128 __A, __m128 __B, __m128 __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128)vec_blendv((__v4sf)__A, (__v4sf)__B, (__v4su)__mask);
+#else
+  const __v4si __zero = {0};
+  const __vector __bool int __boolmask = vec_cmplt((__v4si)__mask, __zero);
+  return (__m128)vec_sel((__v4su)__A, (__v4su)__B, (__v4su)__boolmask);
+#endif
+}
+
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_pd(__m128d __A, __m128d __B, const int __imm8) {
+  __v16qu __pcv[] = {
+      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+      {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+      {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+      {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}};
+  __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+  return (__m128d)__r;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_pd(__m128d __A, __m128d __B, __m128d __mask) {
+#ifdef _ARCH_PWR10
+  return (__m128d)vec_blendv((__v2df)__A, (__v2df)__B, (__v2du)__mask);
+#else
+  const __v2di __zero = {0};
+  const __vector __bool long long __boolmask =
+      vec_cmplt((__v2di)__mask, __zero);
+  return (__m128d)vec_sel((__v2du)__A, (__v2du)__B, (__v2du)__boolmask);
+#endif
+}
+#endif
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testz_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  const __v16qu __zero = {0};
+  return vec_all_eq(vec_and((__v16qu)__A, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testc_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  const __v16qu __zero = {0};
+  const __v16qu __notA = vec_nor((__v16qu)__A, (__v16qu)__A);
+  return vec_all_eq(vec_and((__v16qu)__notA, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_testnzc_si128(__m128i __A, __m128i __B) {
+  /* Note: This implementation does NOT set "zero" or "carry" flags.  */
+  return _mm_testz_si128(__A, __B) == 0 && _mm_testc_si128(__A, __B) == 0;
+}
+
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+
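A usage sketch of the predicates above (hypothetical, not part of the
header):

void testz_demo(void) {
  __m128i m = _mm_set1_epi32(0x00FF00FF);
  __m128i v = _mm_set1_epi32((int)0xFF00FF00);
  int all_zero = _mm_test_all_zeros(m, v); /* 1: m & v is zero everywhere */
  (void)all_zero;
}
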
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_epi64(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_cmpeq((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi8(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu16(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_epu32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_min((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi8(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu16(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_epu32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_max((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mullo_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_mul((__v4su)__X, (__v4su)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_mule((__v4si)__X, (__v4si)__Y);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi16(__m128i __A) {
+  return (__m128i)vec_unpackh((__v16qi)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi32(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v16qi)__A);
+  return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi8_epi64(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v16qi)__A);
+  __A = (__m128i)vec_unpackh((__v8hi)__A);
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi16_epi32(__m128i __A) {
+  return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi16_epi64(__m128i __A) {
+  __A = (__m128i)vec_unpackh((__v8hi)__A);
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepi32_epi64(__m128i __A) {
+  return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi16(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi32(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+  __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu8_epi64(__m128i __A) {
+  const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+  __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+  __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+  __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu16_epi32(__m128i __A) {
+  const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu16_epi64(__m128i __A) {
+  const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+  __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+  __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtepu32_epi64(__m128i __A) {
+  const __v4su __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+  __A = (__m128i)vec_mergeh((__v4su)__A, __zero);
+#else  /* __BIG_ENDIAN__.  */
+  __A = (__m128i)vec_mergeh(__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__.  */
+  return __A;
+}
+
+/* Return horizontal packed word minimum and its index in bits [15:0]
+   and bits [18:16] respectively.  */
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_minpos_epu16(__m128i __A) {
+  union __u {
+    __m128i __m;
+    __v8hu __uh;
+  };
+  union __u __u = {.__m = __A}, __r = {.__m = {0}};
+  unsigned short __ridx = 0;
+  unsigned short __rmin = __u.__uh[__ridx];
+  unsigned long __i;
+  for (__i = 1; __i < 8; __i++) {
+    if (__u.__uh[__i] < __rmin) {
+      __rmin = __u.__uh[__i];
+      __ridx = __i;
+    }
+  }
+  __r.__uh[0] = __rmin;
+  __r.__uh[1] = __ridx;
+  return __r.__m;
+}
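
A check of the scalar scan above (hypothetical fragment, not part of the
header; note that _mm_set_epi16 lists elements high-to-low):

void minpos_demo(void) {
  __m128i a = _mm_set_epi16(9, 8, 7, 2, 6, 5, 4, 3); /* lane 4 holds 2 */
  __m128i r = _mm_minpos_epu16(a);
  int min = _mm_extract_epi16(r, 0); /* 2 */
  int idx = _mm_extract_epi16(r, 1); /* 4 */
  (void)min;
  (void)idx;
}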
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packus_epi32(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_packsu((__v4si)__X, (__v4si)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_epi64(__m128i __X, __m128i __Y) {
+  return (__m128i)vec_cmpgt((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
+#else
+#include_next <smmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* SMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h
new file mode 100644
index 0000000..6185ca1
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/tmmintrin.h
@@ -0,0 +1,453 @@
+/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#error                                                                         \
+    "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef TMMINTRIN_H_
+#define TMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files.  */
+#include <pmmintrin.h>
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi16(__m128i __A) {
+  return (__m128i)vec_abs((__v8hi)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi32(__m128i __A) {
+  return (__m128i)vec_abs((__v4si)__A);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_epi8(__m128i __A) {
+  return (__m128i)vec_abs((__v16qi)__A);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi16(__m64 __A) {
+  __v8hi __B = (__v8hi)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi32(__m64 __A) {
+  __v4si __B = (__v4si)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_abs_pi8(__m64 __A) {
+  __v16qi __B = (__v16qi)(__v2du){__A, __A};
+  return (__m64)((__v2du)vec_abs(__B))[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_alignr_epi8(__m128i __A, __m128i __B, const unsigned int __count) {
+  if (__builtin_constant_p(__count) && __count < 16) {
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_reve((__v16qu)__A);
+    __B = (__m128i)vec_reve((__v16qu)__B);
+#endif
+    __A = (__m128i)vec_sld((__v16qu)__B, (__v16qu)__A, __count);
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_reve((__v16qu)__A);
+#endif
+    return __A;
+  }
+
+  if (__count == 0)
+    return __B;
+
+  if (__count >= 16) {
+    if (__count >= 32) {
+      const __v16qu __zero = {0};
+      return (__m128i)__zero;
+    } else {
+      const __v16qu __shift = vec_splats((unsigned char)((__count - 16) * 8));
+#ifdef __LITTLE_ENDIAN__
+      return (__m128i)vec_sro((__v16qu)__A, __shift);
+#else
+      return (__m128i)vec_slo((__v16qu)__A, __shift);
+#endif
+    }
+  } else {
+    const __v16qu __shiftA = vec_splats((unsigned char)((16 - __count) * 8));
+    const __v16qu __shiftB = vec_splats((unsigned char)(__count * 8));
+#ifdef __LITTLE_ENDIAN__
+    __A = (__m128i)vec_slo((__v16qu)__A, __shiftA);
+    __B = (__m128i)vec_sro((__v16qu)__B, __shiftB);
+#else
+    __A = (__m128i)vec_sro((__v16qu)__A, __shiftA);
+    __B = (__m128i)vec_slo((__v16qu)__B, __shiftB);
+#endif
+    return (__m128i)vec_or((__v16qu)__A, (__v16qu)__B);
+  }
+}
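
A scalar reference for the concatenate-and-shift semantics implemented
above (illustrative only):

/* r[i] = byte of (A:B) at offset i + count, where B occupies bytes
   0..15 and A bytes 16..31; offsets past 31 read as zero. */
void alignr_ref(const unsigned char a[16], const unsigned char b[16],
                unsigned int count, unsigned char r[16]) {
  unsigned char cat[32];
  for (int i = 0; i < 16; i++) {
    cat[i] = b[i];
    cat[16 + i] = a[i];
  }
  for (int i = 0; i < 16; i++)
    r[i] = (i + count < 32) ? cat[i + count] : 0;
}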
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_alignr_pi8(__m64 __A, __m64 __B, unsigned int __count) {
+  if (__count < 16) {
+    __v2du __C = {__B, __A};
+#ifdef __LITTLE_ENDIAN__
+    const __v4su __shift = {__count << 3, 0, 0, 0};
+    __C = (__v2du)vec_sro((__v16qu)__C, (__v16qu)__shift);
+#else
+    const __v4su __shift = {0, 0, 0, __count << 3};
+    __C = (__v2du)vec_slo((__v16qu)__C, (__v16qu)__shift);
+#endif
+    return (__m64)__C[0];
+  } else {
+    const __m64 __zero = {0};
+    return __zero;
+  }
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_add(__C, __D);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_epi32(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  2,  3,  8,  9,  10, 11,
+                       16, 17, 18, 19, 24, 25, 26, 27};
+  const __v16qu __Q = {4,  5,  6,  7,  12, 13, 14, 15,
+                       20, 21, 22, 23, 28, 29, 30, 31};
+  __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+  __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+  return (__m128i)vec_add(__C, __D);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pi16(__m64 __A, __m64 __B) {
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_add(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadd_pi32(__m64 __A, __m64 __B) {
+  __v4si __C = (__v4si)(__v2du){__A, __B};
+  const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+  const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+  __v4si __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_add(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadds_epi16(__m128i __A, __m128i __B) {
+  __v4si __C = {0}, __D = {0};
+  __C = vec_sum4s((__v8hi)__A, __C);
+  __D = vec_sum4s((__v8hi)__B, __D);
+  __C = (__v4si)vec_packs(__C, __D);
+  return (__m128i)__C;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hadds_pi16(__m64 __A, __m64 __B) {
+  const __v4si __zero = {0};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v4si __D = vec_sum4s(__C, __zero);
+  __C = vec_packs(__D, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_sub(__C, __D);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_epi32(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  2,  3,  8,  9,  10, 11,
+                       16, 17, 18, 19, 24, 25, 26, 27};
+  const __v16qu __Q = {4,  5,  6,  7,  12, 13, 14, 15,
+                       20, 21, 22, 23, 28, 29, 30, 31};
+  __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+  __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+  return (__m128i)vec_sub(__C, __D);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pi16(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v8hi __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_sub(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsub_pi32(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+  const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+  __v4si __C = (__v4si)(__v2du){__A, __B};
+  __v4si __D = vec_perm(__C, __C, __Q);
+  __C = vec_perm(__C, __C, __P);
+  __C = vec_sub(__C, __D);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsubs_epi16(__m128i __A, __m128i __B) {
+  const __v16qu __P = {0,  1,  4,  5,  8,  9,  12, 13,
+                       16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __Q = {2,  3,  6,  7,  10, 11, 14, 15,
+                       18, 19, 22, 23, 26, 27, 30, 31};
+  __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+  __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+  return (__m128i)vec_subs(__C, __D);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_hsubs_pi16(__m64 __A, __m64 __B) {
+  const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+  const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+  __v8hi __C = (__v8hi)(__v2du){__A, __B};
+  __v8hi __D = vec_perm(__C, __C, __P);
+  __v8hi __E = vec_perm(__C, __C, __Q);
+  __C = vec_subs(__D, __E);
+  return (__m64)((__v2du)__C)[1];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_epi8(__m128i __A, __m128i __B) {
+  const __v16qi __zero = {0};
+  __vector __bool char __select = vec_cmplt((__v16qi)__B, __zero);
+  __v16qi __C = vec_perm((__v16qi)__A, (__v16qi)__A, (__v16qu)__B);
+  return (__m128i)vec_sel(__C, __zero, __select);
+}
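
A scalar model of the control-byte semantics above (illustrative only):

/* A negative control byte zeroes the lane (the vec_sel above);
   otherwise the low 4 bits index into __A (the vec_perm). */
void shuffle_epi8_ref(const signed char a[16], const signed char b[16],
                      signed char r[16]) {
  for (int i = 0; i < 16; i++)
    r[i] = (b[i] < 0) ? 0 : a[b[i] & 0x0F];
}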
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pi8(__m64 __A, __m64 __B) {
+  const __v16qi __zero = {0};
+  __v16qi __C = (__v16qi)(__v2du){__A, __A};
+  __v16qi __D = (__v16qi)(__v2du){__B, __B};
+  __vector __bool char __select = vec_cmplt((__v16qi)__D, __zero);
+  __C = vec_perm((__v16qi)__C, (__v16qi)__C, (__v16qu)__D);
+  __C = vec_sel(__C, __zero, __select);
+  return (__m64)((__v2du)(__C))[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi8(__m128i __A, __m128i __B) {
+  const __v16qi __zero = {0};
+  __v16qi __selectneg = (__v16qi)vec_cmplt((__v16qi)__B, __zero);
+  __v16qi __selectpos =
+      (__v16qi)vec_neg((__v16qi)vec_cmpgt((__v16qi)__B, __zero));
+  __v16qi __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v16qi)__A, (__v16qi)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi16(__m128i __A, __m128i __B) {
+  const __v8hi __zero = {0};
+  __v8hi __selectneg = (__v8hi)vec_cmplt((__v8hi)__B, __zero);
+  __v8hi __selectpos = (__v8hi)vec_neg((__v8hi)vec_cmpgt((__v8hi)__B, __zero));
+  __v8hi __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v8hi)__A, (__v8hi)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_epi32(__m128i __A, __m128i __B) {
+  const __v4si __zero = {0};
+  __v4si __selectneg = (__v4si)vec_cmplt((__v4si)__B, __zero);
+  __v4si __selectpos = (__v4si)vec_neg((__v4si)vec_cmpgt((__v4si)__B, __zero));
+  __v4si __conv = vec_add(__selectneg, __selectpos);
+  return (__m128i)vec_mul((__v4si)__A, (__v4si)__conv);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi8(__m64 __A, __m64 __B) {
+  const __v16qi __zero = {0};
+  __v16qi __C = (__v16qi)(__v2du){__A, __A};
+  __v16qi __D = (__v16qi)(__v2du){__B, __B};
+  __C = (__v16qi)_mm_sign_epi8((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi16(__m64 __A, __m64 __B) {
+  const __v8hi __zero = {0};
+  __v8hi __C = (__v8hi)(__v2du){__A, __A};
+  __v8hi __D = (__v8hi)(__v2du){__B, __B};
+  __C = (__v8hi)_mm_sign_epi16((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sign_pi32(__m64 __A, __m64 __B) {
+  const __v4si __zero = {0};
+  __v4si __C = (__v4si)(__v2du){__A, __A};
+  __v4si __D = (__v4si)(__v2du){__B, __B};
+  __C = (__v4si)_mm_sign_epi32((__m128i)__C, (__m128i)__D);
+  return (__m64)((__v2du)(__C))[0];
+}
+#endif
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maddubs_epi16(__m128i __A, __m128i __B) {
+  __v8hi __unsigned = vec_splats((signed short)0x00ff);
+  __v8hi __C = vec_and(vec_unpackh((__v16qi)__A), __unsigned);
+  __v8hi __D = vec_and(vec_unpackl((__v16qi)__A), __unsigned);
+  __v8hi __E = vec_unpackh((__v16qi)__B);
+  __v8hi __F = vec_unpackl((__v16qi)__B);
+  __C = vec_mul(__C, __E);
+  __D = vec_mul(__D, __F);
+  const __v16qu __odds = {0,  1,  4,  5,  8,  9,  12, 13,
+                          16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __evens = {2,  3,  6,  7,  10, 11, 14, 15,
+                           18, 19, 22, 23, 26, 27, 30, 31};
+  __E = vec_perm(__C, __D, __odds);
+  __F = vec_perm(__C, __D, __evens);
+  return (__m128i)vec_adds(__E, __F);
+}
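
Per output lane, the computation above amounts to (scalar sketch,
illustrative only):

/* Unsigned bytes of __A times signed bytes of __B; adjacent products
   are summed with signed 16-bit saturation (the vec_adds above). */
short maddubs_lane(unsigned char a0, signed char b0,
                   unsigned char a1, signed char b1) {
  int t = a0 * b0 + a1 * b1;
  if (t > 32767) t = 32767;
  if (t < -32768) t = -32768;
  return (short)t;
}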
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maddubs_pi16(__m64 __A, __m64 __B) {
+  __v8hi __C = (__v8hi)(__v2du){__A, __A};
+  __C = vec_unpackl((__v16qi)__C);
+  const __v8hi __unsigned = vec_splats((signed short)0x00ff);
+  __C = vec_and(__C, __unsigned);
+  __v8hi __D = (__v8hi)(__v2du){__B, __B};
+  __D = vec_unpackl((__v16qi)__D);
+  __D = vec_mul(__C, __D);
+  const __v16qu __odds = {0,  1,  4,  5,  8,  9,  12, 13,
+                          16, 17, 20, 21, 24, 25, 28, 29};
+  const __v16qu __evens = {2,  3,  6,  7,  10, 11, 14, 15,
+                           18, 19, 22, 23, 26, 27, 30, 31};
+  __C = vec_perm(__D, __D, __odds);
+  __D = vec_perm(__D, __D, __evens);
+  __C = vec_adds(__C, __D);
+  return (__m64)((__v2du)(__C))[0];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhrs_epi16(__m128i __A, __m128i __B) {
+  __v4si __C = vec_unpackh((__v8hi)__A);
+  __v4si __D = vec_unpackh((__v8hi)__B);
+  __C = vec_mul(__C, __D);
+  __D = vec_unpackl((__v8hi)__A);
+  __v4si __E = vec_unpackl((__v8hi)__B);
+  __D = vec_mul(__D, __E);
+  const __v4su __shift = vec_splats((unsigned int)14);
+  __C = vec_sr(__C, __shift);
+  __D = vec_sr(__D, __shift);
+  const __v4si __ones = vec_splats((signed int)1);
+  __C = vec_add(__C, __ones);
+  __C = vec_sr(__C, (__v4su)__ones);
+  __D = vec_add(__D, __ones);
+  __D = vec_sr(__D, (__v4su)__ones);
+  return (__m128i)vec_pack(__C, __D);
+}
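
Per lane, the shift/add/shift sequence above computes a rounded high
multiply (scalar sketch, illustrative; >> is assumed to behave as an
arithmetic shift on signed int, as on common compilers):

short mulhrs_lane(short a, short b) {
  int t = ((int)a * (int)b) >> 14; /* keep one extra bit for rounding */
  return (short)((t + 1) >> 1);    /* add rounding bit, then final shift */
}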
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhrs_pi16(__m64 __A, __m64 __B) {
+  __v4si __C = (__v4si)(__v2du){__A, __A};
+  __C = vec_unpackh((__v8hi)__C);
+  __v4si __D = (__v4si)(__v2du){__B, __B};
+  __D = vec_unpackh((__v8hi)__D);
+  __C = vec_mul(__C, __D);
+  const __v4su __shift = vec_splats((unsigned int)14);
+  __C = vec_sr(__C, __shift);
+  const __v4si __ones = vec_splats((signed int)1);
+  __C = vec_add(__C, __ones);
+  __C = vec_sr(__C, (__v4su)__ones);
+  __v8hi __E = vec_pack(__C, __D);
+  return (__m64)((__v2du)(__E))[0];
+}
+
+#else
+#include_next <tmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* TMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h
new file mode 100644
index 0000000..cbfac26
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86gprintrin.h
@@ -0,0 +1,17 @@
+/*===--- x86gprintrin.h - Implementation of X86 GPR intrinsics on PowerPC --===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef X86GPRINTRIN_H_
+#define X86GPRINTRIN_H_
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#endif /* X86GPRINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h
new file mode 100644
index 0000000..f5c2012
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/x86intrin.h
@@ -0,0 +1,28 @@
+/*===---- x86intrin.h - Implementation of X86 intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.  */
+#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef X86INTRIN_H_
+#define X86INTRIN_H_
+
+#ifdef __ALTIVEC__
+#include <immintrin.h>
+#endif /* __ALTIVEC__ */
+
+#endif /* X86INTRIN_H_ */
diff --git a/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h
new file mode 100644
index 0000000..ee0032c
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/ppc_wrappers/xmmintrin.h
@@ -0,0 +1,1827 @@
+/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since X86 SSE intrinsics mainly handle the __m128 type, the PowerPC
+   VMX/VSX ISA is a good match for vector float SIMD operations.
+   However, scalar float operations in vector (XMM) registers require
+   the POWER8 VSX ISA (2.07) level.  There are differences in the data
+   format and placement of float scalars in the vector register, which
+   require extra steps to match SSE scalar float semantics on POWER.
+
+   It should be noted that there is much difference between X86_64's
+   MXCSR and PowerISA's FPSCR/VSCR registers.  It is recommended to use
+   the portable <fenv.h> instead of accessing the MXCSR directly.
+
+   Most SSE scalar float intrinsic operations can be performed more
+   efficiently as C language float scalar operations or optimized to
+   use vector SIMD operations. We recommend this for new applications. */
+#error                                                                         \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef XMMINTRIN_H_
+#define XMMINTRIN_H_
+
+#if defined(__ppc64__) &&                                                      \
+    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
+
+/* Define four value permute mask */
+#define _MM_SHUFFLE(w, x, y, z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
+
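A worked expansion of the selector macro (illustrative):

/* _MM_SHUFFLE(3, 2, 1, 0) == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4,
   the identity selector: each lane picks itself. */
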
+#include <altivec.h>
+
+/* Avoid collisions between altivec.h and strict adherence to C++ and
+   C11 standards.  This should eventually be done inside altivec.h itself,
+   but only after testing a full distro build.  */
+#if defined(__STRICT_ANSI__) &&                                                \
+    (defined(__cplusplus) ||                                                   \
+     (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
+#undef vector
+#undef pixel
+#undef bool
+#endif
+
+/* We need type definitions from the MMX header file.  */
+#include <mmintrin.h>
+
+/* Get _mm_malloc () and _mm_free ().  */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components.  */
+typedef vector float __m128 __attribute__((__may_alias__));
+
+/* Unaligned version of the same type.  */
+typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
+
+/* Internal data types for implementing the intrinsics.  */
+typedef vector float __v4sf;
+
+/* Create an undefined vector.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_undefined_ps(void) {
+  __m128 __Y = __Y; /* Intentionally self-initialized: any value is fine.  */
+  return __Y;
+}
+
+/* Create a vector of zeros.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_ps(void) {
+  return __extension__(__m128){0.0f, 0.0f, 0.0f, 0.0f};
+}
+
+/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ps(float const *__P) {
+  return ((__m128)vec_ld(0, (__v4sf *)__P));
+}
+
+/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadu_ps(float const *__P) {
+  return (vec_vsx_ld(0, __P));
+}
+
+/* Load four SPFP values in reverse order.  The address must be aligned.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadr_ps(float const *__P) {
+  __v4sf __tmp;
+  __m128 __result;
+  static const __vector unsigned char __permute_vector = {
+      0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+      0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
+
+  __tmp = vec_ld(0, (__v4sf *)__P);
+  __result = (__m128)vec_perm(__tmp, __tmp, __permute_vector);
+  return __result;
+}
+
+/* Create a vector with all four elements equal to F.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_ps(float __F) {
+  return __extension__(__m128)(__v4sf){__F, __F, __F, __F};
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_ps1(float __F) {
+  return _mm_set1_ps(__F);
+}
+
+/* Create the vector [Z Y X W].  */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__,
+                                      __artificial__))
+_mm_set_ps(const float __Z, const float __Y, const float __X, const float __W) {
+  return __extension__(__m128)(__v4sf){__W, __X, __Y, __Z};
+}
+
+/* Create the vector [W X Y Z].  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_ps(float __Z, float __Y, float __X, float __W) {
+  return __extension__(__m128)(__v4sf){__Z, __Y, __X, __W};
+}
+
+/* Store four SPFP values.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ps(float *__P, __m128 __A) {
+  vec_st((__v4sf)__A, 0, (__v4sf *)__P);
+}
+
+/* Store four SPFP values.  The address need not be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeu_ps(float *__P, __m128 __A) {
+  *(__m128_u *)__P = __A;
+}
+
+/* Store four SPFP values in reverse order.  The address must be aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storer_ps(float *__P, __m128 __A) {
+  __v4sf __tmp;
+  static const __vector unsigned char __permute_vector = {
+      0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+      0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
+
+  __tmp = (__m128)vec_perm(__A, __A, __permute_vector);
+
+  _mm_store_ps(__P, __tmp);
+}
+
+/* Store the lower SPFP value across four words.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store1_ps(float *__P, __m128 __A) {
+  __v4sf __va = vec_splat((__v4sf)__A, 0);
+  _mm_store_ps(__P, __va);
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ps1(float *__P, __m128 __A) {
+  _mm_store1_ps(__P, __A);
+}
+
+/* Create a vector with element 0 as F and the rest zero.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_ss(float __F) {
+  return __extension__(__m128)(__v4sf){__F, 0.0f, 0.0f, 0.0f};
+}
+
+/* Sets the low SPFP value of A from the low value of B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_move_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  return (vec_sel((__v4sf)__A, (__v4sf)__B, __mask));
+}
+
+/* Create a vector with element 0 as *P and the rest zero.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ss(float const *__P) {
+  return _mm_set_ss(*__P);
+}
+
+/* Stores the lower SPFP value.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_store_ss(float *__P, __m128 __A) {
+  *__P = ((__v4sf)__A)[0];
+}
+
+/* Perform the respective operation on the lower SPFP (single-precision
+   floating-point) values of A and B; the upper three SPFP values are
+   passed through from A.  */
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a + __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] + __B[0];
+  return (__A);
+#endif
+}
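+
+/* Worked example (editor's note): only lane 0 is affected, e.g.
+
+     __m128 __x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes {1,2,3,4}
+     __m128 __y = _mm_set1_ps(10.0f);
+     __m128 __z = _mm_add_ss(__x, __y);               // lanes {11,2,3,4}
+
+   The splat before the add keeps the working vector's upper lanes
+   well-defined, so they cannot raise spurious exceptions.  */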
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a - __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] - __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a * __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] * __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_ss(__m128 __A, __m128 __B) {
+#ifdef _ARCH_PWR7
+  __m128 __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation.  */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = __a / __b;
+  /* Then we merge the lower float result with the original upper
+     float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+#else
+  __A[0] = __A[0] / __B[0];
+  return (__A);
+#endif
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = vec_sqrt(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+/* Perform the respective operation on the four SPFP values in A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A + (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A - (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mul_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A * (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_div_ps(__m128 __A, __m128 __B) {
+  return (__m128)((__v4sf)__A / (__v4sf)__B);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sqrt_ps(__m128 __A) {
+  return (vec_sqrt((__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rcp_ps(__m128 __A) {
+  return (vec_re((__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rsqrt_ps(__m128 __A) {
+  return (vec_rsqrte(__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rcp_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = _mm_rcp_ps(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_rsqrt_ss(__m128 __A) {
+  __m128 __a, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __c = vec_rsqrte(__a);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel(__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_ss(__m128 __A, __m128 __B) {
+  __v4sf __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = vec_min(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_ss(__m128 __A, __m128 __B) {
+  __v4sf __a, __b, __c;
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat(__A, 0);
+  __b = vec_splat(__B, 0);
+  __c = vec_max(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return (vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_ps(__m128 __A, __m128 __B) {
+  __vector __bool int __m = vec_cmpgt((__v4sf)__B, (__v4sf)__A);
+  return vec_sel(__B, __A, __m);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_ps(__m128 __A, __m128 __B) {
+  __vector __bool int __m = vec_cmpgt((__v4sf)__A, (__v4sf)__B);
+  return vec_sel(__B, __A, __m);
+}
+
+/* Perform logical bit-wise operations on 128-bit values.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_and((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_andc((__v4sf)__B, (__v4sf)__A));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_or((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_xor((__v4sf)__A, (__v4sf)__B));
+}
+
+/* Perform a comparison on the four SPFP values of A and B.  For each
+   element, if the comparison is true, place a mask of all ones in the
+   result, otherwise a mask of zeros.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpeq((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_ps(__m128 __A, __m128 __B) {
+  __v4sf __temp = (__v4sf)vec_cmpeq((__v4sf)__A, (__v4sf)__B);
+  return ((__m128)vec_nor(__temp, __temp));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_ps(__m128 __A, __m128 __B) {
+  return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_ps(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+  __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+  return ((__m128)vec_and(__c, __d));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_ps(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+  __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+  return ((__m128)vec_or(__c, __d));
+}
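+
+/* Editor's note (illustrative): the ordered/unordered tests above rely
+   on IEEE-754 encoding.  A NaN has an all-ones biased exponent and a
+   nonzero mantissa, so its absolute value, viewed as an unsigned
+   integer, exceeds 0x7f800000 (+Inf).  For example, the quiet NaN
+   0x7fc00000 is flagged unordered, while 3.0f (0x40400000) is not.  */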
+
+/* Perform a comparison on the lower SPFP values of A and B.  If the
+   comparison is true, place a mask of all ones in the result, otherwise a
+   mask of zeros.  The upper three SPFP values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpeq(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmplt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmplt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmple_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmple(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpgt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpge_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpge(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpneq_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpeq(__a, __b);
+  __c = vec_nor(__c, __c);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnlt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpge(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnle_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmpgt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpngt_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmple(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpnge_ss(__m128 __A, __m128 __B) {
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+  __v4sf __a, __b, __c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+  __a = vec_splat((__v4sf)__A, 0);
+  __b = vec_splat((__v4sf)__B, 0);
+  __c = (__v4sf)vec_cmplt(__a, __b);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpord_ss(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+  __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+  __c = vec_and(__c, __d);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpunord_ss(__m128 __A, __m128 __B) {
+  __vector unsigned int __a, __b;
+  __vector unsigned int __c, __d;
+  static const __vector unsigned int __float_exp_mask = {
+      0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+  static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+  __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+  __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+  __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+  __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+  __c = vec_or(__c, __d);
+  /* Then we merge the lower float result with the original upper
+   * float elements from __A.  */
+  return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+/* Compare the lower SPFP values of A and B and return 1 if true
+   and 0 if false.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comieq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comilt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comile_ss(__m128 __A, __m128 __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comigt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comige_ss(__m128 __A, __m128 __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_comineq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] != __B[0]);
+}
+
+/* FIXME
+ * The _mm_ucomi??_ss implementations below are exactly the same as
+ * the _mm_comi??_ss ones because GCC for PowerPC only generates
+ * unordered compares (scalar and vector).
+ * Technically _mm_comieq_ss et al. should be using the ordered
+ * compare and signal for QNaNs.
+ * The _mm_ucomieq_ss et al. should be OK as is.
+ */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomieq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] == __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomilt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] < __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomile_ss(__m128 __A, __m128 __B) {
+  return (__A[0] <= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomigt_ss(__m128 __A, __m128 __B) {
+  return (__A[0] > __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomige_ss(__m128 __A, __m128 __B) {
+  return (__A[0] >= __B[0]);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_ucomineq_ss(__m128 __A, __m128 __B) {
+  return (__A[0] != __B[0]);
+}
+
+extern __inline float
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_f32(__m128 __A) {
+  return ((__v4sf)__A)[0];
+}
+
+/* Convert the lower SPFP value to a 32-bit integer according to the current
+   rounding mode.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si32(__m128 __A) {
+  int __res;
+#ifdef _ARCH_PWR8
+  double __dtmp;
+  __asm__(
+#ifdef __LITTLE_ENDIAN__
+      "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+      "xscvspdp %x2,%x0;\n"
+      "fctiw  %2,%2;\n"
+      "mfvsrd  %1,%x2;\n"
+      : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+      :);
+#else
+  __res = __builtin_rint(__A[0]);
+#endif
+  return __res;
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_ss2si(__m128 __A) {
+  return _mm_cvtss_si32(__A);
+}
+
+/* Convert the lower SPFP value to a 64-bit integer according to the
+   current rounding mode.  */
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si64(__m128 __A) {
+  long long __res;
+#if defined(_ARCH_PWR8) && defined(__powerpc64__)
+  double __dtmp;
+  __asm__(
+#ifdef __LITTLE_ENDIAN__
+      "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+      "xscvspdp %x2,%x0;\n"
+      "fctid  %2,%2;\n"
+      "mfvsrd  %1,%x2;\n"
+      : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+      :);
+#else
+  __res = __builtin_llrint(__A[0]);
+#endif
+  return __res;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtss_si64x(__m128 __A) {
+  return _mm_cvtss_si64((__v4sf)__A);
+}
+
+/* Constants for use with _mm_prefetch.  */
+enum _mm_hint {
+  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
+  _MM_HINT_ET0 = 7,
+  _MM_HINT_ET1 = 6,
+  _MM_HINT_T0 = 3,
+  _MM_HINT_T1 = 2,
+  _MM_HINT_T2 = 1,
+  _MM_HINT_NTA = 0
+};
+
+/* Loads one cache line from address P to a location "closer" to the
+   processor.  The selector I specifies the type of prefetch operation.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_prefetch(const void *__P, enum _mm_hint __I) {
+  /* Current PowerPC implementations ignore the hint parameters.  */
+  __builtin_prefetch(__P);
+}
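+
+/* Usage sketch (editor's note): the hint is accepted for source
+   compatibility only, e.g.
+
+     _mm_prefetch(__p, _MM_HINT_T0);  // same effect as _MM_HINT_NTA here
+
+   since all hints currently map to a plain __builtin_prefetch.  */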
+
+/* Convert the two lower SPFP values to 32-bit integers according to the
+   current rounding mode.  Return the integers in packed form.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi32(__m128 __A) {
+  __v4sf __temp, __rounded;
+  __vector unsigned long long __result;
+
+  /* Splat two lower SPFP values to both halves.  */
+  __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+  __rounded = vec_rint(__temp);
+  __result = (__vector unsigned long long)vec_cts(__rounded, 0);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_ps2pi(__m128 __A) {
+  return _mm_cvtps_pi32(__A);
+}
+
+/* Truncate the lower SPFP value to a 32-bit integer.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si32(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 32-bit integer and return.  */
+  return __temp;
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtt_ss2si(__m128 __A) {
+  return _mm_cvttss_si32(__A);
+}
+
+/* Intel intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si64(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 64-bit integer and return.  */
+  return __temp;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttss_si64x(__m128 __A) {
+  /* Extract the lower float element.  */
+  float __temp = __A[0];
+  /* Truncate to 64-bit integer and return.  */
+  return __temp;
+}
+
+/* Truncate the two lower SPFP values to 32-bit integers.  Return the
+   integers in packed form.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvttps_pi32(__m128 __A) {
+  __v4sf __temp;
+  __vector unsigned long long __result;
+
+  /* Splat two lower SPFP values to both halves.  */
+  __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+  __result = (__vector unsigned long long)vec_cts(__temp, 0);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtt_ps2pi(__m128 __A) {
+  return _mm_cvttps_pi32(__A);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_ss(__m128 __A, int __B) {
+  float __temp = __B;
+  __A[0] = __temp;
+
+  return __A;
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_si2ss(__m128 __A, int __B) {
+  return _mm_cvtsi32_ss(__A, __B);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A.  */
+/* Intel intrinsic.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_ss(__m128 __A, long long __B) {
+  float __temp = __B;
+  __A[0] = __temp;
+
+  return __A;
+}
+
+/* Microsoft intrinsic.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_ss(__m128 __A, long long __B) {
+  return _mm_cvtsi64_ss(__A, __B);
+}
+
+/* Convert the two 32-bit values in B to SPFP form and insert them
+   as the two lower elements in A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32_ps(__m128 __A, __m64 __B) {
+  __vector signed int __vm1;
+  __vector float __vf1;
+
+  __vm1 = (__vector signed int)(__vector unsigned long long){__B, __B};
+  __vf1 = (__vector float)vec_ctf(__vm1, 0);
+
+  return ((__m128)(__vector unsigned long long){
+      ((__vector unsigned long long)__vf1)[0],
+      ((__vector unsigned long long)__A)[1]});
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvt_pi2ps(__m128 __A, __m64 __B) {
+  return _mm_cvtpi32_ps(__A, __B);
+}
+
+/* Convert the four signed 16-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi16_ps(__m64 __A) {
+  __vector signed short __vs8;
+  __vector signed int __vi4;
+  __vector float __vf1;
+
+  __vs8 = (__vector signed short)(__vector unsigned long long){__A, __A};
+  __vi4 = vec_vupklsh(__vs8);
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the four unsigned 16-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpu16_ps(__m64 __A) {
+  const __vector unsigned short __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+  __vector unsigned short __vs8;
+  __vector unsigned int __vi4;
+  __vector float __vf1;
+
+  __vs8 = (__vector unsigned short)(__vector unsigned long long){__A, __A};
+  __vi4 = (__vector unsigned int)vec_mergel
+#ifdef __LITTLE_ENDIAN__
+      (__vs8, __zero);
+#else
+      (__zero, __vs8);
+#endif
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the low four signed 8-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi8_ps(__m64 __A) {
+  __vector signed char __vc16;
+  __vector signed short __vs8;
+  __vector signed int __vi4;
+  __vector float __vf1;
+
+  __vc16 = (__vector signed char)(__vector unsigned long long){__A, __A};
+  __vs8 = vec_vupkhsb(__vc16);
+  __vi4 = vec_vupkhsh(__vs8);
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpu8_ps(__m64 __A) {
+  const __vector unsigned char __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+  __vector unsigned char __vc16;
+  __vector unsigned short __vs8;
+  __vector unsigned int __vi4;
+  __vector float __vf1;
+
+  __vc16 = (__vector unsigned char)(__vector unsigned long long){__A, __A};
+#ifdef __LITTLE_ENDIAN__
+  __vs8 = (__vector unsigned short)vec_mergel(__vc16, __zero);
+  __vi4 =
+      (__vector unsigned int)vec_mergeh(__vs8, (__vector unsigned short)__zero);
+#else
+  __vs8 = (__vector unsigned short)vec_mergel(__zero, __vc16);
+  __vi4 =
+      (__vector unsigned int)vec_mergeh((__vector unsigned short)__zero, __vs8);
+#endif
+  __vf1 = (__vector float)vec_ctf(__vi4, 0);
+
+  return (__m128)__vf1;
+}
+
+/* Convert the four signed 32-bit values in A and B to SPFP form.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) {
+  __vector signed int __vi4;
+  __vector float __vf4;
+
+  __vi4 = (__vector signed int)(__vector unsigned long long){__A, __B};
+  __vf4 = (__vector float)vec_ctf(__vi4, 0);
+  return (__m128)__vf4;
+}
+
+/* Convert the four SPFP values in A to four signed 16-bit integers.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi16(__m128 __A) {
+  __v4sf __rounded;
+  __vector signed int __temp;
+  __vector unsigned long long __result;
+
+  __rounded = vec_rint(__A);
+  __temp = vec_cts(__rounded, 0);
+  __result = (__vector unsigned long long)vec_pack(__temp, __temp);
+
+  return (__m64)((__vector long long)__result)[0];
+}
+
+/* Convert the four SPFP values in A to four signed 8-bit integers.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtps_pi8(__m128 __A) {
+  __v4sf __rounded;
+  __vector signed int __tmp_i;
+  static const __vector signed int __zero = {0, 0, 0, 0};
+  __vector signed short __tmp_s;
+  __vector signed char __res_v;
+
+  __rounded = vec_rint(__A);
+  __tmp_i = vec_cts(__rounded, 0);
+  __tmp_s = vec_pack(__tmp_i, __zero);
+  __res_v = vec_pack(__tmp_s, __tmp_s);
+  return (__m64)((__vector long long)__res_v)[0];
+}
+
+/* Selects four specific SPFP values from A and B based on MASK.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_ps(__m128 __A, __m128 __B, int const __mask) {
+  unsigned long __element_selector_10 = __mask & 0x03;
+  unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned int __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+  };
+  __vector unsigned int __t;
+
+  __t[0] = __permute_selectors[__element_selector_10];
+  __t[1] = __permute_selectors[__element_selector_32];
+  __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+  __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+  return vec_perm((__v4sf)__A, (__v4sf)__B, (__vector unsigned char)__t);
+}
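+
+/* Worked example (editor's note): for __mask == 0x1B the 2-bit
+   selectors are 3, 2, 1, 0, so
+
+     _mm_shuffle_ps(__a, __b, 0x1B) == {__a[3], __a[2], __b[1], __b[0]}
+
+   following the SSE rule that the two low result lanes come from __A
+   and the two high lanes from __B.  */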
+
+/* Selects and interleaves the upper two SPFP values from A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_vmrglw((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Selects and interleaves the lower two SPFP values from A and B.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_vmrghw((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Sets the upper two SPFP values with 64-bits of data loaded from P;
+   the lower two values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadh_pi(__m128 __A, __m64 const *__P) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+  __vector unsigned long long __p = vec_splats(*__P);
+  __a[1] = __p[1];
+
+  return (__m128)__a;
+}
+
+/* Stores the upper two SPFP values of A into P.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storeh_pi(__m64 *__P, __m128 __A) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+
+  *__P = __a[1];
+}
+
+/* Moves the upper two values of B into the lower two values of A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movehl_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_mergel((__vector unsigned long long)__B,
+                            (__vector unsigned long long)__A);
+}
+
+/* Moves the lower two values of B into the upper two values of A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movelh_ps(__m128 __A, __m128 __B) {
+  return (__m128)vec_mergeh((__vector unsigned long long)__A,
+                            (__vector unsigned long long)__B);
+}
+
+/* Sets the lower two SPFP values with 64-bits of data loaded from P;
+   the upper two values are passed through from A.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_loadl_pi(__m128 __A, __m64 const *__P) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+  __vector unsigned long long __p = vec_splats(*__P);
+  __a[0] = __p[0];
+
+  return (__m128)__a;
+}
+
+/* Stores the lower two SPFP values of A into P.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_storel_pi(__m64 *__P, __m128 __A) {
+  __vector unsigned long long __a = (__vector unsigned long long)__A;
+
+  *__P = __a[0];
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_ps(__m128 __A) {
+#ifdef _ARCH_PWR10
+  return vec_extractm((__vector unsigned int)__A);
+#else
+  __vector unsigned long long __result;
+  static const __vector unsigned int __perm_mask = {
+#ifdef __LITTLE_ENDIAN__
+      0x00204060, 0x80808080, 0x80808080, 0x80808080
+#else
+      0x80808080, 0x80808080, 0x80808080, 0x00204060
+#endif
+  };
+
+  __result = ((__vector unsigned long long)vec_vbpermq(
+      (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return __result[1];
+#else
+  return __result[0];
+#endif
+#endif /* !_ARCH_PWR10 */
+}
+#endif /* _ARCH_PWR8 */
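+
+/* Worked example (editor's note): _mm_movemask_ps gathers the four
+   sign bits into the low nibble of the result, e.g.
+
+     _mm_movemask_ps(_mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f)) == 0x5
+
+   since lanes 0 and 2 are negative.  */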
+
+/* Create a vector with all four elements equal to *P.  */
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load1_ps(float const *__P) {
+  return _mm_set1_ps(*__P);
+}
+
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_load_ps1(float const *__P) {
+  return _mm_load1_ps(__P);
+}
+
+/* Extracts one of the four words of A.  The selector N must be immediate.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_pi16(__m64 const __A, int const __N) {
+  unsigned int __shiftr = __N & 3;
+#ifdef __BIG_ENDIAN__
+  __shiftr = 3 - __shiftr;
+#endif
+
+  return ((__A >> (__shiftr * 16)) & 0xffff);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pextrw(__m64 const __A, int const __N) {
+  return _mm_extract_pi16(__A, __N);
+}
+
+/* Inserts word D into one of four words of A.  The selector N must be
+   immediate.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_pi16(__m64 const __A, int const __D, int const __N) {
+  const int __shiftl = (__N & 3) * 16;
+  const __m64 __shiftD = (const __m64)__D << __shiftl;
+  const __m64 __mask = 0xffffUL << __shiftl;
+  __m64 __result = (__A & (~__mask)) | (__shiftD & __mask);
+
+  return __result;
+}
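+
+/* Worked example (editor's note): with __N == 2 the shift is 32 bits,
+   so only word 2 of __A is replaced, e.g.
+
+     _mm_insert_pi16((__m64)0x0004000300020001UL, 0xBEEF, 2)
+         == (__m64)0x0004BEEF00020001UL  */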
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pinsrw(__m64 const __A, int const __D, int const __N) {
+  return _mm_insert_pi16(__A, __D, __N);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pi16(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector signed short __a, __b, __r;
+  __vector __bool short __c;
+
+  __a = (__vector signed short)vec_splats(__A);
+  __b = (__vector signed short)vec_splats(__B);
+  __c = (__vector __bool short)vec_cmpgt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  __res.as_short[0] = (__m1.as_short[0] > __m2.as_short[0]) ? __m1.as_short[0]
+                                                            : __m2.as_short[0];
+  __res.as_short[1] = (__m1.as_short[1] > __m2.as_short[1]) ? __m1.as_short[1]
+                                                            : __m2.as_short[1];
+  __res.as_short[2] = (__m1.as_short[2] > __m2.as_short[2]) ? __m1.as_short[2]
+                                                            : __m2.as_short[2];
+  __res.as_short[3] = (__m1.as_short[3] > __m2.as_short[3]) ? __m1.as_short[3]
+                                                            : __m2.as_short[3];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmaxsw(__m64 __A, __m64 __B) {
+  return _mm_max_pi16(__A, __B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pu8(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector unsigned char __a, __b, __r;
+  __vector __bool char __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = (__vector __bool char)vec_cmpgt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+  long __i;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  for (__i = 0; __i < 8; __i++)
+    __res.as_char[__i] =
+        ((unsigned char)__m1.as_char[__i] > (unsigned char)__m2.as_char[__i])
+            ? __m1.as_char[__i]
+            : __m2.as_char[__i];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmaxub(__m64 __A, __m64 __B) {
+  return _mm_max_pu8(__A, __B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pi16(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector signed short __a, __b, __r;
+  __vector __bool short __c;
+
+  __a = (__vector signed short)vec_splats(__A);
+  __b = (__vector signed short)vec_splats(__B);
+  __c = (__vector __bool short)vec_cmplt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  __res.as_short[0] = (__m1.as_short[0] < __m2.as_short[0]) ? __m1.as_short[0]
+                                                            : __m2.as_short[0];
+  __res.as_short[1] = (__m1.as_short[1] < __m2.as_short[1]) ? __m1.as_short[1]
+                                                            : __m2.as_short[1];
+  __res.as_short[2] = (__m1.as_short[2] < __m2.as_short[2]) ? __m1.as_short[2]
+                                                            : __m2.as_short[2];
+  __res.as_short[3] = (__m1.as_short[3] < __m2.as_short[3]) ? __m1.as_short[3]
+                                                            : __m2.as_short[3];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pminsw(__m64 __A, __m64 __B) {
+  return _mm_min_pi16(__A, __B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_min_pu8(__m64 __A, __m64 __B) {
+#if _ARCH_PWR8
+  __vector unsigned char __a, __b, __r;
+  __vector __bool char __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = (__vector __bool char)vec_cmplt(__a, __b);
+  __r = vec_sel(__b, __a, __c);
+  return (__m64)((__vector long long)__r)[0];
+#else
+  __m64_union __m1, __m2, __res;
+  long __i;
+
+  __m1.as_m64 = __A;
+  __m2.as_m64 = __B;
+
+  for (__i = 0; __i < 8; __i++)
+    __res.as_char[__i] =
+        ((unsigned char)__m1.as_char[__i] < (unsigned char)__m2.as_char[__i])
+            ? __m1.as_char[__i]
+            : __m2.as_char[__i];
+
+  return (__m64)__res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pminub(__m64 __A, __m64 __B) {
+  return _mm_min_pu8(__A, __B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values.  */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_movemask_pi8(__m64 __A) {
+#ifdef __powerpc64__
+  unsigned long long __p =
+#ifdef __LITTLE_ENDIAN__
+      0x0008101820283038UL; // permute control for sign bits
+#else
+      0x3830282018100800UL; // permute control for sign bits
+#endif
+  return __builtin_bpermd(__p, __A);
+#else
+#ifdef __LITTLE_ENDIAN__
+  unsigned int __mask = 0x20283038UL;
+  unsigned int __r1 = __builtin_bpermd(__mask, __A) & 0xf;
+  unsigned int __r2 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+#else
+  unsigned int __mask = 0x38302820UL;
+  unsigned int __r1 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+  unsigned int __r2 = __builtin_bpermd(__mask, __A) & 0xf;
+#endif
+  return (__r2 << 4) | __r1;
+#endif
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmovmskb(__m64 __A) {
+  return _mm_movemask_pi8(__A);
+}
+
+/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
+   in B and produce the high 16 bits of the 32-bit results.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_pu16(__m64 __A, __m64 __B) {
+  __vector unsigned short __a, __b;
+  __vector unsigned short __c;
+  __vector unsigned int __w0, __w1;
+  __vector unsigned char __xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+      0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+  };
+
+  __a = (__vector unsigned short)vec_splats(__A);
+  __b = (__vector unsigned short)vec_splats(__B);
+
+  __w0 = vec_vmuleuh(__a, __b);
+  __w1 = vec_vmulouh(__a, __b);
+  __c = (__vector unsigned short)vec_perm(__w0, __w1, __xform1);
+
+  return (__m64)((__vector long long)__c)[0];
+}
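+
+/* Editor's note (illustrative): vec_vmuleuh/vec_vmulouh form the full
+   32-bit products of the even and odd halfwords, and the vec_perm
+   above keeps only the high 16 bits of each.  For example,
+   0xFFFF * 0xFFFF == 0xFFFE0001, so that element yields 0xFFFE.  */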
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmulhuw(__m64 __A, __m64 __B) {
+  return _mm_mulhi_pu16(__A, __B);
+}
+
+/* Return a combination of the four 16-bit values in A.  The selector
+   must be an immediate.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_shuffle_pi16(__m64 __A, int const __N) {
+  unsigned long __element_selector_10 = __N & 0x03;
+  unsigned long __element_selector_32 = (__N >> 2) & 0x03;
+  unsigned long __element_selector_54 = (__N >> 4) & 0x03;
+  unsigned long __element_selector_76 = (__N >> 6) & 0x03;
+  static const unsigned short __permute_selectors[4] = {
+#ifdef __LITTLE_ENDIAN__
+      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+      0x0607, 0x0405, 0x0203, 0x0001
+#endif
+  };
+  __m64_union __t;
+  __vector unsigned long long __a, __p, __r;
+
+#ifdef __LITTLE_ENDIAN__
+  __t.as_short[0] = __permute_selectors[__element_selector_10];
+  __t.as_short[1] = __permute_selectors[__element_selector_32];
+  __t.as_short[2] = __permute_selectors[__element_selector_54];
+  __t.as_short[3] = __permute_selectors[__element_selector_76];
+#else
+  __t.as_short[3] = __permute_selectors[__element_selector_10];
+  __t.as_short[2] = __permute_selectors[__element_selector_32];
+  __t.as_short[1] = __permute_selectors[__element_selector_54];
+  __t.as_short[0] = __permute_selectors[__element_selector_76];
+#endif
+  __p = vec_splats(__t.as_m64);
+  __a = vec_splats(__A);
+  __r = vec_perm(__a, __a, (__vector unsigned char)__p);
+  return (__m64)((__vector long long)__r)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pshufw(__m64 __A, int const __N) {
+  return _mm_shuffle_pi16(__A, __N);
+}
+
+/* Conditionally store byte elements of A into P.  The high bit of each
+   byte in the selector N determines whether the corresponding byte from
+   A is stored.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_maskmove_si64(__m64 __A, __m64 __N, char *__P) {
+  __m64 __hibit = 0x8080808080808080UL;
+  __m64 __mask, __tmp;
+  __m64 *__p = (__m64 *)__P;
+
+  __tmp = *__p;
+  __mask = _mm_cmpeq_pi8((__N & __hibit), __hibit);
+  __tmp = (__tmp & (~__mask)) | (__A & __mask);
+  *__p = __tmp;
+}
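+
+/* Usage sketch (editor's note): only bytes whose selector byte has its
+   high bit set are written; e.g. with __N == 0x8000000000000080UL only
+   bytes 0 and 7 of *__P are replaced by the matching bytes of __A.  */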
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_maskmovq(__m64 __A, __m64 __N, char *__P) {
+  _mm_maskmove_si64(__A, __N, __P);
+}
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_pu8(__m64 __A, __m64 __B) {
+  __vector unsigned char __a, __b, __c;
+
+  __a = (__vector unsigned char)vec_splats(__A);
+  __b = (__vector unsigned char)vec_splats(__B);
+  __c = vec_avg(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pavgb(__m64 __A, __m64 __B) {
+  return _mm_avg_pu8(__A, __B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_avg_pu16(__m64 __A, __m64 __B) {
+  __vector unsigned short __a, __b, __c;
+
+  __a = (__vector unsigned short)vec_splats(__A);
+  __b = (__vector unsigned short)vec_splats(__B);
+  __c = vec_avg(__a, __b);
+  return (__m64)((__vector long long)__c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pavgw(__m64 __A, __m64 __B) {
+  return _mm_avg_pu16(__A, __B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+   values in A and B.  Return the value in the lower 16-bit word; the
+   upper words are cleared.  */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sad_pu8(__m64 __A, __m64 __B) {
+  __vector unsigned char __a, __b;
+  __vector unsigned char __vmin, __vmax, __vabsdiff;
+  __vector signed int __vsum;
+  const __vector unsigned int __zero = {0, 0, 0, 0};
+  __m64_union __result = {0};
+
+  __a = (__vector unsigned char)(__vector unsigned long long){0UL, __A};
+  __b = (__vector unsigned char)(__vector unsigned long long){0UL, __B};
+  __vmin = vec_min(__a, __b);
+  __vmax = vec_max(__a, __b);
+  __vabsdiff = vec_sub(__vmax, __vmin);
+  /* Sum four groups of bytes into integers.  */
+  __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
+  /* Sum across four integers with integer result.  */
+  __vsum = vec_sums(__vsum, (__vector signed int)__zero);
+  /* The sum is in the rightmost 32 bits of the vector result.
+     Transfer to a GPR and truncate to 16 bits.  */
+  __result.as_short[0] = __vsum[3];
+  return __result.as_m64;
+}
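+
+#if 0 /* Usage sketch (illustrative, not part of this header): the SAD
+         result occupies the low 16 bits, so it can be read back as a
+         plain integer, assuming this wrapper's integer-typed __m64.  */
+static unsigned int __row_sad(__m64 __x, __m64 __y) {
+  return (unsigned int)(unsigned long long)_mm_sad_pu8(__x, __y);
+}
+#endif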
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psadbw(__m64 __A, __m64 __B) {
+  return _mm_sad_pu8(__A, __B);
+}
+
+/* Stores the data in A to the address P without polluting the caches.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_pi(__m64 *__P, __m64 __A) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__P) : "memory");
+  *__P = __A;
+}
+
+/* Likewise.  The address must be 16-byte aligned.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_stream_ps(float *__P, __m128 __A) {
+  /* Use the data cache block touch for store transient.  */
+  __asm__("	dcbtstt	0,%0" : : "b"(__P) : "memory");
+  _mm_store_ps(__P, __A);
+}
+
+/* Guarantees that every preceding store is globally visible before
+   any subsequent store.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sfence(void) {
+  /* Generate a light weight sync.  */
+  __atomic_thread_fence(__ATOMIC_RELEASE);
+}
+
+/* The execution of the next instruction is delayed by an
+   implementation-specific amount of time.  The instruction does not modify
+   the architectural state.  This is after the pop_options pragma because it
+   does not require SSE support in the processor; the encoding is a nop on
+   processors that do not support it.  */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_pause(void) {
+  /* There is no exact match with this construct, but the following is
+     close to the desired effect.  */
+#if _ARCH_PWR8
+  /* On power8 and later processors we can depend on Program Priority
+     (PRI) and the associated "very low" PRI setting.  Since we don't know
+     what PRI this thread is running at, we: 1) save the current PRI
+     from the PPR SPR into a local GPR, 2) set the PRI to "very low"
+     via the special or 31,31,31 encoding, 3) issue an "isync" to
+     ensure the PRI change takes effect before we execute any more
+     instructions.
+     Now we can execute a lwsync (release barrier) while we execute
+     this thread at "very low" PRI.  Finally we restore the original
+     PRI and continue execution.  */
+  unsigned long __PPR;
+
+  __asm__ volatile("	mfppr	%0;"
+                   "   or 31,31,31;"
+                   "   isync;"
+                   "   lwsync;"
+                   "   isync;"
+                   "   mtppr	%0;"
+                   : "=r"(__PPR)
+                   :
+                   : "memory");
+#else
+  /* For older processors, where we may not even have Program Priority
+     controls, we can only depend on Heavy Weight Sync.  */
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#endif
+}
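+
+#if 0 /* Usage sketch (illustrative): the spin-wait pattern _mm_pause()
+         is intended for; __flag is an assumed shared variable.  */
+static void __spin_until_set(volatile int *__flag) {
+  while (!__atomic_load_n(__flag, __ATOMIC_ACQUIRE))
+    _mm_pause();
+}
+#endif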
+
+/* Transpose the 4x4 matrix composed of row[0-3].  */
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                              \
+  do {                                                                         \
+    __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);         \
+    __v4sf __t0 = vec_vmrghw(__r0, __r1);                                      \
+    __v4sf __t1 = vec_vmrghw(__r2, __r3);                                      \
+    __v4sf __t2 = vec_vmrglw(__r0, __r1);                                      \
+    __v4sf __t3 = vec_vmrglw(__r2, __r3);                                      \
+    (row0) = (__v4sf)vec_mergeh((__vector long long)__t0,                      \
+                                (__vector long long)__t1);                     \
+    (row1) = (__v4sf)vec_mergel((__vector long long)__t0,                      \
+                                (__vector long long)__t1);                     \
+    (row2) = (__v4sf)vec_mergeh((__vector long long)__t2,                      \
+                                (__vector long long)__t3);                     \
+    (row3) = (__v4sf)vec_mergel((__vector long long)__t2,                      \
+                                (__vector long long)__t3);                     \
+  } while (0)
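+
+#if 0 /* Usage sketch (illustrative): transpose a row-major 4x4 float
+         matrix in place; __mat is assumed 16-byte aligned.  */
+static void __transpose4x4(float *__mat) {
+  __m128 __r0 = _mm_load_ps(__mat + 0);
+  __m128 __r1 = _mm_load_ps(__mat + 4);
+  __m128 __r2 = _mm_load_ps(__mat + 8);
+  __m128 __r3 = _mm_load_ps(__mat + 12);
+  _MM_TRANSPOSE4_PS(__r0, __r1, __r2, __r3);
+  _mm_store_ps(__mat + 0, __r0);
+  _mm_store_ps(__mat + 4, __r1);
+  _mm_store_ps(__mat + 8, __r2);
+  _mm_store_ps(__mat + 12, __r3);
+}
+#endif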
+
+/* For backward source compatibility.  */
+//# include <emmintrin.h>
+
+#else
+#include_next <xmmintrin.h>
+#endif /* defined(__ppc64__) &&
+        *   (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
+
+#endif /* XMMINTRIN_H_ */
diff --git a/linux-x86/lib64/clang/14.0.2/include/prfchwintrin.h b/linux-x86/lib64/clang/16.0.2/include/prfchwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/prfchwintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
copy to linux-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
index 008b8dd..282620d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ b/linux-x86/lib64/clang/16.0.2/include/profile/InstrProfData.inc
@@ -128,8 +128,10 @@
 INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
 INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
 INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
 INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
 INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
 INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
@@ -644,24 +646,33 @@
        (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
         (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
 
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 8
 /* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 7
+#define INSTR_PROF_INDEX_VERSION 8
 /* Coverage mapping format version (start from 0). */
 #define INSTR_PROF_COVMAP_VERSION 5
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
  * generated profile, and 0 if this is a Clang FE generated profile.
  * 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
+ * The 60th bit indicates single byte coverage instrumentation.
+ * The 61st bit indicates function entry instrumentation only.
+ * The 62nd bit indicates whether memory profile information is present.
  */
 #define VARIANT_MASKS_ALL 0xff00000000000000ULL
 #define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
 #define VARIANT_MASK_IR_PROF (0x1ULL << 56)
 #define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
 #define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
+#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
+#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
+#define VARIANT_MASK_MEMPROF (0x1ULL << 62)
 #define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
 #define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
 #define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
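 
 /* Sketch (illustrative, not part of the file): how a raw-profile reader
    decodes the version word with these masks; __version is an assumed input
    read from the profile header.  */
 #if 0
 uint64_t __version = 0; /* the header's Version field */
 int __is_ir_profile = (__version & VARIANT_MASK_IR_PROF) != 0;
 int __has_memprof = (__version & VARIANT_MASK_MEMPROF) != 0;
 uint64_t __format = GET_VERSION(__version); /* low 56 bits */
 #endif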
diff --git a/linux-x86/lib64/clang/14.0.2/include/ptwriteintrin.h b/linux-x86/lib64/clang/16.0.2/include/ptwriteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ptwriteintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/ptwriteintrin.h
diff --git a/linux-x86/lib64/clang/16.0.2/include/rdpruintrin.h b/linux-x86/lib64/clang/16.0.2/include/rdpruintrin.h
new file mode 100644
index 0000000..89732bb
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/rdpruintrin.h
@@ -0,0 +1,57 @@
+/*===---- rdpruintrin.h - RDPRU intrinsics ---------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H
+#error "Never use <rdpruintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDPRUINTRIN_H
+#define __RDPRUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__,  __target__("rdpru")))
+
+
+/// Reads the content of a processor register.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> RDPRU </c> instruction.
+///
+/// \param reg_id
+///    A processor register identifier.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__rdpru (int reg_id)
+{
+  return __builtin_ia32_rdpru(reg_id);
+}
+
+#define __RDPRU_MPERF 0
+#define __RDPRU_APERF 1
+
+/// Reads the content of processor register MPERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates the <c> RDPRU </c> instruction to read the value
+/// of register MPERF.
+#define __mperf() __builtin_ia32_rdpru(__RDPRU_MPERF)
+
+/// Reads the content of processor register APERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates the <c> RDPRU </c> instruction to read the value
+/// of register APERF.
+#define __aperf() __builtin_ia32_rdpru(__RDPRU_APERF)
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDPRUINTRIN_H */
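+
+#if 0 /* Usage sketch (illustrative): sample MPERF/APERF around a workload
+         to estimate the effective-to-nominal frequency ratio.  */
+static double __freq_ratio(void) {
+  unsigned long long __m0 = __mperf(), __a0 = __aperf();
+  /* ... run the workload being measured ... */
+  unsigned long long __m1 = __mperf(), __a1 = __aperf();
+  return (double)(__a1 - __a0) / (double)(__m1 - __m0);
+}
+#endif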
diff --git a/darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h b/linux-x86/lib64/clang/16.0.2/include/rdseedintrin.h
similarity index 87%
copy from darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/rdseedintrin.h
index ccb3d2d..405bc24 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/rdseedintrin.h
@@ -20,20 +20,20 @@
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed16_step(unsigned short *__p)
 {
-  return __builtin_ia32_rdseed16_step(__p);
+  return (int) __builtin_ia32_rdseed16_step(__p);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed32_step(unsigned int *__p)
 {
-  return __builtin_ia32_rdseed32_step(__p);
+  return (int) __builtin_ia32_rdseed32_step(__p);
 }
 
 #ifdef __x86_64__
 static __inline__ int __DEFAULT_FN_ATTRS
 _rdseed64_step(unsigned long long *__p)
 {
-  return __builtin_ia32_rdseed64_step(__p);
+  return (int) __builtin_ia32_rdseed64_step(__p);
 }
 #endif
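 
 #if 0 /* Usage sketch (illustrative): RDSEED can transiently fail, so the
          step functions are normally called in a bounded retry loop.  */
 static int __rdseed32_retry(unsigned int *__p, int __tries) {
   while (__tries-- > 0)
     if (_rdseed32_step(__p))
       return 1; /* *__p now holds a fresh hardware seed */
   return 0;     /* entropy temporarily unavailable */
 }
 #endif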
 
diff --git a/linux-x86/lib64/clang/16.0.2/include/riscv_vector.h b/linux-x86/lib64/clang/16.0.2/include/riscv_vector.h
new file mode 100644
index 0000000..cac2b2d
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/riscv_vector.h
@@ -0,0 +1,202 @@
+/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics -------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_VECTOR_H
+#define __RISCV_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_vector
+#error "Vector intrinsics require the vector extension."
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma clang riscv intrinsic vector
+
+
+enum RVV_CSR {
+  RVV_VSTART = 0,
+  RVV_VXSAT,
+  RVV_VXRM,
+  RVV_VCSR,
+};
+
+static __inline__ __attribute__((__always_inline__, __nodebug__))
+unsigned long vread_csr(enum RVV_CSR __csr) {
+  unsigned long __rv = 0;
+  switch (__csr) {
+    case RVV_VSTART:
+      __asm__ __volatile__ ("csrr\t%0, vstart" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VXSAT:
+      __asm__ __volatile__ ("csrr\t%0, vxsat" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VXRM:
+      __asm__ __volatile__ ("csrr\t%0, vxrm" : "=r"(__rv) : : "memory");
+      break;
+    case RVV_VCSR:
+      __asm__ __volatile__ ("csrr\t%0, vcsr" : "=r"(__rv) : : "memory");
+      break;
+  }
+  return __rv;
+}
+
+static __inline__ __attribute__((__always_inline__, __nodebug__))
+void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
+  switch (__csr) {
+    case RVV_VSTART:
+      __asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VXSAT:
+      __asm__ __volatile__ ("csrw\tvxsat, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VXRM:
+      __asm__ __volatile__ ("csrw\tvxrm, %z0" : : "rJ"(__value) : "memory");
+      break;
+    case RVV_VCSR:
+      __asm__ __volatile__ ("csrw\tvcsr, %z0" : : "rJ"(__value) : "memory");
+      break;
+  }
+}
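+
+#if 0 /* Usage sketch (illustrative): save, set, and restore the fixed-point
+         rounding mode around a region that needs round-to-nearest-up
+         (vxrm encoding 0).  */
+static void __with_rnu_rounding(void) {
+  unsigned long __old = vread_csr(RVV_VXRM);
+  vwrite_csr(RVV_VXRM, 0);
+  /* ... fixed-point vector code ... */
+  vwrite_csr(RVV_VXRM, __old);
+}
+#endif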
+
+#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+
+#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+
+#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+
+#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+
+#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+
+#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
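+
+#if 0 /* Usage sketch (illustrative): the strip-mining loop shape these
+         vsetvl_* macros support; the element processing is elided.  */
+static void __strip_mine(size_t __n) {
+  for (size_t __i = 0; __i < __n;) {
+    size_t __vl = vsetvl_e32m1(__n - __i); /* elements handled this pass */
+    /* ... process __vl 32-bit elements starting at index __i ... */
+    __i += __vl;
+  }
+}
+#endif
+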
+typedef __rvv_bool64_t vbool64_t;
+typedef __rvv_bool32_t vbool32_t;
+typedef __rvv_bool16_t vbool16_t;
+typedef __rvv_bool8_t vbool8_t;
+typedef __rvv_bool4_t vbool4_t;
+typedef __rvv_bool2_t vbool2_t;
+typedef __rvv_bool1_t vbool1_t;
+typedef __rvv_int8mf8_t vint8mf8_t;
+typedef __rvv_uint8mf8_t vuint8mf8_t;
+typedef __rvv_int8mf4_t vint8mf4_t;
+typedef __rvv_uint8mf4_t vuint8mf4_t;
+typedef __rvv_int8mf2_t vint8mf2_t;
+typedef __rvv_uint8mf2_t vuint8mf2_t;
+typedef __rvv_int8m1_t vint8m1_t;
+typedef __rvv_uint8m1_t vuint8m1_t;
+typedef __rvv_int8m2_t vint8m2_t;
+typedef __rvv_uint8m2_t vuint8m2_t;
+typedef __rvv_int8m4_t vint8m4_t;
+typedef __rvv_uint8m4_t vuint8m4_t;
+typedef __rvv_int8m8_t vint8m8_t;
+typedef __rvv_uint8m8_t vuint8m8_t;
+typedef __rvv_int16mf4_t vint16mf4_t;
+typedef __rvv_uint16mf4_t vuint16mf4_t;
+typedef __rvv_int16mf2_t vint16mf2_t;
+typedef __rvv_uint16mf2_t vuint16mf2_t;
+typedef __rvv_int16m1_t vint16m1_t;
+typedef __rvv_uint16m1_t vuint16m1_t;
+typedef __rvv_int16m2_t vint16m2_t;
+typedef __rvv_uint16m2_t vuint16m2_t;
+typedef __rvv_int16m4_t vint16m4_t;
+typedef __rvv_uint16m4_t vuint16m4_t;
+typedef __rvv_int16m8_t vint16m8_t;
+typedef __rvv_uint16m8_t vuint16m8_t;
+typedef __rvv_int32mf2_t vint32mf2_t;
+typedef __rvv_uint32mf2_t vuint32mf2_t;
+typedef __rvv_int32m1_t vint32m1_t;
+typedef __rvv_uint32m1_t vuint32m1_t;
+typedef __rvv_int32m2_t vint32m2_t;
+typedef __rvv_uint32m2_t vuint32m2_t;
+typedef __rvv_int32m4_t vint32m4_t;
+typedef __rvv_uint32m4_t vuint32m4_t;
+typedef __rvv_int32m8_t vint32m8_t;
+typedef __rvv_uint32m8_t vuint32m8_t;
+typedef __rvv_int64m1_t vint64m1_t;
+typedef __rvv_uint64m1_t vuint64m1_t;
+typedef __rvv_int64m2_t vint64m2_t;
+typedef __rvv_uint64m2_t vuint64m2_t;
+typedef __rvv_int64m4_t vint64m4_t;
+typedef __rvv_uint64m4_t vuint64m4_t;
+typedef __rvv_int64m8_t vint64m8_t;
+typedef __rvv_uint64m8_t vuint64m8_t;
+#if defined(__riscv_zvfh)
+typedef __rvv_float16mf4_t vfloat16mf4_t;
+typedef __rvv_float16mf2_t vfloat16mf2_t;
+typedef __rvv_float16m1_t vfloat16m1_t;
+typedef __rvv_float16m2_t vfloat16m2_t;
+typedef __rvv_float16m4_t vfloat16m4_t;
+typedef __rvv_float16m8_t vfloat16m8_t;
+#endif
+#if (__riscv_v_elen_fp >= 32)
+typedef __rvv_float32mf2_t vfloat32mf2_t;
+typedef __rvv_float32m1_t vfloat32m1_t;
+typedef __rvv_float32m2_t vfloat32m2_t;
+typedef __rvv_float32m4_t vfloat32m4_t;
+typedef __rvv_float32m8_t vfloat32m8_t;
+#endif
+#if (__riscv_v_elen_fp >= 64)
+typedef __rvv_float64m1_t vfloat64m1_t;
+typedef __rvv_float64m2_t vfloat64m2_t;
+typedef __rvv_float64m4_t vfloat64m4_t;
+typedef __rvv_float64m8_t vfloat64m8_t;
+#endif
+
+#define __riscv_v_intrinsic_overloading 1
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __RISCV_VECTOR_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/rtmintrin.h b/linux-x86/lib64/clang/16.0.2/include/rtmintrin.h
similarity index 96%
rename from linux-x86/lib64/clang/14.0.2/include/rtmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/rtmintrin.h
index 36ff583..a3ec81e 100644
--- a/linux-x86/lib64/clang/14.0.2/include/rtmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/rtmintrin.h
@@ -29,7 +29,7 @@
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _xbegin(void)
 {
-  return __builtin_ia32_xbegin();
+  return (unsigned int)__builtin_ia32_xbegin();
 }
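 
 #if 0 /* Usage sketch (illustrative): the usual RTM pattern, attempting a
          transactional region and falling back to a lock (elided) on abort;
          _XBEGIN_STARTED and _xend() come from this header.  */
 static void __increment(int *__counter) {
   if (_xbegin() == _XBEGIN_STARTED) {
     ++*__counter; /* transactional region */
     _xend();
   } else {
     /* ... locked fallback path ... */
   }
 }
 #endif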
 
 static __inline__ void __DEFAULT_FN_ATTRS
diff --git a/linux-x86/lib64/clang/14.0.2/include/s390intrin.h b/linux-x86/lib64/clang/16.0.2/include/s390intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/s390intrin.h
rename to linux-x86/lib64/clang/16.0.2/include/s390intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/allocator_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/allocator_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/asan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/asan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
similarity index 97%
copy from darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
copy to linux-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
index 692b8f7..ba58ad4 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
+++ b/linux-x86/lib64/clang/16.0.2/include/sanitizer/common_interface_defs.h
@@ -211,6 +211,15 @@
 // Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
 void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
                                   char *out_buf, size_t out_buf_size);
+// Determine the return address.
+#if !defined(_MSC_VER) || defined(__clang__)
+#define __sanitizer_return_address()                                           \
+  __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+extern "C" void *_ReturnAddress(void);
+#pragma intrinsic(_ReturnAddress)
+#define __sanitizer_return_address() _ReturnAddress()
+#endif
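+
+#if 0 /* Usage sketch (illustrative): record the caller PC in a hypothetical
+         allocation hook; __record_alloc is an assumed function.  */
+void __my_malloc_hook(void *__ptr, size_t __size) {
+  __record_alloc(__ptr, __size, __sanitizer_return_address());
+}
+#endif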
 
 /// Sets the callback to be called immediately before death on error.
 ///
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/coverage_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
similarity index 79%
copy from darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
copy to linux-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
index d6209a3..8e581a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ b/linux-x86/lib64/clang/16.0.2/include/sanitizer/dfsan_interface.h
@@ -27,6 +27,10 @@
 /// Signature of the callback argument to dfsan_set_write_callback().
 typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
 
+/// Signature of the callback argument to dfsan_set_conditional_callback().
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+                                             dfsan_origin origin);
+
 /// Computes the union of \c l1 and \c l2, resulting in a union label.
 dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
 
@@ -54,6 +58,10 @@
 /// Retrieves the label associated with the data at the given address.
 dfsan_label dfsan_read_label(const void *addr, size_t size);
 
+/// Returns the origin associated with the first tainted byte in the size
+/// bytes starting at the address addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+
 /// Returns whether the given label label contains the label elem.
 int dfsan_has_label(dfsan_label label, dfsan_label elem);
 
@@ -70,6 +78,19 @@
 /// callback executes.  Pass in NULL to remove any callback.
 void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
 
+/// Sets a callback to be invoked on any conditional expressions which have a
+/// taint label set. This can be used to find where tainted data influences
+/// the behavior of the program.
+/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+
+/// Conditional expressions can occur inside signal handlers.
+/// Making callbacks that handle signals well is tricky, so when
+/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
+/// handlers will instead bitwise-or the labels they see into a global value.
+/// This function returns all label bits seen in signal handler conditions.
+dfsan_label dfsan_get_labels_in_signal_conditional();
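+
+#if 0 /* Usage sketch (illustrative): log every branch that depends on
+         tainted data; requires -dfsan-conditional-callbacks=true.  */
+static void __my_cond_callback(dfsan_label label, dfsan_origin origin) {
+  /* Inspect or record the label/origin of the tainted condition.  */
+}
+/* At startup: dfsan_set_conditional_callback(__my_cond_callback);  */
+#endif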
+
 /// Interceptor hooks.
 /// Whenever one of dfsan's custom functions is called, the corresponding
 /// hook is called if it is non-zero.  The hooks should be defined by the user.
@@ -87,6 +108,9 @@
 /// prints description at the beginning of the trace. If origin tracking is not
 /// on, or the address is not labeled, it prints nothing.
 void dfsan_print_origin_trace(const void *addr, const char *description);
+/// As above, but uses an origin id from dfsan_get_origin() instead of an
+/// address.  Does not include the header line with taint label and address
+/// information.
+void dfsan_print_origin_id_trace(dfsan_origin origin);
 
 /// Prints the origin trace of the label at the address \p addr to a
 /// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -124,6 +148,10 @@
 /// return value is not less than \p out_buf_size.
 size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
                                  char *out_buf, size_t out_buf_size);
+/// As above, but uses an origin id from dfsan_get_origin() instead of an
+/// address.  Does not include the header line with taint label and address
+/// information.
+size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
+                                    size_t out_buf_size);
 
 /// Prints the stack trace leading to this call to a pre-allocated output
 /// buffer.
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/hwasan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/linux_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/lsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
similarity index 96%
copy from darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
copy to linux-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
index eeb39fb..854b12c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
+++ b/linux-x86/lib64/clang/16.0.2/include/sanitizer/msan_interface.h
@@ -92,6 +92,8 @@
 
   /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
   void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+  void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
+  void __sanitizer_dtor_callback_vptr(const volatile void *data);
 
   /* This function may be optionally provided by user and should return
      a string containing Msan runtime options. See msan_flags.h for details. */
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/scudo_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/scudo_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/tsan_interface_atomic.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h b/linux-x86/lib64/clang/16.0.2/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h
rename to linux-x86/lib64/clang/16.0.2/include/sanitizer/ubsan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/serializeintrin.h b/linux-x86/lib64/clang/16.0.2/include/serializeintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/serializeintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/serializeintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sgxintrin.h b/linux-x86/lib64/clang/16.0.2/include/sgxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sgxintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/sgxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/shaintrin.h b/linux-x86/lib64/clang/16.0.2/include/shaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/shaintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h b/linux-x86/lib64/clang/16.0.2/include/smmintrin.h
similarity index 85%
copy from darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/smmintrin.h
index 710e55a..46fb7bc 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/smmintrin.h
@@ -17,23 +17,25 @@
 #include <tmmintrin.h>
 
 /* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"),         \
+                 __min_vector_width__(128)))
 
 /* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT    0x00
-#define _MM_FROUND_TO_NEG_INF        0x01
-#define _MM_FROUND_TO_POS_INF        0x02
-#define _MM_FROUND_TO_ZERO           0x03
-#define _MM_FROUND_CUR_DIRECTION     0x04
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
 
-#define _MM_FROUND_RAISE_EXC         0x00
-#define _MM_FROUND_NO_EXC            0x08
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
 
-#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
 #define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
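 
 #if 0 /* Usage sketch (illustrative): round each lane toward negative
          infinity without raising the inexact exception.  */
 static __m128 __floor_quiet(__m128 __x) {
   return _mm_round_ps(__x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
 }
 #endif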
 
 /// Rounds up each element of the 128-bit vector of [4 x float] to an
@@ -51,7 +53,7 @@
 /// \param X
 ///    A 128-bit vector of [4 x float] values to be rounded up.
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X)       _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
 
 /// Rounds up each element of the 128-bit vector of [2 x double] to an
 ///    integer and returns the rounded values in a 128-bit vector of
@@ -68,7 +70,7 @@
 /// \param X
 ///    A 128-bit vector of [2 x double] values to be rounded up.
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X)       _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
 
 /// Copies three upper elements of the first 128-bit vector operand to
 ///    the corresponding three upper elements of the 128-bit result vector of
@@ -93,7 +95,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_ceil_ss(X, Y)    _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
 
 /// Copies the upper element of the first 128-bit vector operand to the
 ///    corresponding upper element of the 128-bit result vector of [2 x double].
@@ -118,7 +120,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_ceil_sd(X, Y)    _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
 
 /// Rounds down each element of the 128-bit vector of [4 x float] to an
 ///    integer and returns the rounded values in a 128-bit vector of
@@ -135,7 +137,7 @@
 /// \param X
 ///    A 128-bit vector of [4 x float] values to be rounded down.
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X)      _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
 
 /// Rounds down each element of the 128-bit vector of [2 x double] to an
 ///    integer and returns the rounded values in a 128-bit vector of
@@ -152,7 +154,7 @@
 /// \param X
 ///    A 128-bit vector of [2 x double].
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X)      _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
 
 /// Copies three upper elements of the first 128-bit vector operand to
 ///    the corresponding three upper elements of the 128-bit result vector of
@@ -177,7 +179,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
 
 /// Copies the upper element of the first 128-bit vector operand to the
 ///    corresponding upper element of the 128-bit result vector of [2 x double].
@@ -202,7 +204,7 @@
 ///    of the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
 
 /// Rounds each element of the 128-bit vector of [4 x float] to an
 ///    integer value according to the rounding control specified by the second
@@ -234,7 +236,7 @@
 ///      10: Upward (toward positive infinity) \n
 ///      11: Truncated
 /// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
+#define _mm_round_ps(X, M)                                                     \
   ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
 
 /// Copies three upper elements of the first 128-bit vector operand to
@@ -275,9 +277,9 @@
 ///      11: Truncated
 /// \returns A 128-bit vector of [4 x float] containing the copied and rounded
 ///    values.
-#define _mm_round_ss(X, Y, M) \
-  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
-                                  (__v4sf)(__m128)(Y), (M)))
+#define _mm_round_ss(X, Y, M)                                                  \
+  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y),    \
+                                  (M)))
 
 /// Rounds each element of the 128-bit vector of [2 x double] to an
 ///    integer value according to the rounding control specified by the second
@@ -309,7 +311,7 @@
 ///      10: Upward (toward positive infinity) \n
 ///      11: Truncated
 /// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
+#define _mm_round_pd(X, M)                                                     \
   ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
 
 /// Copies the upper element of the first 128-bit vector operand to the
@@ -350,9 +352,9 @@
 ///      11: Truncated
 /// \returns A 128-bit vector of [2 x double] containing the copied and rounded
 ///    values.
-#define _mm_round_sd(X, Y, M) \
-  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
-                                   (__v2df)(__m128d)(Y), (M)))
+#define _mm_round_sd(X, Y, M)                                                  \
+  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+                                   (M)))
 
 /* SSE4 Packed Blending Intrinsics.  */
 /// Returns a 128-bit vector of [2 x double] where the values are
@@ -379,9 +381,9 @@
 ///    When a mask bit is 1, the corresponding 64-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
-  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
-                                     (__v2df)(__m128d)(V2), (int)(M)))
+#define _mm_blend_pd(V1, V2, M)                                                \
+  ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1),                      \
+                                   (__v2df)(__m128d)(V2), (int)(M)))
 
 /// Returns a 128-bit vector of [4 x float] where the values are selected
 ///    from either the first or second operand as specified by the third
@@ -407,9 +409,9 @@
 ///    When a mask bit is 1, the corresponding 32-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
-  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
-                                    (__v4sf)(__m128)(V2), (int)(M)))
+#define _mm_blend_ps(V1, V2, M)                                                \
+  ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2),  \
+                                  (int)(M)))
 
 /// Returns a 128-bit vector of [2 x double] where the values are
 ///    selected from either the first or second operand as specified by the
@@ -431,11 +433,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 64-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
-  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
-                                            (__v2df)__M);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
+                                                           __m128d __V2,
+                                                           __m128d __M) {
+  return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
+                                          (__v2df)__M);
 }
 
 /// Returns a 128-bit vector of [4 x float] where the values are
@@ -458,11 +460,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 32-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
-  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
-                                           (__v4sf)__M);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
+                                                          __m128 __V2,
+                                                          __m128 __M) {
+  return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
+                                         (__v4sf)__M);
 }
 
 /// Returns a 128-bit vector of [16 x i8] where the values are selected
@@ -485,11 +487,11 @@
 ///    position in the result. When a mask bit is 1, the corresponding 8-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
-  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
-                                               (__v16qi)__M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
+                                                             __m128i __V2,
+                                                             __m128i __M) {
+  return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
+                                             (__v16qi)__M);
 }
 
 /// Returns a 128-bit vector of [8 x i16] where the values are selected
@@ -516,9 +518,9 @@
 ///    When a mask bit is 1, the corresponding 16-bit element in operand \a V2
 ///    is copied to the same position in the result.
 /// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
-  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
-                                        (__v8hi)(__m128i)(V2), (int)(M)))
+#define _mm_blend_epi16(V1, V2, M)                                             \
+  ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1),                   \
+                                      (__v8hi)(__m128i)(V2), (int)(M)))
 
 /* SSE4 Dword Multiply Instructions.  */
 /// Multiples corresponding elements of two 128-bit vectors of [4 x i32]
@@ -534,10 +536,9 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
+                                                             __m128i __V2) {
+  return (__m128i)((__v4su)__V1 * (__v4su)__V2);
 }
 
 /// Multiplies corresponding even-indexed elements of two 128-bit
@@ -554,10 +555,9 @@
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [2 x i64] containing the products of both
 ///    operands.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
 }
 
 /* SSE4 Floating Point Dot Product Instructions.  */
@@ -593,9 +593,8 @@
 ///    each [4 x float] subvector. If a bit is set, the dot product is returned
 ///    in the corresponding element; otherwise that element is set to zero.
 /// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
-  ((__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
-                                (__v4sf)(__m128)(Y), (M)))
+#define _mm_dp_ps(X, Y, M)                                                     \
+  ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
 
 /// Computes the dot product of the two 128-bit vectors of [2 x double]
 ///    and returns it in the elements of the 128-bit result vector of
@@ -628,9 +627,9 @@
 ///    to the lowest element and bit [1] corresponding to the highest element of
 ///    each [2 x double] vector. If a bit is set, the dot product is returned in
 ///    the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) \
-  ((__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
-                                 (__v2df)(__m128d)(Y), (M)))
+#define _mm_dp_pd(X, Y, M)                                                     \
+  ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y),    \
+                                (M)))
 
 /* SSE4 Streaming Load Hint Instruction.  */
 /// Loads integer values from a 128-bit aligned memory location to a
@@ -645,10 +644,9 @@
 ///    values.
 /// \returns A 128-bit integer vector containing the data stored at the
 ///    specified memory location.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
-  return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128(__m128i const *__V) {
+  return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
 }
 
 /* SSE4 Packed Integer Min/Max Instructions.  */
@@ -665,10 +663,9 @@
 /// \param __V2
 ///    A 128-bit vector of [16 x i8]
 /// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
+                                                          __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -684,10 +681,9 @@
 /// \param __V2
 ///    A 128-bit vector of [16 x i8].
 /// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
+                                                          __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -703,10 +699,9 @@
 /// \param __V2
 ///    A 128-bit vector of [8 x u16].
 /// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -722,10 +717,9 @@
 /// \param __V2
 ///    A 128-bit vector of [8 x u16].
 /// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -741,10 +735,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -760,10 +753,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x i32].
 /// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -779,10 +771,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x u32].
 /// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -798,10 +789,9 @@
 /// \param __V2
 ///    A 128-bit vector of [4 x u32].
 /// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__  __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
+                                                           __m128i __V2) {
+  return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
 }
 
 /* SSE4 Insertion and Extraction from XMM Register Instructions.  */
@@ -869,21 +859,24 @@
 ///    10: Bits [95:64] of parameter \a X are returned. \n
 ///    11: Bits [127:96] of parameter \a X are returned.
 /// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) \
-  __builtin_bit_cast(int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
+#define _mm_extract_ps(X, N)                                                   \
+  __builtin_bit_cast(                                                          \
+      int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
 
 /* Miscellaneous insert and extract macros.  */
 /* Extract a single-precision float from X at index N into D.  */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
-  do { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } while (0)
+#define _MM_EXTRACT_FLOAT(D, X, N)                                             \
+  do {                                                                         \
+    (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N));          \
+  } while (0)
 
 /* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
    an index suitable for _mm_insert_ps.  */
 #define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
 
 /* Extract a float from X at index N into the first index of the return.  */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
-                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+#define _MM_PICK_OUT_PS(X, N)                                                  \
+  _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
 
 /* Insert int into packed integer array at index.  */
 /// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@@ -926,9 +919,9 @@
 ///    1110: Bits [119:112] of the result are used for insertion. \n
 ///    1111: Bits [127:120] of the result are used for insertion.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
-                                         (int)(I), (int)(N)))
+#define _mm_insert_epi8(X, I, N)                                               \
+  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I),      \
+                                         (int)(N)))
 
 /// Constructs a 128-bit vector of [4 x i32] by first making a copy of
 ///    the 128-bit integer vector parameter, and then inserting the 32-bit
@@ -958,9 +951,9 @@
 ///    10: Bits [95:64] of the result are used for insertion. \n
 ///    11: Bits [127:96] of the result are used for insertion.
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
-                                        (int)(I), (int)(N)))
+#define _mm_insert_epi32(X, I, N)                                              \
+  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I),        \
+                                        (int)(N)))
 
 #ifdef __x86_64__
 /// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@@ -989,9 +982,9 @@
 ///    0: Bits [63:0] of the result are used for insertion. \n
 ///    1: Bits [127:64] of the result are used for insertion. \n
 /// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
-  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
-                                        (long long)(I), (int)(N)))
+#define _mm_insert_epi64(X, I, N)                                              \
+  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I),  \
+                                        (int)(N)))
 #endif /* __x86_64__ */
 
 /* Extract int from packed integer array at index.  This returns the element
@@ -1032,8 +1025,8 @@
 /// \returns  An unsigned integer, whose lower 8 bits are selected from the
 ///    128-bit integer vector parameter and the remaining bits are assigned
 ///    zeros.
-#define _mm_extract_epi8(X, N) \
-  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+#define _mm_extract_epi8(X, N)                                                 \
+  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X),     \
                                                     (int)(N)))
 
 /// Extracts a 32-bit element from the 128-bit integer vector of
@@ -1058,10 +1051,9 @@
 ///    11: Bits [127:96] of the parameter \a X are extracted.
 /// \returns  An integer, whose lower 32 bits are selected from the 128-bit
 ///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
+#define _mm_extract_epi32(X, N)                                                \
   ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
 
-#ifdef __x86_64__
 /// Extracts a 64-bit element from the 128-bit integer vector of
 ///    [2 x i64], using the immediate value parameter \a N as a selector.
 ///
@@ -1071,7 +1063,8 @@
 /// long long _mm_extract_epi64(__m128i X, const int N);
 /// \endcode
 ///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction
+/// in 64-bit mode.
 ///
 /// \param X
 ///    A 128-bit integer vector.
@@ -1081,9 +1074,8 @@
 ///    0: Bits [63:0] are returned. \n
 ///    1: Bits [127:64] are returned. \n
 /// \returns  A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
+#define _mm_extract_epi64(X, N)                                                \
   ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
-#endif /* __x86_64 */
 
 /* SSE4 128-bit Packed Integer Comparisons.  */
 /// Tests whether the specified bits in a 128-bit integer vector are all
@@ -1098,9 +1090,8 @@
 /// \param __V
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
+                                                         __m128i __V) {
   return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
 }
 
@@ -1116,9 +1107,8 @@
 /// \param __V
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
+                                                         __m128i __V) {
   return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
 }
 
@@ -1135,9 +1125,8 @@
 ///    A 128-bit integer vector selecting which bits to test in operand \a __M.
 /// \returns TRUE if the specified bits are neither all zeros nor all ones;
 ///    FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
+                                                           __m128i __V) {
   return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
 }
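
The three PTEST wrappers above differ only in which flag they read: ZF for testz (masked bits all zero), CF for testc (masked bits all ones), and neither for testnzc. A sketch of how the first two compose (helper names are invented):

    #include <smmintrin.h>

    static inline int none_set(__m128i v, __m128i mask) {
      return _mm_testz_si128(v, mask); /* 1 iff (v & mask) == 0 */
    }

    static inline int all_set(__m128i v, __m128i mask) {
      return _mm_testc_si128(v, mask); /* 1 iff (~v & mask) == 0 */
    }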
 
@@ -1193,7 +1182,7 @@
 /// \param V
 ///    A 128-bit integer vector selecting which bits to test in operand \a M.
 /// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
 
 /* SSE4 64-bit Packed Integer Comparisons.  */
 /// Compares each of the corresponding 64-bit values of the 128-bit
@@ -1208,9 +1197,8 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
+                                                             __m128i __V2) {
   return (__m128i)((__v2di)__V1 == (__v2di)__V2);
 }
 
@@ -1225,15 +1213,16 @@
 /// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
 ///
 /// \param __V
-///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-///    extended to 16-bit values.
+///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+///    sign-extended to 16-bit values.
 /// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
+                              7),
+      __v8hi);
 }
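
A sketch of the widening conversion in use (illustrative; widen_low8 is an invented name):

    #include <smmintrin.h>

    /* Sign-extend the low eight bytes: an input byte of -1 (0xFF)
       becomes the 16-bit lane -1 (0xFFFF). */
    static inline __m128i widen_low8(__m128i bytes) {
      return _mm_cvtepi8_epi16(bytes);
    }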
 
 /// Sign-extends each of the lower four 8-bit integer elements of a
@@ -1249,12 +1238,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
 ///    sign-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Sign-extends each of the lower two 8-bit integer elements of a
@@ -1270,12 +1258,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
 ///    sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
   /* This function always performs a signed extension, but __v16qi is a char
      which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
 }
 
 /// Sign-extends each of the lower four 16-bit integer elements of a
@@ -1291,10 +1278,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
 ///    sign-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Sign-extends each of the lower two 16-bit integer elements of a
@@ -1310,10 +1296,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
 ///     sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
 }
 
 /// Sign-extends each of the lower two 32-bit integer elements of a
@@ -1329,10 +1314,9 @@
 ///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
 ///    sign-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
 }
 
 /* SSE4 Packed Integer Zero-Extension.  */
@@ -1349,10 +1333,11 @@
 ///    A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
 ///    zero-extended to 16-bit values.
 /// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
+                              7),
+      __v8hi);
 }
 
 /// Zero-extends each of the lower four 8-bit integer elements of a
@@ -1368,10 +1353,9 @@
 ///    A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
 ///    zero-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Zero-extends each of the lower two 8-bit integer elements of a
@@ -1387,10 +1371,9 @@
 ///    A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
 }
 
 /// Zero-extends each of the lower four 16-bit integer elements of a
@@ -1406,10 +1389,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
 ///    zero-extended to 32-bit values.
 /// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
 }
 
 /// Zero-extends each of the lower two 16-bit integer elements of a
@@ -1425,10 +1407,9 @@
 ///    A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
 }
 
 /// Zero-extends each of the lower two 32-bit integer elements of a
@@ -1444,10 +1425,9 @@
 ///    A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
 ///    zero-extended to 64-bit values.
 /// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
-  return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
 }
 
 /* SSE4 Pack with Unsigned Saturation.  */
@@ -1473,10 +1453,9 @@
 ///    less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
 ///    are written to the higher 64 bits of the result.
 /// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
-  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+                                                              __m128i __V2) {
+  return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
 }
 
 /* SSE4 Multiple Packed Sums of Absolute Difference.  */
@@ -1515,9 +1494,9 @@
 ///    \endcode
 /// \returns A 128-bit integer vector containing the sums of the sets of
 ///    absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
-  ((__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
-                                       (__v16qi)(__m128i)(Y), (M)))
+#define _mm_mpsadbw_epu8(X, Y, M)                                              \
+  ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X),                   \
+                                      (__v16qi)(__m128i)(Y), (M)))
 
 /// Finds the minimum unsigned 16-bit element in the input 128-bit
 ///    vector of [8 x u16] and returns it along with its index.
@@ -1532,10 +1511,8 @@
 /// \returns A 128-bit value where bits [15:0] contain the minimum value found
 ///    in parameter \a __V, bits [18:16] contain the index of the minimum value
 ///    and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
-  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+  return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
 }
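
Unpacking the PHMINPOSUW result as the comment above describes (a sketch; min_u16 is an invented helper):

    #include <smmintrin.h>

    static inline void min_u16(__m128i v, unsigned *value, unsigned *index) {
      unsigned lo = (unsigned)_mm_cvtsi128_si32(_mm_minpos_epu16(v));
      *value = lo & 0xFFFFu;      /* bits [15:0]: the minimum element */
      *index = (lo >> 16) & 0x7u; /* bits [18:16]: its lane index */
    }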
 
 /* Handle the sse4.2 definitions here. */
@@ -1544,33 +1521,34 @@
    so we'll do the same.  */
 
 #undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
 
 /* These specify the type of data that we're comparing.  */
-#define _SIDD_UBYTE_OPS                 0x00
-#define _SIDD_UWORD_OPS                 0x01
-#define _SIDD_SBYTE_OPS                 0x02
-#define _SIDD_SWORD_OPS                 0x03
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
 
 /* These specify the type of comparison operation.  */
-#define _SIDD_CMP_EQUAL_ANY             0x00
-#define _SIDD_CMP_RANGES                0x04
-#define _SIDD_CMP_EQUAL_EACH            0x08
-#define _SIDD_CMP_EQUAL_ORDERED         0x0c
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
 
 /* These macros specify the polarity of the operation.  */
-#define _SIDD_POSITIVE_POLARITY         0x00
-#define _SIDD_NEGATIVE_POLARITY         0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
 
 /* These macros are used in _mm_cmpXstri() to specify the return.  */
-#define _SIDD_LEAST_SIGNIFICANT         0x00
-#define _SIDD_MOST_SIGNIFICANT          0x40
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
 
 /* These macros are used in _mm_cmpXstri() to specify the return.  */
-#define _SIDD_BIT_MASK                  0x00
-#define _SIDD_UNIT_MASK                 0x40
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
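
The _SIDD_* groups are meant to be ORed together, one choice per group, to form the immediate M of the string-compare intrinsics declared below. A sketch (first_match is an invented name; requires -msse4.2):

    #include <nmmintrin.h>

    /* Index of the first byte of s that appears in set (both 16 bytes,
       NUL-padded); returns 16 when there is no match. */
    static inline int first_match(__m128i set, __m128i s) {
      return _mm_cmpistri(set, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                                      _SIDD_POSITIVE_POLARITY |
                                      _SIDD_LEAST_SIGNIFICANT);
    }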
 
 /* SSE4.2 Packed Comparison Intrinsics.  */
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1625,8 +1603,8 @@
 ///         repeating each bit 8 or 16 times).
 /// \returns Returns a 128-bit integer vector representing the result mask of
 ///    the comparison.
-#define _mm_cmpistrm(A, B, M) \
-  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrm(A, B, M)                                                  \
+  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A),                 \
                                         (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1679,9 +1657,9 @@
 ///      0: The index of the least significant set bit. \n
 ///      1: The index of the most significant set bit. \n
 /// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
-  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
-                                   (__v16qi)(__m128i)(B), (int)(M)))
+#define _mm_cmpistri(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A),                     \
+                                    (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
 ///    data with explicitly defined lengths that is contained in source operands
@@ -1739,9 +1717,9 @@
 ///         repeating each bit 8 or 16 times). \n
 /// \returns Returns a 128-bit integer vector representing the result mask of
 ///    the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
-  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
-                                        (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrm(A, LA, B, LB, M)                                          \
+  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA),      \
+                                        (__v16qi)(__m128i)(B), (int)(LB),      \
                                         (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1798,9 +1776,9 @@
 ///      0: The index of the least significant set bit. \n
 ///      1: The index of the most significant set bit. \n
 /// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
-                                    (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestri(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA),          \
+                                    (__v16qi)(__m128i)(B), (int)(LB),          \
                                     (int)(M)))
 
 /* SSE4.2 Packed Comparison Intrinsics and EFlag Reading.  */
@@ -1850,8 +1828,8 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the bit mask is zero and the length of the string in
 ///    \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
-  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistra(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1899,8 +1877,8 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the bit mask is non-zero; otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
-  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrc(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1947,8 +1925,8 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B. \n
 /// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
-  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistro(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -1997,8 +1975,8 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the length of the string in \a A is less than the
 ///    maximum; otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
-  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrs(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2047,8 +2025,8 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the length of the string in \a B is less than the
 ///    maximum; otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
-  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrz(A, B, M)                                                  \
+  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A),                    \
                                      (__v16qi)(__m128i)(B), (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2101,9 +2079,9 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the bit mask is zero and the length of the string in
 ///    \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestra(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2155,9 +2133,9 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the resulting mask is non-zero; otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrc(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2208,9 +2186,9 @@
 ///      11: Negate the bit mask only for bits with an index less than or equal
 ///          to the size of \a A or \a B.
 /// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestro(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2263,9 +2241,9 @@
 ///          to the size of \a A or \a B. \n
 /// \returns Returns 1 if the length of the string in \a A is less than the
 ///    maximum; otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrs(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /// Uses the immediate operand \a M to perform a comparison of string
@@ -2317,9 +2295,9 @@
 ///          to the size of \a A or \a B.
 /// \returns Returns 1 if the length of the string in \a B is less than the
 ///    maximum; otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
-  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
-                                     (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrz(A, LA, B, LB, M)                                          \
+  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA),         \
+                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                      (int)(M)))
 
 /* SSE4.2 Compare Packed Data -- Greater Than.  */
@@ -2336,9 +2314,8 @@
 /// \param __V2
 ///    A 128-bit integer vector.
 /// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
+                                                             __m128i __V2) {
   return (__m128i)((__v2di)__V1 > (__v2di)__V2);
 }
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdalign.h b/linux-x86/lib64/clang/16.0.2/include/stdalign.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdalign.h
rename to linux-x86/lib64/clang/16.0.2/include/stdalign.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdarg.h b/linux-x86/lib64/clang/16.0.2/include/stdarg.h
similarity index 84%
rename from linux-x86/lib64/clang/14.0.2/include/stdarg.h
rename to linux-x86/lib64/clang/16.0.2/include/stdarg.h
index 0bc3940..dc7becf 100644
--- a/linux-x86/lib64/clang/14.0.2/include/stdarg.h
+++ b/linux-x86/lib64/clang/16.0.2/include/stdarg.h
@@ -23,7 +23,9 @@
  */
 #define __va_copy(d,s) __builtin_va_copy(d,s)
 
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L) ||                        \
+    !defined(__STRICT_ANSI__)
 #define va_copy(dest, src)  __builtin_va_copy(dest, src)
 #endif
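
The rewritten guard only makes va_copy visible where the standards provide it (C99, C++11, or relaxed-ANSI modes); __va_copy stays available everywhere. A usage sketch (log_twice is an invented name):

    #include <stdarg.h>
    #include <stdio.h>

    int log_twice(const char *fmt, ...) {
      va_list ap, ap2;
      va_start(ap, fmt);
      va_copy(ap2, ap); /* each copy is consumed independently */
      int n = vprintf(fmt, ap);
      n += vprintf(fmt, ap2);
      va_end(ap2);
      va_end(ap);
      return n;
    }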
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdatomic.h b/linux-x86/lib64/clang/16.0.2/include/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdatomic.h
rename to linux-x86/lib64/clang/16.0.2/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdbool.h b/linux-x86/lib64/clang/16.0.2/include/stdbool.h
similarity index 69%
copy from darwin-x86/lib64/clang/14.0.2/include/stdbool.h
copy to linux-x86/lib64/clang/16.0.2/include/stdbool.h
index 2525363..9406aab 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdbool.h
+++ b/linux-x86/lib64/clang/16.0.2/include/stdbool.h
@@ -10,22 +10,25 @@
 #ifndef __STDBOOL_H
 #define __STDBOOL_H
 
-/* Don't define bool, true, and false in C++, except as a GNU extension. */
-#ifndef __cplusplus
+#define __bool_true_false_are_defined 1
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#elif !defined(__cplusplus)
 #define bool _Bool
 #define true 1
 #define false 0
 #elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
 /* Define _Bool as a GNU extension. */
 #define _Bool bool
-#if __cplusplus < 201103L
+#if defined(__cplusplus) && __cplusplus < 201103L
 /* For C++98, define bool, false, true as a GNU extension. */
-#define bool  bool
+#define bool bool
 #define false false
-#define true  true
+#define true true
 #endif
 #endif
 
-#define __bool_true_false_are_defined 1
-
 #endif /* __STDBOOL_H */
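
After this change __bool_true_false_are_defined is set unconditionally, while bool/true/false are macro-defined only before C23, where they become keywords (hence the deliberately empty branch). A small sketch of code that works in either mode:

    #include <stdbool.h>

    #if __bool_true_false_are_defined
    /* 'bool' is the macro for _Bool pre-C23 and a keyword from C23 on. */
    static bool feature_enabled = true;
    #endif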
diff --git a/linux-x86/lib64/clang/14.0.2/include/stddef.h b/linux-x86/lib64/clang/16.0.2/include/stddef.h
similarity index 94%
rename from linux-x86/lib64/clang/14.0.2/include/stddef.h
rename to linux-x86/lib64/clang/16.0.2/include/stddef.h
index 15acd44..8e86a21 100644
--- a/linux-x86/lib64/clang/14.0.2/include/stddef.h
+++ b/linux-x86/lib64/clang/16.0.2/include/stddef.h
@@ -62,7 +62,7 @@
 #endif /* defined(__need_STDDEF_H_misc) */
 
 #if defined(__need_wchar_t)
-#ifndef __cplusplus
+#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)
 /* Always define wchar_t when modules are available. */
 #if !defined(_WCHAR_T) || __has_feature(modules)
 #if !__has_feature(modules)
@@ -98,7 +98,8 @@
 #endif /* defined(__need_NULL) */
 
 #if defined(__need_STDDEF_H_misc)
-#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||              \
+    (defined(__cplusplus) && __cplusplus >= 201103L)
 #include "__stddef_max_align_t.h"
 #endif
 #define offsetof(t, d) __builtin_offsetof(t, d)
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdint.h b/linux-x86/lib64/clang/16.0.2/include/stdint.h
similarity index 66%
copy from darwin-x86/lib64/clang/14.0.2/include/stdint.h
copy to linux-x86/lib64/clang/16.0.2/include/stdint.h
index 192f653..a47e91b 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdint.h
+++ b/linux-x86/lib64/clang/16.0.2/include/stdint.h
@@ -96,13 +96,21 @@
 typedef __INT64_TYPE__ int64_t;
 # endif /* __int8_t_defined */
 typedef __UINT64_TYPE__ uint64_t;
+# undef __int_least64_t
 # define __int_least64_t int64_t
+# undef __uint_least64_t
 # define __uint_least64_t uint64_t
+# undef __int_least32_t
 # define __int_least32_t int64_t
+# undef __uint_least32_t
 # define __uint_least32_t uint64_t
+# undef __int_least16_t
 # define __int_least16_t int64_t
+# undef __uint_least16_t
 # define __uint_least16_t uint64_t
+# undef __int_least8_t
 # define __int_least8_t int64_t
+# undef __uint_least8_t
 # define __uint_least8_t uint64_t
 #endif /* __INT64_TYPE__ */
 
@@ -120,11 +128,17 @@
 typedef uint56_t uint_least56_t;
 typedef int56_t int_fast56_t;
 typedef uint56_t uint_fast56_t;
+# undef __int_least32_t
 # define __int_least32_t int56_t
+# undef __uint_least32_t
 # define __uint_least32_t uint56_t
+# undef __int_least16_t
 # define __int_least16_t int56_t
+# undef __uint_least16_t
 # define __uint_least16_t uint56_t
+# undef __int_least8_t
 # define __int_least8_t int56_t
+# undef __uint_least8_t
 # define __uint_least8_t uint56_t
 #endif /* __INT56_TYPE__ */
 
@@ -136,11 +150,17 @@
 typedef uint48_t uint_least48_t;
 typedef int48_t int_fast48_t;
 typedef uint48_t uint_fast48_t;
+# undef __int_least32_t
 # define __int_least32_t int48_t
+# undef __uint_least32_t
 # define __uint_least32_t uint48_t
+# undef __int_least16_t
 # define __int_least16_t int48_t
+# undef __uint_least16_t
 # define __uint_least16_t uint48_t
+# undef __int_least8_t
 # define __int_least8_t int48_t
+# undef __uint_least8_t
 # define __uint_least8_t uint48_t
 #endif /* __INT48_TYPE__ */
 
@@ -152,11 +172,17 @@
 typedef uint40_t uint_least40_t;
 typedef int40_t int_fast40_t;
 typedef uint40_t uint_fast40_t;
+# undef __int_least32_t
 # define __int_least32_t int40_t
+# undef __uint_least32_t
 # define __uint_least32_t uint40_t
+# undef __int_least16_t
 # define __int_least16_t int40_t
+# undef __uint_least16_t
 # define __uint_least16_t uint40_t
+# undef __int_least8_t
 # define __int_least8_t int40_t
+# undef __uint_least8_t
 # define __uint_least8_t uint40_t
 #endif /* __INT40_TYPE__ */
 
@@ -172,11 +198,17 @@
 typedef __UINT32_TYPE__ uint32_t;
 # endif /* __uint32_t_defined */
 
+# undef __int_least32_t
 # define __int_least32_t int32_t
+# undef __uint_least32_t
 # define __uint_least32_t uint32_t
+# undef __int_least16_t
 # define __int_least16_t int32_t
+# undef __uint_least16_t
 # define __uint_least16_t uint32_t
+# undef __int_least8_t
 # define __int_least8_t int32_t
+# undef __uint_least8_t
 # define __uint_least8_t uint32_t
 #endif /* __INT32_TYPE__ */
 
@@ -194,9 +226,13 @@
 typedef uint24_t uint_least24_t;
 typedef int24_t int_fast24_t;
 typedef uint24_t uint_fast24_t;
+# undef __int_least16_t
 # define __int_least16_t int24_t
+# undef __uint_least16_t
 # define __uint_least16_t uint24_t
+# undef __int_least8_t
 # define __int_least8_t int24_t
+# undef __uint_least8_t
 # define __uint_least8_t uint24_t
 #endif /* __INT24_TYPE__ */
 
@@ -205,9 +241,13 @@
 typedef __INT16_TYPE__ int16_t;
 #endif /* __int8_t_defined */
 typedef __UINT16_TYPE__ uint16_t;
+# undef __int_least16_t
 # define __int_least16_t int16_t
+# undef __uint_least16_t
 # define __uint_least16_t uint16_t
+# undef __int_least8_t
 # define __int_least8_t int16_t
+# undef __uint_least8_t
 # define __uint_least8_t uint16_t
 #endif /* __INT16_TYPE__ */
 
@@ -224,7 +264,9 @@
 typedef __INT8_TYPE__ int8_t;
 #endif /* __int8_t_defined */
 typedef __UINT8_TYPE__ uint8_t;
+# undef __int_least8_t
 # define __int_least8_t int8_t
+# undef __uint_least8_t
 # define __uint_least8_t uint8_t
 #endif /* __INT8_TYPE__ */
 
@@ -285,16 +327,15 @@
 
 
 #ifdef __INT64_TYPE__
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT64_C_SUFFIX__
 #  define __int64_c_suffix __INT64_C_SUFFIX__
 #  define __int32_c_suffix __INT64_C_SUFFIX__
 #  define __int16_c_suffix __INT64_C_SUFFIX__
 #  define  __int8_c_suffix __INT64_C_SUFFIX__
-# else
-#  undef __int64_c_suffix
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT64_C_SUFFIX__ */
 #endif /* __INT64_TYPE__ */
 
@@ -310,6 +351,9 @@
 
 
 #ifdef __INT56_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT56_C_SUFFIX__
 #  define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
 #  define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
@@ -319,14 +363,14 @@
 # else
 #  define INT56_C(v) v
 #  define UINT56_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT56_C_SUFFIX__ */
 #endif /* __INT56_TYPE__ */
 
 
 #ifdef __INT48_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT48_C_SUFFIX__
 #  define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
 #  define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
@@ -336,14 +380,14 @@
 # else
 #  define INT48_C(v) v
 #  define UINT48_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT48_C_SUFFIX__ */
 #endif /* __INT48_TYPE__ */
 
 
 #ifdef __INT40_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT40_C_SUFFIX__
 #  define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
 #  define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
@@ -353,22 +397,18 @@
 # else
 #  define INT40_C(v) v
 #  define UINT40_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT40_C_SUFFIX__ */
 #endif /* __INT40_TYPE__ */
 
 
 #ifdef __INT32_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT32_C_SUFFIX__
 #  define __int32_c_suffix __INT32_C_SUFFIX__
 #  define __int16_c_suffix __INT32_C_SUFFIX__
 #  define __int8_c_suffix  __INT32_C_SUFFIX__
-#else
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT32_C_SUFFIX__ */
 #endif /* __INT32_TYPE__ */
 
@@ -384,6 +424,8 @@
 
 
 #ifdef __INT24_TYPE__
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT24_C_SUFFIX__
 #  define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
 #  define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
@@ -392,19 +434,16 @@
 # else
 #  define INT24_C(v) v
 #  define UINT24_C(v) v ## U
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT24_C_SUFFIX__ */
 #endif /* __INT24_TYPE__ */
 
 
 #ifdef __INT16_TYPE__
+# undef __int16_c_suffix
+# undef  __int8_c_suffix
 # ifdef __INT16_C_SUFFIX__
 #  define __int16_c_suffix __INT16_C_SUFFIX__
 #  define __int8_c_suffix  __INT16_C_SUFFIX__
-#else
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
 # endif /* __INT16_C_SUFFIX__ */
 #endif /* __INT16_TYPE__ */
 
@@ -420,10 +459,9 @@
 
 
 #ifdef __INT8_TYPE__
+# undef  __int8_c_suffix
 # ifdef __INT8_C_SUFFIX__
 #  define __int8_c_suffix __INT8_C_SUFFIX__
-#else
-#  undef  __int8_c_suffix
 # endif /* __INT8_C_SUFFIX__ */
 #endif /* __INT8_TYPE__ */
 
@@ -461,17 +499,41 @@
 # define INT64_MAX           INT64_C( 9223372036854775807)
 # define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
 # define UINT64_MAX         UINT64_C(18446744073709551615)
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT64_WIDTH         64
+# define INT64_WIDTH          UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST8_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __STDC_VERSION__ */
+
 # define __INT_LEAST64_MIN   INT64_MIN
 # define __INT_LEAST64_MAX   INT64_MAX
 # define __UINT_LEAST64_MAX UINT64_MAX
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT64_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT64_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT64_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT64_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT64_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT64_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT64_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT64_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT64_MAX
 #endif /* __INT64_TYPE__ */
 
@@ -482,6 +544,15 @@
 # define INT_FAST64_MIN    __INT_LEAST64_MIN
 # define INT_FAST64_MAX    __INT_LEAST64_MAX
 # define UINT_FAST64_MAX  __UINT_LEAST64_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH  UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH  __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH   UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST64_MIN */
 
 
@@ -495,15 +566,42 @@
 # define INT_FAST56_MIN      INT56_MIN
 # define INT_FAST56_MAX      INT56_MAX
 # define UINT_FAST56_MAX    UINT56_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT56_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT56_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT56_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT56_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT56_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT56_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT56_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT56_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT56_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT56_WIDTH         56
+# define INT56_WIDTH          UINT56_WIDTH
+# define UINT_LEAST56_WIDTH   UINT56_WIDTH
+# define INT_LEAST56_WIDTH    UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH    UINT56_WIDTH
+# define INT_FAST56_WIDTH     UINT_FAST56_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT56_TYPE__ */
 
 
@@ -517,15 +615,42 @@
 # define INT_FAST48_MIN      INT48_MIN
 # define INT_FAST48_MAX      INT48_MAX
 # define UINT_FAST48_MAX    UINT48_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT48_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT48_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT48_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT48_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT48_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT48_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT48_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT48_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT48_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT48_WIDTH         48
+# define INT48_WIDTH          UINT48_WIDTH
+# define UINT_LEAST48_WIDTH   UINT48_WIDTH
+# define INT_LEAST48_WIDTH    UINT_LEAST48_WIDTH
+# define UINT_FAST48_WIDTH    UINT48_WIDTH
+# define INT_FAST48_WIDTH     UINT_FAST48_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT48_TYPE__ */
 
 
@@ -539,15 +664,42 @@
 # define INT_FAST40_MIN      INT40_MIN
 # define INT_FAST40_MAX      INT40_MAX
 # define UINT_FAST40_MAX    UINT40_MAX
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT40_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT40_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT40_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT40_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT40_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT40_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT40_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT40_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT40_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT40_WIDTH         40
+# define INT40_WIDTH          UINT40_WIDTH
+# define UINT_LEAST40_WIDTH   UINT40_WIDTH
+# define INT_LEAST40_WIDTH    UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH    UINT40_WIDTH
+# define INT_FAST40_WIDTH     UINT_FAST40_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT40_TYPE__ */
 
 
@@ -555,15 +707,38 @@
 # define INT32_MAX           INT32_C(2147483647)
 # define INT32_MIN         (-INT32_C(2147483647)-1)
 # define UINT32_MAX         UINT32_C(4294967295)
+
+# undef __INT_LEAST32_MIN
 # define __INT_LEAST32_MIN   INT32_MIN
+# undef __INT_LEAST32_MAX
 # define __INT_LEAST32_MAX   INT32_MAX
+# undef __UINT_LEAST32_MAX
 # define __UINT_LEAST32_MAX UINT32_MAX
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT32_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT32_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT32_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT32_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT32_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT32_WIDTH         32
+# define INT32_WIDTH          UINT32_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT32_TYPE__ */
 
 #ifdef __INT_LEAST32_MIN
@@ -573,6 +748,15 @@
 # define INT_FAST32_MIN    __INT_LEAST32_MIN
 # define INT_FAST32_MAX    __INT_LEAST32_MAX
 # define UINT_FAST32_MAX  __UINT_LEAST32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH  UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH  __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH   UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST32_MIN */
 
 
@@ -586,12 +770,34 @@
 # define INT_FAST24_MIN      INT24_MIN
 # define INT_FAST24_MAX      INT24_MAX
 # define UINT_FAST24_MAX    UINT24_MAX
+
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT24_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT24_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT24_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT24_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT24_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT24_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT24_WIDTH         24
+# define INT24_WIDTH          UINT24_WIDTH
+# define UINT_LEAST24_WIDTH   UINT24_WIDTH
+# define INT_LEAST24_WIDTH    UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH    UINT24_WIDTH
+# define INT_FAST24_WIDTH     UINT_FAST24_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT24_TYPE__ */
 
 
@@ -599,12 +805,30 @@
 #define INT16_MAX            INT16_C(32767)
 #define INT16_MIN          (-INT16_C(32767)-1)
 #define UINT16_MAX          UINT16_C(65535)
+
+# undef __INT_LEAST16_MIN
 # define __INT_LEAST16_MIN   INT16_MIN
+# undef __INT_LEAST16_MAX
 # define __INT_LEAST16_MAX   INT16_MAX
+# undef __UINT_LEAST16_MAX
 # define __UINT_LEAST16_MAX UINT16_MAX
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT16_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT16_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT16_WIDTH         16
+# define INT16_WIDTH          UINT16_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT16_TYPE__ */
 
 #ifdef __INT_LEAST16_MIN
@@ -614,6 +838,15 @@
 # define INT_FAST16_MIN    __INT_LEAST16_MIN
 # define INT_FAST16_MAX    __INT_LEAST16_MAX
 # define UINT_FAST16_MAX  __UINT_LEAST16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH  UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH  __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH   UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST16_MIN */
 
 
@@ -621,9 +854,22 @@
 # define INT8_MAX            INT8_C(127)
 # define INT8_MIN          (-INT8_C(127)-1)
 # define UINT8_MAX          UINT8_C(255)
+
+# undef __INT_LEAST8_MIN
 # define __INT_LEAST8_MIN    INT8_MIN
+# undef __INT_LEAST8_MAX
 # define __INT_LEAST8_MAX    INT8_MAX
+# undef __UINT_LEAST8_MAX
 # define __UINT_LEAST8_MAX  UINT8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT8_WIDTH         8
+# define INT8_WIDTH          UINT8_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT8_TYPE__ */
 
 #ifdef __INT_LEAST8_MIN
@@ -633,6 +879,15 @@
 # define INT_FAST8_MIN    __INT_LEAST8_MIN
 # define INT_FAST8_MAX    __INT_LEAST8_MAX
 # define UINT_FAST8_MAX  __UINT_LEAST8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH  UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH  __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH   UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST8_MIN */
 
 /* Some utility macros */
@@ -652,6 +907,16 @@
 #define PTRDIFF_MAX   __PTRDIFF_MAX__
 #define    SIZE_MAX      __SIZE_MAX__
 
+/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTPTR_WIDTH  __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
 /* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
  * is enabled. */
 #if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +928,16 @@
 #define  INTMAX_MAX   __INTMAX_MAX__
 #define UINTMAX_MAX  __UINTMAX_MAX__
 
+/* C2x 7.20.2.5 Width of greatest-width integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
 /* C99 7.18.3 Limits of other integer types. */
 #define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
 #define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +964,16 @@
 #define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
 #define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
 
+/* C2x 7.20.3.x Width of other integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
+#define PTRDIFF_WIDTH    __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH       __SIZE_WIDTH__
+#define WCHAR_WIDTH      __WCHAR_WIDTH__
+#define WINT_WIDTH       __WINT_WIDTH__
+#endif
+
 #endif /* __STDC_HOSTED__ */
 #endif /* __CLANG_STDINT_H */
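
A sketch of the new C2x width macros in use, mirroring the header's placeholder version guard (assumes a typical target where the exact-width types exist):

    #include <stdint.h>

    #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
    _Static_assert(UINT32_WIDTH == 32, "uint32_t carries exactly 32 bits");
    _Static_assert(SIZE_WIDTH >= 16, "size_t is at least 16 bits wide");
    #endif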
diff --git a/linux-x86/lib64/clang/16.0.2/include/stdnoreturn.h b/linux-x86/lib64/clang/16.0.2/include/stdnoreturn.h
new file mode 100644
index 0000000..967be94
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/stdnoreturn.h
@@ -0,0 +1,29 @@
+/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) &&               \
+    !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* The noreturn macro is deprecated in C2x. We do not mark it as such because
+   including the header file in C2x is also deprecated and we do not want to
+   issue a confusing diagnostic for code which includes <stdnoreturn.h>
+   followed by code that writes [[noreturn]]. The issue with such code is not
+   with the attribute, or the use of 'noreturn', but the inclusion of the
+   header. */
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#endif
+
+#endif /* __STDNORETURN_H */
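
A sketch of the pre-C2x spelling this new header enables (die is an invented name):

    #include <stdlib.h>
    #include <stdnoreturn.h>

    noreturn void die(const char *msg) { /* 'noreturn' expands to _Noreturn */
      (void)msg;
      exit(1);
    }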
diff --git a/linux-x86/lib64/clang/14.0.2/include/tbmintrin.h b/linux-x86/lib64/clang/16.0.2/include/tbmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tbmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/tbmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/tgmath.h b/linux-x86/lib64/clang/16.0.2/include/tgmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tgmath.h
rename to linux-x86/lib64/clang/16.0.2/include/tgmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/tmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/tmmintrin.h
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/include/tmmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/tmmintrin.h
index bcffa81..cb9be23 100644
--- a/linux-x86/lib64/clang/14.0.2/include/tmmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/tmmintrin.h
@@ -53,7 +53,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi8(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
 }
 
 /// Computes the absolute value of each of the packed 16-bit signed
@@ -89,7 +89,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi16(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
 }
 
 /// Computes the absolute value of each of the packed 32-bit signed
@@ -125,7 +125,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi32(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+    return (__m128i)__builtin_elementwise_abs((__v4si)__a);
 }
 
 /// Concatenates the two 128-bit integer vector operands, and
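This hunk swaps the target-specific pabs builtins for Clang's generic __builtin_elementwise_abs; callers see no change, since the intrinsic signatures and results stay the same. A small usage check, assuming an SSSE3-capable x86 target (compile with -mssse3):

#include <stdio.h>
#include <tmmintrin.h>

int main(void) {
  __m128i v = _mm_set_epi8(-8, -7, -6, -5, -4, -3, -2, -1,
                           8, 7, 6, 5, 4, 3, 2, 1);
  __m128i a = _mm_abs_epi8(v);  /* now lowered via __builtin_elementwise_abs */
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, a);
  for (int i = 0; i < 16; ++i)
    printf("%u ", out[i]);      /* prints 1..8 twice */
  printf("\n");
  return 0;
}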
diff --git a/linux-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h b/linux-x86/lib64/clang/16.0.2/include/tsxldtrkintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/tsxldtrkintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/uintrintrin.h b/linux-x86/lib64/clang/16.0.2/include/uintrintrin.h
similarity index 97%
rename from linux-x86/lib64/clang/14.0.2/include/uintrintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/uintrintrin.h
index e3839dc..135dc81 100644
--- a/linux-x86/lib64/clang/14.0.2/include/uintrintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/uintrintrin.h
@@ -39,9 +39,9 @@
 ///
 /// This intrinsic corresponds to the <c> CLUI </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///   UIF := 0
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _clui (void)
 {
@@ -60,9 +60,9 @@
 ///
 /// This intrinsic corresponds to the <c> STUI </c> instruction.
 ///
-/// \operation
+/// \code{.operation}
 ///   UIF := 1
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _stui (void)
 {
@@ -81,7 +81,7 @@
 ///
 /// \returns The current value of the user interrupt flag (UIF).
 ///
-/// \operation
+/// \code{.operation}
 ///   CF := UIF
 ///   ZF := 0
 ///   AF := 0
@@ -89,7 +89,7 @@
 ///   PF := 0
 ///   SF := 0
 ///   dst := CF
-/// \endoperation
+/// \endcode
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _testui (void)
 {
@@ -110,7 +110,7 @@
 ///    Index of user-interrupt target table entry in user-interrupt target
 ///    table.
 ///
-/// \operation
+/// \code{.operation}
 ///   IF __a > UITTSZ
 ///     GP (0)
 ///   FI
@@ -143,7 +143,7 @@
 ///       SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
 ///     FI
 ///   FI
-/// \endoperation
+/// \endcode
 static __inline__ void __DEFAULT_FN_ATTRS
 _senduipi (unsigned long long __a)
 {
diff --git a/darwin-x86/lib64/clang/14.0.2/include/unwind.h b/linux-x86/lib64/clang/16.0.2/include/unwind.h
similarity index 95%
copy from darwin-x86/lib64/clang/14.0.2/include/unwind.h
copy to linux-x86/lib64/clang/16.0.2/include/unwind.h
index 029524b..971a62d 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/unwind.h
+++ b/linux-x86/lib64/clang/16.0.2/include/unwind.h
@@ -62,7 +62,8 @@
 typedef uintptr_t _uleb128_t;
 
 struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 struct _Unwind_Control_Block;
 typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
 #else
@@ -72,7 +73,7 @@
 typedef enum {
   _URC_NO_REASON = 0,
 #if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
+    !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
   _URC_OK = 0, /* used by ARM EHABI */
 #endif
   _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
@@ -86,7 +87,7 @@
   _URC_INSTALL_CONTEXT = 7,
   _URC_CONTINUE_UNWIND = 8,
 #if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
+    !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
   _URC_FAILURE = 9 /* used by ARM EHABI */
 #endif
 } _Unwind_Reason_Code;
@@ -103,7 +104,8 @@
 typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
                                              _Unwind_Exception *);
 
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 typedef struct _Unwind_Control_Block _Unwind_Control_Block;
 typedef uint32_t _Unwind_EHT_Header;
 
@@ -167,12 +169,14 @@
 typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
                                                 void *);
 
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) ||                \
+                          defined(__ARM_DWARF_EH__) || defined(__SEH__))
 typedef enum {
   _UVRSC_CORE = 0,        /* integer register */
   _UVRSC_VFP = 1,         /* vfp */
   _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
+  _UVRSC_WMMXC = 4,       /* Intel WMMX control register */
+  _UVRSC_PSEUDO = 5       /* Special purpose pseudo register */
 } _Unwind_VRS_RegClass;
 
 typedef enum {
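The hunk above keeps the ARM EHABI declarations out of SEH builds (__SEH__) and adds the _UVRSC_PSEUDO register class; the portable unwinder entry points are unchanged. A minimal backtrace sketch using only that portable surface:

#include <stdio.h>
#include <unwind.h>

static _Unwind_Reason_Code frame_cb(struct _Unwind_Context *ctx, void *arg) {
  int *depth = arg;
  printf("#%d pc=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
  return _URC_NO_REASON;  /* keep walking */
}

void dump_stack(void) {
  int depth = 0;
  _Unwind_Backtrace(frame_cb, &depth);
}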
diff --git a/linux-x86/lib64/clang/14.0.2/include/vadefs.h b/linux-x86/lib64/clang/16.0.2/include/vadefs.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vadefs.h
rename to linux-x86/lib64/clang/16.0.2/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h b/linux-x86/lib64/clang/16.0.2/include/vaesintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/vaesintrin.h
index f3c0807..294dcff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/vaesintrin.h
@@ -82,4 +82,4 @@
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_F
 
-#endif
+#endif // __VAESINTRIN_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/varargs.h b/linux-x86/lib64/clang/16.0.2/include/varargs.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/varargs.h
rename to linux-x86/lib64/clang/16.0.2/include/varargs.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/vecintrin.h b/linux-x86/lib64/clang/16.0.2/include/vecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vecintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/vecintrin.h
diff --git a/linux-x86/lib64/clang/16.0.2/include/velintrin.h b/linux-x86/lib64/clang/16.0.2/include/velintrin.h
new file mode 100644
index 0000000..3f2bc00
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/velintrin.h
@@ -0,0 +1,71 @@
+/*===---- velintrin.h - VEL intrinsics for VE ------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_H__
+#define __VEL_INTRIN_H__
+
+// Vector registers
+typedef double __vr __attribute__((__vector_size__(2048)));
+
+// Vector mask registers
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+// For C99
+typedef _Bool __vm    __attribute__((ext_vector_type(256)));
+typedef _Bool __vm256 __attribute__((ext_vector_type(256)));
+typedef _Bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#ifdef __cplusplus
+// For C++
+typedef bool __vm    __attribute__((ext_vector_type(256)));
+typedef bool __vm256 __attribute__((ext_vector_type(256)));
+typedef bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#error C99 or C++ is required to use vector intrinsics for VE
+#endif
+#endif
+
+enum VShuffleCodes {
+  VE_VSHUFFLE_YUYU = 0,
+  VE_VSHUFFLE_YUYL = 1,
+  VE_VSHUFFLE_YUZU = 2,
+  VE_VSHUFFLE_YUZL = 3,
+  VE_VSHUFFLE_YLYU = 4,
+  VE_VSHUFFLE_YLYL = 5,
+  VE_VSHUFFLE_YLZU = 6,
+  VE_VSHUFFLE_YLZL = 7,
+  VE_VSHUFFLE_ZUYU = 8,
+  VE_VSHUFFLE_ZUYL = 9,
+  VE_VSHUFFLE_ZUZU = 10,
+  VE_VSHUFFLE_ZUZL = 11,
+  VE_VSHUFFLE_ZLYU = 12,
+  VE_VSHUFFLE_ZLYL = 13,
+  VE_VSHUFFLE_ZLZU = 14,
+  VE_VSHUFFLE_ZLZL = 15,
+};
+
+// Use generated intrinsic name definitions
+#include <velintrin_gen.h>
+
+// Use helper functions
+#include <velintrin_approx.h>
+
+// pack
+
+#define _vel_pack_f32p __builtin_ve_vl_pack_f32p
+#define _vel_pack_f32a __builtin_ve_vl_pack_f32a
+
+static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {
+  return (((unsigned long int)a) << 32) | b;
+}
+
+#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)
+#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)
+#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)
+#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)
+
+#endif
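_vel_pack_i32 is the one plain-C helper in this header: it builds the 64-bit packed scalar that the pv* (packed) intrinsics consume, first argument in the upper 32 bits, second in the lower. For example (VE target only):

#include <velintrin.h>

unsigned long int example(void) {
  /* upper half from the first argument, lower half from the second */
  return _vel_pack_i32(0xDEADBEEFu, 0x12345678u); /* 0xDEADBEEF12345678 */
}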
diff --git a/linux-x86/lib64/clang/16.0.2/include/velintrin_approx.h b/linux-x86/lib64/clang/16.0.2/include/velintrin_approx.h
new file mode 100644
index 0000000..89d270f
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/velintrin_approx.h
@@ -0,0 +1,120 @@
+/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_APPROX_H__
+#define __VEL_INTRIN_APPROX_H__
+
+static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_vrcps_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);
+  v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);
+  v2 = _vel_vfmuls_vvvl(v0, v3, l);
+  v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);
+  v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_pvrcp_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);
+  v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);
+  v2 = _vel_pvfmul_vvvl(v0, v3, l);
+  v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);
+  v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {
+  float s1;
+  __vr v1, v2, v3, v4;
+  v4 = _vel_vrcps_vvl(v0, l);
+  s1 = 1.0;
+  v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);
+  v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);
+  v1 = _vel_vfmuls_vsvl(s0, v2, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {
+  float s1;
+  __vr v1, v2;
+  s1 = 1.0f / s0;
+  v1 = _vel_vfmuls_vsvl(s1, v0, l);
+  v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);
+  v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {
+  __vr v1, v2, v3;
+  v2 = _vel_vrcpd_vvl(v0, l);
+  double s1 = 1.0;
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);
+  v1 = _vel_vaddul_vsvl(1, v1, l);
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);
+  v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v1 = _vel_vfmuld_vsvl(s0, v3, l);
+  v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {
+  double s0, s1;
+  __vr v1, v2, v3;
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {
+  float s0, s1;
+  __vr v1, v2, v3;
+  v0 = _vel_vcvtds_vvl(v0, l);
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v0 = _vel_vcvtsd_vvl(v0, l);
+  return v0;
+}
+
+#endif
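Each helper above follows the same pattern: start from the hardware estimate (vrcp/vrsqrt), then refine it with fused negative-multiply-subtract steps (residual a - b*c) folded back in with multiply-adds. A scalar sketch of the scheme in _vel_approx_vfdivs_vvvl, with recip_estimate standing in for the vrcps instruction:

/* Newton-Raphson style refinement of a reciprocal estimate. */
static float recip_estimate(float d) { return 1.0f / d; } /* placeholder */

float approx_div(float n, float d) {
  float r  = recip_estimate(d);  /* v5 = vrcps(v1)            */
  float e  = 1.0f - d * r;       /* v4 = fnmsb(1, v1, v5)     */
  float r2 = r + r * e;          /* v3 = fmad(v5, v5, v4)     */
  float q  = n * r2;             /* v2 = fmul(v0, v3)         */
  q += r  * (n - q * d);         /* fnmsb residual, fold back */
  q += r2 * (n - q * d);         /* final correction          */
  return q;
}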
diff --git a/linux-x86/lib64/clang/16.0.2/include/velintrin_gen.h b/linux-x86/lib64/clang/16.0.2/include/velintrin_gen.h
new file mode 100644
index 0000000..845c0da
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/velintrin_gen.h
@@ -0,0 +1,1257 @@
+#define _vel_vld_vssl __builtin_ve_vl_vld_vssl
+#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl
+#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl
+#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl
+#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl
+#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl
+#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl
+#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl
+#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl
+#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl
+#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl
+#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl
+#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl
+#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl
+#define _vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl
+#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl
+#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl
+#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl
+#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl
+#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl
+#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl
+#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl
+#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl
+#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl
+#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl
+#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl
+#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl
+#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl
+#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl
+#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl
+#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl
+#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl
+#define _vel_vst_vssl __builtin_ve_vl_vst_vssl
+#define _vel_vst_vssml __builtin_ve_vl_vst_vssml
+#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl
+#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml
+#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl
+#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml
+#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl
+#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml
+#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl
+#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml
+#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl
+#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml
+#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl
+#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml
+#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl
+#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml
+#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl
+#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml
+#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl
+#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml
+#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl
+#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml
+#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl
+#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml
+#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl
+#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml
+#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl
+#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml
+#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl
+#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml
+#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl
+#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml
+#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl
+#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml
+#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl
+#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml
+#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl
+#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml
+#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl
+#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml
+#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl
+#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml
+#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl
+#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml
+#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl
+#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml
+#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl
+#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml
+#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl
+#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl
+#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss
+#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs
+#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs
+#define _vel_lvss_svs __builtin_ve_vl_lvss_svs
+#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss
+#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss
+#define _vel_svm_sms __builtin_ve_vl_svm_sms
+#define _vel_svm_sMs __builtin_ve_vl_svm_sMs
+#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl
+#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl
+#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl
+#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl
+#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl
+#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl
+#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl
+#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl
+#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl
+#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl
+#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl
+#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl
+#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl
+#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl
+#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl
+#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl
+#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl
+#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl
+#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl
+#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl
+#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl
+#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl
+#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl
+#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl
+#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl
+#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl
+#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl
+#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl
+#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl
+#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl
+#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl
+#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl
+#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl
+#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl
+#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl
+#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl
+#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl
+#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl
+#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl
+#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl
+#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl
+#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl
+#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl
+#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl
+#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl
+#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl
+#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl
+#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl
+#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl
+#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl
+#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl
+#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl
+#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl
+#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl
+#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl
+#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl
+#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl
+#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl
+#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl
+#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl
+#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl
+#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl
+#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl
+#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl
+#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl
+#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl
+#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl
+#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl
+#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl
+#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl
+#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl
+#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl
+#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl
+#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl
+#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl
+#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl
+#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl
+#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl
+#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl
+#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl
+#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl
+#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl
+#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl
+#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl
+#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl
+#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl
+#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl
+#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl
+#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl
+#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl
+#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl
+#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl
+#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl
+#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl
+#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl
+#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl
+#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl
+#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl
+#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl
+#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl
+#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl
+#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl
+#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl
+#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl
+#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl
+#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl
+#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl
+#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl
+#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl
+#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl
+#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl
+#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl
+#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl
+#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl
+#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl
+#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl
+#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl
+#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl
+#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl
+#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl
+#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl
+#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl
+#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl
+#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl
+#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl
+#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl
+#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl
+#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl
+#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl
+#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl
+#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl
+#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl
+#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl
+#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl
+#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl
+#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl
+#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl
+#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl
+#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl
+#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl
+#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl
+#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl
+#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl
+#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl
+#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl
+#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl
+#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl
+#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl
+#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl
+#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl
+#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl
+#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl
+#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl
+#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl
+#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl
+#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl
+#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl
+#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl
+#define _vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl
+#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl
+#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl
+#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl
+#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl
+#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl
+#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl
+#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl
+#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl
+#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl
+#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl
+#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl
+#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl
+#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl
+#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl
+#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl
+#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl
+#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl
+#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl
+#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl
+#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl
+#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl
+#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl
+#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl
+#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl
+#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl
+#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl
+#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl
+#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl
+#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl
+#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl
+#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl
+#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl
+#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl
+#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl
+#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl
+#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl
+#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl
+#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl
+#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl
+#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl
+#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl
+#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl
+#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl
+#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl
+#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl
+#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl
+#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl
+#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl
+#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl
+#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl
+#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl
+#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl
+#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl
+#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl
+#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl
+#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl
+#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl
+#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl
+#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl
+#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl
+#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl
+#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl
+#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl
+#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl
+#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl
+#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl
+#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl
+#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl
+#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl
+#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl
+#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl
+#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl
+#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl
+#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl
+#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl
+#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl
+#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl
+#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl
+#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl
+#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl
+#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl
+#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl
+#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl
+#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl
+#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl
+#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl
+#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl
+#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl
+#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl
+#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl
+#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl
+#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl
+#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl
+#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl
+#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl
+#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl
+#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl
+#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl
+#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl
+#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl
+#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl
+#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl
+#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl
+#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl
+#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl
+#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl
+#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl
+#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl
+#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl
+#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl
+#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl
+#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl
+#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl
+#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl
+#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl
+#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl
+#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl
+#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl
+#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl
+#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl
+#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl
+#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl
+#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl
+#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl
+#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl
+#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl
+#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl
+#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl
+#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl
+#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl
+#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl
+#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl
+#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl
+#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl
+#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl
+#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl
+#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl
+#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl
+#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl
+#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl
+#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl
+#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl
+#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl
+#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl
+#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl
+#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl
+#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl
+#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl
+#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl
+#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl
+#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl
+#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl
+#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl
+#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl
+#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl
+#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl
+#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl
+#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl
+#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl
+#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl
+#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl
+#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl
+#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl
+#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl
+#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl
+#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl
+#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl
+#define _vel_pvldzup_vvvl __builtin_ve_vl_pvldzup_vvvl
+#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl
+#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl
+#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl
+#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl
+#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl
+#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl
+#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl
+#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl
+#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl
+#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl
+#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl
+#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl
+#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl
+#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl
+#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl
+#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl
+#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl
+#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl
+#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl
+#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl
+#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl
+#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl
+#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl
+#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl
+#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl
+#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl
+#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl
+#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl
+#define _vel_vseq_vl __builtin_ve_vl_vseq_vl
+#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl
+#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl
+#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl
+#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl
+#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl
+#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl
+#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl
+#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl
+#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl
+#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl
+#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl
+#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl
+#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl
+#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl
+#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl
+#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl
+#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl
+#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl
+#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl
+#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl
+#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl
+#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl
+#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl
+#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl
+#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl
+#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl
+#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl
+#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl
+#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl
+#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl
+#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl
+#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl
+#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl
+#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl
+#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl
+#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl
+#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl
+#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl
+#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl
+#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl
+#define _vel_vslawzx_vvsvl __builtin_ve_vl_vslawzx_vvsvl
+#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl
+#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl
+#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl
+#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl
+#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl
+#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl
+#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl
+#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl
+#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl
+#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl
+#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl
+#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl
+#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl
+#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl
+#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl
+#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl
+#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl
+#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl
+#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl
+#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl
+#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl
+#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl
+#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl
+#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl
+#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl
+#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl
+#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl
+#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl
+#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl
+#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl
+#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl
+#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl
+#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl
+#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl
+#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl
+#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl
+#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl
+#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl
+#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl
+#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl
+#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl
+#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl
+#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl
+#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl
+#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl
+#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl
+#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl
+#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl
+#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl
+#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl
+#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl
+#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl
+#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl
+#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl
+#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl
+#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl
+#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl
+#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl
+#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl
+#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl
+#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl
+#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl
+#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl
+#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl
+#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl
+#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl
+#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl
+#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl
+#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl
+#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl
+#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl
+#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl
+#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl
+#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl
+#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl
+#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl
+#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl
+#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl
+#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl
+#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl
+#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl
+#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl
+#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl
+#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl
+#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl
+#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl
+#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl
+#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl
+#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl
+#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl
+#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl
+#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl
+#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl
+#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl
+#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl
+#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl
+#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl
+#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl
+#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl
+#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl
+#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl
+#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl
+#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl
+#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl
+#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl
+#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl
+#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl
+#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl
+#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl
+#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl
+#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl
+#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl
+#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl
+#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl
+#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl
+#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl
+#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl
+#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl
+#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl
+#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl
+#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl
+#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl
+#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl
+#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl
+#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl
+#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl
+#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl
+#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl
+#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl
+#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl
+#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl
+#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl
+#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl
+#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl
+#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl
+#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl
+#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl
+#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl
+#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl
+#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl
+#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl
+#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl
+#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl
+#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl
+#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl
+#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl
+#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl
+#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl
+#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl
+#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl
+#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl
+#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl
+#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl
+#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl
+#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl
+#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl
+#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl
+#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl
+#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl
+#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl
+#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl
+#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl
+#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl
+#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl
+#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl
+#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl
+#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl
+#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl
+#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl
+#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl
+#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl
+#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl
+#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl
+#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl
+#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl
+#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl
+#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl
+#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl
+#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl
+#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl
+#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl
+#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl
+#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl
+#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl
+#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl
+#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl
+#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl
+#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl
+#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl
+#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl
+#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl
+#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl
+#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl
+#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl
+#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl
+#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl
+#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl
+#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl
+#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl
+#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl
+#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl
+#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl
+#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl
+#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl
+#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl
+#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl
+#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl
+#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl
+#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl
+#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl
+#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl
+#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl
+#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl
+#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl
+#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl
+#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl
+#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl
+#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl
+#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl
+#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl
+#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl
+#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl
+#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl
+#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl
+#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl
+#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl
+#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl
+#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl
+#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl
+#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl
+#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl
+#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl
+#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl
+#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl
+#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl
+#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl
+#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl
+#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl
+#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl
+#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl
+#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl
+#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl
+#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl
+#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl
+#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl
+#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl
+#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl
+#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl
+#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl
+#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl
+#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl
+#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl
+#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl
+#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl
+#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl
+#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl
+#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl
+#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl
+#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl
+#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl
+#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl
+#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl
+#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl
+#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl
+#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl
+#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl
+#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl
+#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl
+#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl
+#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl
+#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl
+#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl
+#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl
+#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl
+#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl
+#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl
+#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl
+#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl
+#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl
+#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl
+#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl
+#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl
+#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl
+#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl
+#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl
+#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl
+#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl
+#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl
+#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl
+#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl
+#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl
+#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl
+#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl
+#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl
+#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl
+#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl
+#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl
+#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl
+#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl
+#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl
+#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl
+#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl
+#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl
+#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl
+#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl
+#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl
+#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl
+#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl
+#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl
+#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl
+#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl
+#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl
+#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl
+#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl
+#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl
+#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl
+#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl
+#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl
+#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl
+#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl
+#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl
+#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl
+#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl
+#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl
+#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl
+#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl
+#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl
+#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl
+#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl
+#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl
+#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl
+#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl
+#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl
+#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl
+#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl
+#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl
+#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml
+#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl
+#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml
+#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl
+#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl
+#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl
+#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl
+#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl
+#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl
+#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl
+#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl
+#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl
+#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml
+#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml
+#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml
+#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml
+#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl
+#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml
+#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl
+#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml
+#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl
+#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml
+#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl
+#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml
+#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl
+#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml
+#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl
+#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml
+#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl
+#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml
+#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl
+#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml
+#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl
+#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml
+#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl
+#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml
+#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl
+#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml
+#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl
+#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml
+#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl
+#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml
+#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl
+#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml
+#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl
+#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml
+#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl
+#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml
+#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl
+#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml
+#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl
+#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml
+#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl
+#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml
+#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl
+#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml
+#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl
+#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml
+#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl
+#define _vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml
+#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl
+#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml
+#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl
+#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml
+#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl
+#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml
+#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl
+#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml
+#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl
+#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml
+#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl
+#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml
+#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl
+#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl
+#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml
+#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml
+#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl
+#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl
+#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml
+#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml
+#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl
+#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl
+#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml
+#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml
+#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl
+#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl
+#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml
+#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml
+#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl
+#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl
+#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml
+#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml
+#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl
+#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl
+#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml
+#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml
+#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl
+#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl
+#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml
+#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml
+#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl
+#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl
+#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml
+#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml
+#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl
+#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl
+#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml
+#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml
+#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl
+#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl
+#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml
+#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml
+#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl
+#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl
+#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml
+#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl
+#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl
+#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml
+#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml
+#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl
+#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl
+#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml
+#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml
+#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl
+#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl
+#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml
+#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml
+#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl
+#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl
+#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl
+#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl
+#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl
+#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl
+#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl
+#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl
+#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl
+#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl
+#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl
+#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl
+#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl
+#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl
+#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl
+#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl
+#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl
+#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl
+#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl
+#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl
+#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl
+#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl
+#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl
+#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl
+#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl
+#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl
+#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl
+#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl
+#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl
+#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml
+#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl
+#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml
+#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl
+#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml
+#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl
+#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml
+#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl
+#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml
+#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl
+#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml
+#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl
+#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml
+#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl
+#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml
+#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl
+#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml
+#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml
+#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl
+#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml
+#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl
+#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml
+#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl
+#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml
+#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl
+#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml
+#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl
+#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml
+#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl
+#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml
+#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl
+#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml
+#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl
+#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml
+#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl
+#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml
+#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl
+#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml
+#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl
+#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml
+#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl
+#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml
+#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl
+#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml
+#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl
+#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml
+#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl
+#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml
+#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl
+#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml
+#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl
+#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml
+#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl
+#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml
+#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl
+#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl
+#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml
+#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml
+#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl
+#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl
+#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml
+#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml
+#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl
+#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl
+#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml
+#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml
+#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl
+#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl
+#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml
+#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml
+#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl
+#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl
+#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml
+#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml
+#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl
+#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl
+#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml
+#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml
+#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl
+#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl
+#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml
+#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml
+#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl
+#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl
+#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml
+#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml
+#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl
+#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl
+#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml
+#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml
+#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl
+#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl
+#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml
+#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml
+#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl
+#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl
+#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml
+#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml
+#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl
+#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl
+#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml
+#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml
+#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl
+#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl
+#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml
+#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml
+#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl
+#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl
+#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml
+#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml
+#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl
+#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl
+#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl
+#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl
+#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl
+#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl
+#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl
+#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl
+#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl
+#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl
+#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl
+#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl
+#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl
+#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl
+#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl
+#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl
+#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl
+#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl
+#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl
+#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl
+#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl
+#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl
+#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl
+#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl
+#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl
+#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl
+#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl
+#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl
+#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl
+#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml
+#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl
+#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml
+#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl
+#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml
+#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl
+#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml
+#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl
+#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml
+#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl
+#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl
+#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl
+#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl
+#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl
+#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl
+#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl
+#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl
+#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl
+#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl
+#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl
+#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl
+#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl
+#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl
+#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl
+#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl
+#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl
+#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl
+#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl
+#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl
+#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl
+#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl
+#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl
+#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl
+#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl
+#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl
+#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl
+#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl
+#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl
+#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl
+#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl
+#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl
+#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl
+#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl
+#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl
+#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl
+#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl
+#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl
+#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl
+#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl
+#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl
+#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml
+#define _vel_vror_vvl __builtin_ve_vl_vror_vvl
+#define _vel_vror_vvml __builtin_ve_vl_vror_vvml
+#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl
+#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml
+#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl
+#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl
+#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml
+#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl
+#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl
+#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl
+#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml
+#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl
+#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl
+#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl
+#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml
+#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl
+#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl
+#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl
+#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml
+#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl
+#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl
+#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl
+#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml
+#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl
+#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl
+#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl
+#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml
+#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl
+#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl
+#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl
+#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml
+#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl
+#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl
+#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl
+#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml
+#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl
+#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl
+#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml
+#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl
+#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml
+#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl
+#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml
+#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl
+#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml
+#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl
+#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml
+#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl
+#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml
+#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl
+#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml
+#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl
+#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml
+#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl
+#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml
+#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl
+#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml
+#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl
+#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml
+#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl
+#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml
+#define _vel_andm_mmm __builtin_ve_vl_andm_mmm
+#define _vel_andm_MMM __builtin_ve_vl_andm_MMM
+#define _vel_orm_mmm __builtin_ve_vl_orm_mmm
+#define _vel_orm_MMM __builtin_ve_vl_orm_MMM
+#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm
+#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM
+#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm
+#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM
+#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm
+#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM
+#define _vel_negm_mm __builtin_ve_vl_negm_mm
+#define _vel_negm_MM __builtin_ve_vl_negm_MM
+#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml
+#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml
+#define _vel_tovm_sml __builtin_ve_vl_tovm_sml
+#define _vel_lcr_sss __builtin_ve_vl_lcr_sss
+#define _vel_scr_sss __builtin_ve_vl_scr_sss
+#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss
+#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss
+#define _vel_fencei __builtin_ve_vl_fencei
+#define _vel_fencem_s __builtin_ve_vl_fencem_s
+#define _vel_fencec_s __builtin_ve_vl_fencec_s
+#define _vel_svob __builtin_ve_vl_svob
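+
+/* A hedged usage sketch, not from the upstream header: these names appear to
+ * follow the NEC VE intrinsics convention, where the suffix encodes the
+ * operand signature ('v' vector, 's' scalar, 'm' 256-bit mask, 'M' 512-bit
+ * mask, and a trailing 'l' the explicit vector length). Assuming that
+ * convention:
+ *
+ *   __vr sum = _vel_vfsumd_vvl(va, 256);  // horizontal sum of 256 doubles
+ */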
diff --git a/linux-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h b/linux-x86/lib64/clang/16.0.2/include/vpclmulqdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/vpclmulqdqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/waitpkgintrin.h b/linux-x86/lib64/clang/16.0.2/include/waitpkgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/waitpkgintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h b/linux-x86/lib64/clang/16.0.2/include/wasm_simd128.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h
copy to linux-x86/lib64/clang/16.0.2/include/wasm_simd128.h
index 3889a27..f93de12 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h
+++ b/linux-x86/lib64/clang/16.0.2/include/wasm_simd128.h
@@ -1405,12 +1405,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+  return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)__a);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+  return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)__a);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
diff --git a/linux-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h b/linux-x86/lib64/clang/16.0.2/include/wbnoinvdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/wbnoinvdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/wmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/wmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/wmmintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/wmmintrin.h
diff --git a/linux-x86/lib64/clang/16.0.2/include/x86gprintrin.h b/linux-x86/lib64/clang/16.0.2/include/x86gprintrin.h
new file mode 100644
index 0000000..81d7360
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/x86gprintrin.h
@@ -0,0 +1,50 @@
+/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#define __X86GPRINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__HRESET__)
+#include <hresetintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__UINTR__)
+#include <uintrintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CRC32__)
+#include <crc32intrin.h>
+#endif
+
+#if defined(__i386__)
+#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"
+#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};"
+#define __TMPGPR "eax"
+#else
+// On a 64-bit target, 32-bit operands generate a 32-bit result that is
+// zero-extended to a 64-bit result in the destination general-purpose
+// register. This means "mov x, %ebx" would clobber the upper 32 bits of rbx,
+// so we should preserve the full 64-bit register rbx.
+#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};"
+#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};"
+#define __TMPGPR "rax"
+#endif
+
+#define __SSC_MARK(__Tag)                                                      \
+  __asm__ __volatile__( __SAVE_GPRBX                                           \
+                       "mov {%0, %%ebx|ebx, %0}; "                             \
+                       ".byte 0x64, 0x67, 0x90; "                              \
+                        __RESTORE_GPRBX                                        \
+                       ::"i"(__Tag)                                            \
+                       :  __TMPGPR );
+
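+/* A minimal usage sketch (the tag value is illustrative): __SSC_MARK emits a
+ * marker byte sequence that simulation tools can detect, with ebx/rbx
+ * preserved by the save/restore moves above:
+ *
+ *   __SSC_MARK(0x1);
+ */
+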
+#endif /* __X86GPRINTRIN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/x86intrin.h b/linux-x86/lib64/clang/16.0.2/include/x86intrin.h
similarity index 92%
copy from darwin-x86/lib64/clang/14.0.2/include/x86intrin.h
copy to linux-x86/lib64/clang/16.0.2/include/x86intrin.h
index 768d0e5..450fd00 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/x86intrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/x86intrin.h
@@ -59,5 +59,9 @@
 #include <clzerointrin.h>
 #endif
 
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDPRU__)
+#include <rdpruintrin.h>
+#endif
 
 #endif /* __X86INTRIN_H */
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h b/linux-x86/lib64/clang/16.0.2/include/xmmintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h
copy to linux-x86/lib64/clang/16.0.2/include/xmmintrin.h
index 1612d3d..b3a14ad 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h
+++ b/linux-x86/lib64/clang/16.0.2/include/xmmintrin.h
@@ -2086,7 +2086,7 @@
 /// \headerfile <x86intrin.h>
 ///
 /// \code
-/// void _mm_prefetch(const void * a, const int sel);
+/// void _mm_prefetch(const void *a, const int sel);
 /// \endcode
 ///
 /// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
@@ -2360,7 +2360,10 @@
 ///    00: assigned from bits [15:0] of \a a. \n
 ///    01: assigned from bits [31:16] of \a a. \n
 ///    10: assigned from bits [47:32] of \a a. \n
-///    11: assigned from bits [63:48] of \a a.
+///    11: assigned from bits [63:48] of \a a. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 64-bit integer vector containing the shuffled values.
 #define _mm_shuffle_pi16(a, n) \
   ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))
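+// A hedged example (names illustrative): reverse the four 16-bit words of a
+// using the _MM_SHUFFLE helper described above:
+//   __m64 r = _mm_shuffle_pi16(a, _MM_SHUFFLE(0, 1, 2, 3));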
@@ -2602,7 +2605,10 @@
 ///    00: Bits [31:0] copied from the specified operand. \n
 ///    01: Bits [63:32] copied from the specified operand. \n
 ///    10: Bits [95:64] copied from the specified operand. \n
-///    11: Bits [127:96] copied from the specified operand.
+///    11: Bits [127:96] copied from the specified operand. \n
+///    Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+///    <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+///    <c>[b6, b4, b2, b0]</c>.
 /// \returns A 128-bit vector of [4 x float] containing the shuffled values.
 #define _mm_shuffle_ps(a, b, mask) \
   ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
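+// A hedged example (names illustrative): _MM_SHUFFLE(0, 1, 2, 3) selects
+// elements 3,2,1,0 into positions 0..3, reversing the four floats of a:
+//   __m128 r = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 2, 3));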
@@ -2999,7 +3005,6 @@
 #define _m_pavgw _mm_avg_pu16
 #define _m_psadbw _mm_sad_pu8
 #define _m_ _mm_
-#define _m_ _mm_
 
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_MMX
diff --git a/linux-x86/lib64/clang/14.0.2/include/xopintrin.h b/linux-x86/lib64/clang/16.0.2/include/xopintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xopintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xopintrin.h
diff --git a/linux-x86/lib64/clang/16.0.2/include/xray/xray_interface.h b/linux-x86/lib64/clang/16.0.2/include/xray/xray_interface.h
new file mode 100644
index 0000000..410515d
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/xray/xray_interface.h
@@ -0,0 +1,130 @@
+//===- xray_interface.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// APIs for controlling XRay functionality explicitly.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_XRAY_INTERFACE_H
+#define XRAY_XRAY_INTERFACE_H
+
+#include <cstddef>
+#include <cstdint>
+
+extern "C" {
+
+/// Synchronize this with AsmPrinter::SledKind in LLVM.
+enum XRayEntryType {
+  ENTRY = 0,
+  EXIT = 1,
+  TAIL = 2,
+  LOG_ARGS_ENTRY = 3,
+  CUSTOM_EVENT = 4,
+  TYPED_EVENT = 5,
+};
+
+/// Provide a function to invoke when instrumentation points are hit. This
+/// is a user-visible control surface that overrides the default implementation.
+/// The function provided should take the following arguments:
+///
+///   - function id: an identifier that indicates the id of a function; this id
+///                  is generated by xray; the mapping between the function id
+///                  and the actual function pointer is available through
+///                  __xray_table.
+///   - entry type: identifies what kind of instrumentation point was
+///                 encountered (function entry, function exit, etc.). See the
+///                 enum XRayEntryType for more details.
+///
+/// The user handler must correctly handle spurious calls after this handler is
+/// removed or replaced with another handler, because it would be too costly
+/// for the XRay runtime to avoid spurious calls.
+/// To prevent circular calling, the handler function itself and all its
+/// direct and indirect callees must not be instrumented with XRay, which can be
+/// achieved by marking them all with: __attribute__((xray_never_instrument))
+///
+/// Returns 1 on success, 0 on error.
+extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType));
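+
+// A minimal sketch (handler name illustrative, not part of this interface):
+//
+//   __attribute__((xray_never_instrument))
+//   void MyHandler(int32_t FuncId, XRayEntryType Entry) {
+//     // Record FuncId/Entry somewhere; must tolerate spurious calls.
+//   }
+//
+//   if (__xray_set_handler(MyHandler))
+//     __xray_patch();  // activate the instrumentation points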
+
+/// This removes whatever the currently provided handler is. Returns 1 on
+/// success, 0 on error.
+extern int __xray_remove_handler();
+
+/// Use XRay to log the first argument of each (instrumented) function call.
+/// When this function exits, all threads will have observed the effect and
+/// start logging their subsequent affected function calls (if patched).
+///
+/// Returns 1 on success, 0 on error.
+extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType,
+                                                 uint64_t));
+
+/// Disables the XRay handler used to log first arguments of function calls.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_handler_arg1();
+
+/// Provide a function to invoke when XRay encounters a custom event.
+extern int __xray_set_customevent_handler(void (*entry)(void *, std::size_t));
+
+/// This removes whatever the currently provided custom event handler is.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_customevent_handler();
+
+/// Set a handler for xray typed event logging. The first parameter is a type
+/// identifier, the second is a payload, and the third is the payload size.
+extern int __xray_set_typedevent_handler(void (*entry)(uint16_t, const void *,
+                                                       std::size_t));
+
+/// Removes the currently set typed event handler.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_typedevent_handler();
+
+extern uint16_t __xray_register_event_type(const char *event_type);
+
+enum XRayPatchingStatus {
+  NOT_INITIALIZED = 0,
+  SUCCESS = 1,
+  ONGOING = 2,
+  FAILED = 3,
+};
+
+/// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
+/// for possible result values.
+extern XRayPatchingStatus __xray_patch();
+
+/// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_unpatch();
+
+/// This patches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_patch_function(int32_t FuncId);
+
+/// This unpatches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_unpatch_function(int32_t FuncId);
+
+/// This function returns the address of the function, given a valid function
+/// id. We return 0 if we encounter any error, even if 0 may be a valid function
+/// address.
+extern uintptr_t __xray_function_address(int32_t FuncId);
+
+/// This function returns the maximum valid function id. Returns 0 if we
+/// encounter errors (when there are no instrumented functions, etc.).
+extern size_t __xray_max_function_id();
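+
+// A sketch of walking the function table (assuming ids start at 1, which is
+// consistent with 0 being the error value above):
+//
+//   for (size_t Id = 1; Id <= __xray_max_function_id(); ++Id)
+//     if (uintptr_t Addr = __xray_function_address(Id))
+//       ;  // Addr is the entry address of the function with id Id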
+
+/// Initialize the required XRay data structures. This is useful in cases where
+/// users want to control precisely when the XRay instrumentation data
+/// structures are initialized, for example when the XRay library is built with
+/// the XRAY_NO_PREINIT preprocessor definition.
+///
+/// Calling __xray_init() more than once is safe across multiple threads.
+extern void __xray_init();
+
+} // end extern "C"
+
+#endif // XRAY_XRAY_INTERFACE_H
diff --git a/linux-x86/lib64/clang/16.0.2/include/xray/xray_log_interface.h b/linux-x86/lib64/clang/16.0.2/include/xray/xray_log_interface.h
new file mode 100644
index 0000000..2e91b7e
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/xray/xray_log_interface.h
@@ -0,0 +1,357 @@
+//===-- xray_log_interface.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+// APIs for installing a new logging implementation.
+//
+//===----------------------------------------------------------------------===//
+///
+/// XRay allows users to implement their own logging handlers and install them
+/// to replace the default runtime-controllable implementation that comes with
+/// compiler-rt/xray. The "flight data recorder" (FDR) mode implementation uses
+/// this API to install itself in an XRay-enabled binary. See
+/// compiler-rt/lib/xray_fdr_logging.{h,cc} for details of that implementation.
+///
+/// The high-level usage pattern for these APIs looks like the following:
+///
+///   // We choose the mode which we'd like to install, and check whether this
+///   // has succeeded. Each mode will have its own set of flags it will
+///   // support, outside of the global XRay configuration options that are
+///   // defined in the XRAY_OPTIONS environment variable.
+///   auto select_status = __xray_log_select_mode("xray-fdr");
+///   if (select_status != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
+///     // This failed, we should not proceed with attempting to initialise
+///     // the currently selected mode.
+///     return;
+///   }
+///
+///   // Once that's done, we can now attempt to configure the implementation.
+///   // To do this, we provide the string flags configuration for the mode.
+///   auto config_status = __xray_log_init_mode(
+///       "xray-fdr", "verbosity=1 some_flag=1 another_flag=2");
+///   if (config_status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+///     // deal with the error here, if there is one.
+///   }
+///
+///   // When the log implementation has had the chance to initialize, we can
+///   // now patch the instrumentation points. Note that we could have patched
+///   // the instrumentation points first, but there's no strict ordering to
+///   // these operations.
+///   auto patch_status = __xray_patch();
+///   if (patch_status != XRayPatchingStatus::SUCCESS) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // If we want to stop the implementation, we can then finalize it (before
+///   // optionally flushing the log).
+///   auto fin_status = __xray_log_finalize();
+///   if (fin_status != XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // We can optionally wait before flushing the log to give other threads a
+///   // chance to see that the implementation is already finalized. Also, at
+///   // this point we can optionally unpatch the instrumentation points to
+///   // reduce overheads at runtime.
+///   auto unpatch_status = __xray_unpatch();
+///   if (unpatch_status != XRayPatchingStatus::SUCCESS) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // If there are logs or data to be flushed somewhere, we can do so only
+///   // after we've finalized the log. Some implementations may not actually
+///   // have anything to log (it might keep the data in memory, or periodically
+///   // be logging the data anyway).
+///   auto flush_status = __xray_log_flushLog();
+///   if (flush_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // Alternatively, we can go through the buffers ourselves without
+///   // relying on the implementations' flushing semantics (if the
+///   // implementation supports exporting this data directly).
+///   auto MyBufferProcessor = +[](const char* mode, XRayBuffer buffer) {
+///     // Check the "mode" to see if it's something we know how to handle...
+///     // and/or do something with an XRayBuffer instance.
+///   };
+///   auto process_status = __xray_log_process_buffers(MyBufferProcessor);
+///   if (process_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+/// NOTE: Before calling __xray_patch() again, consider re-initializing the
+/// implementation first. Some implementations might stay in an "off" state when
+/// they are finalized, while some might be in an invalid/unknown state.
+///
+#ifndef XRAY_XRAY_LOG_INTERFACE_H
+#define XRAY_XRAY_LOG_INTERFACE_H
+
+#include "xray/xray_interface.h"
+#include <stddef.h>
+
+extern "C" {
+
+/// This enum defines the valid states the logging implementation can be in.
+enum XRayLogInitStatus {
+  /// The default state is uninitialized, and in case there were errors in the
+  /// initialization, the implementation MUST return XRAY_LOG_UNINITIALIZED.
+  XRAY_LOG_UNINITIALIZED = 0,
+
+  /// Some implementations support multi-stage init (or asynchronous init), and
+  /// may return XRAY_LOG_INITIALIZING to signal callers of the API that
+  /// there's an ongoing initialization routine running. This allows
+  /// implementations to support concurrent threads attempting to initialize,
+  /// while only signalling success in one.
+  XRAY_LOG_INITIALIZING = 1,
+
+  /// When an implementation is done initializing, it MUST return
+  /// XRAY_LOG_INITIALIZED. When users call `__xray_patch()`, they are
+  /// guaranteed that the implementation installed with
+  /// `__xray_set_log_impl(...)` has been initialized.
+  XRAY_LOG_INITIALIZED = 2,
+
+  /// Some implementations might support multi-stage finalization (or
+  /// asynchronous finalization), and may return XRAY_LOG_FINALIZING to signal
+  /// callers of the API that there's an ongoing finalization routine running.
+  /// This allows implementations to support concurrent threads attempting to
+  /// finalize, while only signalling success/completion in one.
+  XRAY_LOG_FINALIZING = 3,
+
+  /// When an implementation is done finalizing, it MUST return
+  /// XRAY_LOG_FINALIZED. It is up to the implementation to determine what the
+/// semantics of a finalized implementation are. Some implementations might
+  /// allow re-initialization once the log is finalized, while some might always
+  /// be on (and that finalization is a no-op).
+  XRAY_LOG_FINALIZED = 4,
+};
+
+/// This enum allows an implementation to signal log flushing operations via
+/// `__xray_log_flushLog()`, and the state of flushing the log.
+enum XRayLogFlushStatus {
+  XRAY_LOG_NOT_FLUSHING = 0,
+  XRAY_LOG_FLUSHING = 1,
+  XRAY_LOG_FLUSHED = 2,
+};
+
+/// This enum indicates the installation state of a logging implementation, when
+/// associating a mode to a particular logging implementation through
+/// `__xray_log_register_mode(...)` or through `__xray_log_select_mode(...)`.
+enum XRayLogRegisterStatus {
+  XRAY_REGISTRATION_OK = 0,
+  XRAY_DUPLICATE_MODE = 1,
+  XRAY_MODE_NOT_FOUND = 2,
+  XRAY_INCOMPLETE_IMPL = 3,
+};
+
+/// A valid XRay logging implementation MUST provide all of the function
+/// pointers in XRayLogImpl when being installed through `__xray_set_log_impl`.
+/// To be precise, ALL the function pointers MUST NOT be nullptr.
+struct XRayLogImpl {
+  /// The log initialization routine provided by the implementation, always
+  /// provided with the following parameters:
+  ///
+  ///   - buffer size (unused)
+  ///   - maximum number of buffers (unused)
+  ///   - a pointer to an argument struct that the implementation MUST handle
+  ///   - the size of the argument struct
+  ///
+  /// See XRayLogInitStatus for details on what the implementation MUST return
+  /// when called.
+  ///
+  /// If the implementation needs to install handlers aside from the 0-argument
+  /// function call handler, it MUST do so in this initialization handler.
+  ///
+  /// See xray_interface.h for available handler installation routines.
+  XRayLogInitStatus (*log_init)(size_t, size_t, void *, size_t);
+
+  /// The log finalization routine provided by the implementation.
+  ///
+  /// See XRayLogInitStatus for details on what the implementation MUST return
+  /// when called.
+  XRayLogInitStatus (*log_finalize)();
+
+  /// The 0-argument function call handler. XRay logging implementations MUST
+  /// always have a handler for function entry and exit events. In case the
+  /// implementation wants to support arg1 (or other future extensions to XRay
+  /// logging) those MUST be installed by the installed 'log_init' handler.
+  ///
+  /// Because we didn't want to change the ABI of this struct, the arg1 handler
+  /// may be silently overwritten during initialization as well.
+  void (*handle_arg0)(int32_t, XRayEntryType);
+
+  /// The log implementation provided routine for when __xray_log_flushLog() is
+  /// called.
+  ///
+  /// See XRayLogFlushStatus for details on what the implementation MUST return
+  /// when called.
+  XRayLogFlushStatus (*flush_log)();
+};
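+
+// A sketch of wiring up a custom mode (all names illustrative); members are
+// passed in declaration order (log_init, log_finalize, handle_arg0,
+// flush_log):
+//
+//   XRayLogImpl Impl{MyInit, MyFinalize, MyHandleArg0, MyFlush};
+//   __xray_log_register_mode("my-mode", Impl);
+//   __xray_log_select_mode("my-mode");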
+
+/// DEPRECATED: Use the mode registration workflow instead with
+/// __xray_log_register_mode(...) and __xray_log_select_mode(...). See the
+/// documentation for those functions.
+///
+/// This function installs a new logging implementation that XRay will use. In
+/// case there are any nullptr members in Impl, XRay will *uninstall any
+/// existing implementations*. It does NOT patch the instrumentation points.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+///   - When the implementation is UNINITIALIZED.
+///   - When the implementation is FINALIZED.
+///   - When there is no current implementation installed.
+///
+/// What happens when this function is called in any other state is defined
+/// by the logging implementation.
+void __xray_set_log_impl(XRayLogImpl Impl);
+
+/// This function registers a logging implementation against a "mode"
+/// identifier. This allows multiple modes to be registered, and chosen at
+/// runtime using the same mode identifier through
+/// `__xray_log_select_mode(...)`.
+///
+/// We treat the Mode identifier as a null-terminated byte string, as the
+/// identifier used when retrieving the log impl.
+///
+/// Returns:
+///   - XRAY_REGISTRATION_OK on success.
+///   - XRAY_DUPLICATE_MODE when an implementation is already associated with
+///     the provided Mode; does not update the already-registered
+///     implementation.
+XRayLogRegisterStatus __xray_log_register_mode(const char *Mode,
+                                               XRayLogImpl Impl);
+
+/// This function selects the implementation associated with Mode that has been
+/// registered through __xray_log_register_mode(...) and installs that
+/// implementation (as if through calling __xray_set_log_impl(...)). The same
+/// caveats apply to __xray_log_select_mode(...) as with
+/// __xray_set_log_impl(...).
+///
+/// Returns:
+///   - XRAY_REGISTRATION_OK on success.
+///   - XRAY_MODE_NOT_FOUND if there is no implementation associated with Mode;
+///     does not update the currently installed implementation.
+XRayLogRegisterStatus __xray_log_select_mode(const char *Mode);
+
+/// Returns an identifier for the currently selected XRay mode chosen through
+/// the __xray_log_select_mode(...) function call. Returns nullptr if there is
+/// no currently installed mode.
+const char *__xray_log_get_current_mode();
+
+/// This function removes the currently installed implementation. It will also
+/// uninstall any handlers that have been previously installed. It does NOT
+/// unpatch the instrumentation points.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+///   - When the implementation is UNINITIALIZED.
+///   - When the implementation is FINALIZED.
+///   - When there is no current implementation installed.
+///
+/// What happens when this function is called in any other state is defined
+/// by the logging implementation.
+void __xray_remove_log_impl();
+
+/// DEPRECATED: Use __xray_log_init_mode() instead, and provide all the options
+/// in string form.
+/// Invokes the installed implementation initialization routine. See
+/// XRayLogInitStatus for what the return values mean.
+XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
+                                  void *Args, size_t ArgsSize);
+
+/// Invokes the installed initialization routine, which *must* support the
+/// string-based form.
+///
+/// NOTE: When this API is used, we still invoke the installed initialization
+/// routine, but we will call it with the following convention to signal that we
+/// are using the string form:
+///
+/// - BufferSize = 0
+/// - MaxBuffers = 0
+/// - ArgsSize = 0
+/// - Args will be the pointer to the character buffer representing the
+///   configuration.
+///
+/// FIXME: Updating the XRayLogImpl struct is an ABI breaking change. When we
+/// are ready to make a breaking change, we should clean this up appropriately.
+XRayLogInitStatus __xray_log_init_mode(const char *Mode, const char *Config);
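+
+// A sketch, reusing the flag string from the example above (flag names are
+// mode-defined):
+//
+//   if (__xray_log_init_mode("xray-fdr", "verbosity=1") ==
+//       XRayLogInitStatus::XRAY_LOG_INITIALIZED)
+//     __xray_patch();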
+
+/// Like __xray_log_init_mode(...), this version allows for providing
+/// configurations that might have non-null-terminated strings. This will
+/// operate similarly to __xray_log_init_mode, with the exception that
+/// |ArgsSize| will be what |ConfigSize| is.
+XRayLogInitStatus __xray_log_init_mode_bin(const char *Mode, const char *Config,
+                                           size_t ConfigSize);
+
+/// Invokes the installed implementation finalization routine. See
+/// XRayLogInitStatus for what the return values mean.
+XRayLogInitStatus __xray_log_finalize();
+
+/// Invokes the installed implementation's log flushing routine. See
+/// XRayLogFlushStatus for what the return values mean.
+XRayLogFlushStatus __xray_log_flushLog();
+
+/// An XRayBuffer represents a section of memory which can be treated by log
+/// processing functions as bytes stored in the logging implementation's
+/// buffers.
+struct XRayBuffer {
+  const void *Data;
+  size_t Size;
+};
+
+/// Registers an iterator function which takes an XRayBuffer argument, then
+/// returns another XRayBuffer representing the next buffer. When the
+/// Iterator function returns an empty XRayBuffer (Data = nullptr, Size = 0),
+/// this signifies the end of the buffers.
+///
+/// The first invocation of this Iterator function will always take an empty
+/// XRayBuffer (Data = nullptr, Size = 0).
+void __xray_log_set_buffer_iterator(XRayBuffer (*Iterator)(XRayBuffer));
+
+/// Removes the currently registered buffer iterator function.
+void __xray_log_remove_buffer_iterator();
+
+/// Invokes the provided handler to process data maintained by the logging
+/// handler. This API will be provided raw access to the data available in
+/// memory from the logging implementation. The callback function must:
+///
+/// 1) Not modify the data, to avoid running into undefined behaviour.
+///
+/// 2) Either know the data layout, or treat the data as raw bytes for later
+///    interpretation.
+///
+/// This API is best used in place of the `__xray_log_flushLog()` implementation
+/// above to enable the caller to provide an alternative means of extracting the
+/// data from the XRay implementation.
+///
+/// Implementations MUST then provide:
+///
+/// 1) A function that will return an XRayBuffer. Functions that return an
+///    "empty" XRayBuffer signifies that there are no more buffers to be
+///    processed. This function should be registered through the
+///    `__xray_log_set_buffer_iterator(...)` function.
+///
+/// 2) Its own means of converting data it holds in memory into an XRayBuffer
+///    structure.
+///
+/// See XRayLogFlushStatus for what the return values mean.
+///
+XRayLogFlushStatus __xray_log_process_buffers(void (*Processor)(const char *,
+                                                                XRayBuffer));
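+
+/// A minimal sketch of a processor callback that treats each buffer as raw
+/// bytes and appends them to a file; the file name is illustrative, and the
+/// Mode string identifies which implementation produced the data.
+///
+/// \code
+///   void WriteRawBuffer(const char *Mode, XRayBuffer B) {
+///     if (std::FILE *F = std::fopen("xray-buffers.bin", "ab")) {
+///       std::fwrite(B.Data, 1, B.Size, F); // Read-only access to B.Data.
+///       std::fclose(F);
+///     }
+///   }
+///
+///   XRayLogFlushStatus S = __xray_log_process_buffers(WriteRawBuffer);
+/// \endcode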
+
+} // extern "C"
+
+#endif // XRAY_XRAY_LOG_INTERFACE_H
diff --git a/linux-x86/lib64/clang/16.0.2/include/xray/xray_records.h b/linux-x86/lib64/clang/16.0.2/include/xray/xray_records.h
new file mode 100644
index 0000000..89ccb4d
--- /dev/null
+++ b/linux-x86/lib64/clang/16.0.2/include/xray/xray_records.h
@@ -0,0 +1,134 @@
+//===-- xray_records.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This header exposes some record types useful for the XRay in-memory logging
+// implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_XRAY_RECORDS_H
+#define XRAY_XRAY_RECORDS_H
+
+#include <cstdint>
+
+namespace __xray {
+
+enum FileTypes {
+  NAIVE_LOG = 0,
+  FDR_LOG = 1,
+};
+
+// How FDR mode uses the union field in the XRayFileHeader.
+struct alignas(16) FdrAdditionalHeaderData {
+  uint64_t ThreadBufferSize;
+};
+
+static_assert(sizeof(FdrAdditionalHeaderData) == 16,
+              "FdrAdditionalHeaderData != 16 bytes");
+
+// This data structure is used to describe the contents of the file. We use this
+// for versioning the supported XRay file formats.
+struct alignas(32) XRayFileHeader {
+  uint16_t Version = 0;
+
+  // The type of file we're writing out. See the FileTypes enum for more
+  // information. This allows different XRay logging implementations to store
+  // different information in their own file formats.
+  uint16_t Type = 0;
+
+  // What follows is a set of flags that indicate useful things for when
+  // reading the data in the file.
+  bool ConstantTSC : 1;
+  bool NonstopTSC : 1;
+
+  // The frequency at which the TSC increments, in cycles per second.
+  alignas(8) uint64_t CycleFrequency = 0;
+
+  union {
+    char FreeForm[16];
+    // The current civil-time timestamp, as retrieved from 'clock_gettime'.
+    // This allows readers of the file to determine when the file was created
+    // or written.
+    struct timespec TS;
+
+    struct FdrAdditionalHeaderData FdrData;
+  };
+} __attribute__((packed));
+
+static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
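+
+// A minimal reader sketch (not part of the original header), assuming `Data`
+// points at the start of a complete trace and <cstring> is available; error
+// handling is elided.
+//
+// \code
+//   XRayFileHeader H;
+//   memcpy(&H, Data, sizeof(H));
+//   if (H.Type == FileTypes::FDR_LOG) {
+//     // FDR mode stores the per-thread buffer size in the union.
+//     uint64_t ThreadBufferSize = H.FdrData.ThreadBufferSize;
+//   }
+// \endcode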
+
+enum RecordTypes {
+  NORMAL = 0,
+  ARG_PAYLOAD = 1,
+};
+
+struct alignas(32) XRayRecord {
+  // This is the type of the record being written. We use 16 bits to allow us
+  // to treat this as a discriminant, and so that the first 4 bytes get packed
+  // properly. See RecordTypes for the supported types.
+  uint16_t RecordType = RecordTypes::NORMAL;
+
+  // The CPU where the thread is running. We assume the number of CPUs <= 256.
+  uint8_t CPU = 0;
+
+  // The type of the event. One of the following:
+  //   ENTER = 0
+  //   EXIT = 1
+  //   TAIL_EXIT = 2
+  //   ENTER_ARG = 3
+  uint8_t Type = 0;
+
+  // The function ID for the record.
+  int32_t FuncId = 0;
+
+  // The full 8 bytes of the TSC at the time the log record is written.
+  uint64_t TSC = 0;
+
+  // The thread ID for the currently running thread.
+  uint32_t TId = 0;
+
+  // The ID of the process that is currently running.
+  uint32_t PId = 0;
+
+  // Reserve some bytes at the end of the record as a buffer.
+  char Buffer[8] = {};
+} __attribute__((packed));
+
+static_assert(sizeof(XRayRecord) == 32, "XRayRecord != 32 bytes");
+
+struct alignas(32) XRayArgPayload {
+  // We use the same 16 bits as a discriminant for the records in the log here
+  // too, so that the first 4 bytes are packed properly.
+  uint16_t RecordType = RecordTypes::ARG_PAYLOAD;
+
+  // Two bytes of padding.
+  uint8_t Padding[2] = {};
+
+  // The function ID for the record.
+  int32_t FuncId = 0;
+
+  // The thread ID for the currently running thread.
+  uint32_t TId = 0;
+
+  // The ID of the process that is currently running.
+  uint32_t PId = 0;
+
+  // The argument payload.
+  uint64_t Arg = 0;
+
+  // The rest of this record ought to be left as padding.
+  uint8_t TailPadding[8] = {};
+} __attribute__((packed));
+
+static_assert(sizeof(XRayArgPayload) == 32, "XRayArgPayload != 32 bytes");
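+
+// A minimal dispatch sketch (not part of the original header): XRayRecord and
+// XRayArgPayload are both 32 bytes and share a leading 16-bit discriminant,
+// so a naive-log reader can peek at RecordType before deciding how to
+// interpret each 32-byte chunk. `Data`/`Size` are assumed to describe a full
+// trace, and error handling is elided.
+//
+// \code
+//   for (size_t Off = sizeof(XRayFileHeader); Off + 32 <= Size; Off += 32) {
+//     uint16_t RecordType;
+//     memcpy(&RecordType, Data + Off, sizeof(RecordType));
+//     if (RecordType == RecordTypes::NORMAL) {
+//       XRayRecord R;
+//       memcpy(&R, Data + Off, sizeof(R));
+//       // ... consume R.FuncId, R.TSC, R.TId ...
+//     } else if (RecordType == RecordTypes::ARG_PAYLOAD) {
+//       XRayArgPayload P;
+//       memcpy(&P, Data + Off, sizeof(P));
+//       // ... consume P.FuncId, P.Arg ...
+//     }
+//   }
+// \endcode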
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_RECORDS_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsavecintrin.h b/linux-x86/lib64/clang/16.0.2/include/xsavecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsavecintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xsavecintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsaveintrin.h b/linux-x86/lib64/clang/16.0.2/include/xsaveintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsaveintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xsaveintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h b/linux-x86/lib64/clang/16.0.2/include/xsaveoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xsaveoptintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsavesintrin.h b/linux-x86/lib64/clang/16.0.2/include/xsavesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsavesintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xsavesintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xtestintrin.h b/linux-x86/lib64/clang/16.0.2/include/xtestintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xtestintrin.h
rename to linux-x86/lib64/clang/16.0.2/include/xtestintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt b/linux-x86/lib64/clang/16.0.2/share/asan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt
rename to linux-x86/lib64/clang/16.0.2/share/asan_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt b/linux-x86/lib64/clang/16.0.2/share/cfi_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt
rename to linux-x86/lib64/clang/16.0.2/share/cfi_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt b/linux-x86/lib64/clang/16.0.2/share/dfsan_abilist.txt
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt
rename to linux-x86/lib64/clang/16.0.2/share/dfsan_abilist.txt
index 18e069c..6a3094f 100644
--- a/linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt
+++ b/linux-x86/lib64/clang/16.0.2/share/dfsan_abilist.txt
@@ -30,16 +30,26 @@
 fun:dfsan_flush=discard
 fun:dfsan_print_origin_trace=uninstrumented
 fun:dfsan_print_origin_trace=discard
+fun:dfsan_print_origin_id_trace=uninstrumented
+fun:dfsan_print_origin_id_trace=discard
 fun:dfsan_sprint_origin_trace=uninstrumented
 fun:dfsan_sprint_origin_trace=discard
+fun:dfsan_sprint_origin_id_trace=uninstrumented
+fun:dfsan_sprint_origin_id_trace=discard
 fun:dfsan_sprint_stack_trace=uninstrumented
 fun:dfsan_sprint_stack_trace=discard
 fun:dfsan_get_origin=uninstrumented
 fun:dfsan_get_origin=custom
+fun:dfsan_read_origin_of_first_taint=uninstrumented
+fun:dfsan_read_origin_of_first_taint=discard
 fun:dfsan_get_init_origin=uninstrumented
 fun:dfsan_get_init_origin=discard
 fun:dfsan_get_track_origins=uninstrumented
 fun:dfsan_get_track_origins=discard
+fun:dfsan_set_conditional_callback=uninstrumented
+fun:dfsan_set_conditional_callback=discard
+fun:dfsan_get_labels_in_signal_conditional=uninstrumented
+fun:dfsan_get_labels_in_signal_conditional=discard
 
 ###############################################################################
 # glibc
diff --git a/linux-x86/lib64/clang/14.0.2/share/hwasan_ignorelist.txt b/linux-x86/lib64/clang/16.0.2/share/hwasan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/hwasan_ignorelist.txt
rename to linux-x86/lib64/clang/16.0.2/share/hwasan_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/msan_ignorelist.txt b/linux-x86/lib64/clang/16.0.2/share/msan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/msan_ignorelist.txt
rename to linux-x86/lib64/clang/16.0.2/share/msan_ignorelist.txt
diff --git a/linux-x86/lib64/libbase.so b/linux-x86/lib64/libbase.so
index a81fa45..42a8a67 100755
--- a/linux-x86/lib64/libbase.so
+++ b/linux-x86/lib64/libbase.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so b/linux-x86/lib64/libc++.so
index 17b14c9..f22f2fb 100755
--- a/linux-x86/lib64/libc++.so
+++ b/linux-x86/lib64/libc++.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so.1 b/linux-x86/lib64/libc++.so.1
index a653f15..0f87d40 100644
--- a/linux-x86/lib64/libc++.so.1
+++ b/linux-x86/lib64/libc++.so.1
Binary files differ
diff --git a/linux-x86/lib64/libclang-cpp.so.14git b/linux-x86/lib64/libclang-cpp.so.14git
deleted file mode 100644
index 0873573..0000000
--- a/linux-x86/lib64/libclang-cpp.so.14git
+++ /dev/null
Binary files differ
diff --git a/linux-x86/lib64/libclang-cpp.so.16 b/linux-x86/lib64/libclang-cpp.so.16
new file mode 100644
index 0000000..9507b9e
--- /dev/null
+++ b/linux-x86/lib64/libclang-cpp.so.16
Binary files differ
diff --git a/linux-x86/lib64/liblog.so b/linux-x86/lib64/liblog.so
index eaf85b2..146d8d2 100755
--- a/linux-x86/lib64/liblog.so
+++ b/linux-x86/lib64/liblog.so
Binary files differ
diff --git a/linux-x86/lib64/libprotobuf-cpp-full.so b/linux-x86/lib64/libprotobuf-cpp-full.so
index 3b5b9ba..b9fcaa3 100755
--- a/linux-x86/lib64/libprotobuf-cpp-full.so
+++ b/linux-x86/lib64/libprotobuf-cpp-full.so
Binary files differ
diff --git a/linux-x86/lib64/libz-host.so b/linux-x86/lib64/libz-host.so
index ea62227..6166a59 100755
--- a/linux-x86/lib64/libz-host.so
+++ b/linux-x86/lib64/libz-host.so
Binary files differ
diff --git a/linux-x86/lib64/libziparchive.so b/linux-x86/lib64/libziparchive.so
index 8f9735c..611746a 100755
--- a/linux-x86/lib64/libziparchive.so
+++ b/linux-x86/lib64/libziparchive.so
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index 597da4c..7b96989 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -5,77 +5,99 @@
 
   <default revision="master" remote="aosp" sync-j="4" />
 
-  <project path="build/make" name="platform/build" revision="3f852994b489372f8ce22bfe011e21241f85a1dc">
+  <project path="build/bazel" name="platform/build/bazel" groups="pdk" revision="5b142a3481e32434ee19869d69d06bd249b403e6">
+    <linkfile dest="WORKSPACE" src="bazel.WORKSPACE" />
+
+    <linkfile dest="BUILD" src="bazel.BUILD" />
+</project>
+
+  <project path="build/bazel_common_rules" name="platform/build/bazel_common_rules" groups="pdk" revision="4e0aa41c4eda608ec715d704f8ab511e3dd10774" />
+
+  <project path="build/make" name="platform/build" revision="627fa73df58e06373374e20e84c73e9c86bf93b2">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
-  <project path="build/blueprint" name="platform/build/blueprint" revision="5860caea339440f1040ee841c4e6bcfb4fd286ea" />
+  <project path="build/blueprint" name="platform/build/blueprint" revision="2ed895448b8bfe327e6d0c28dbf06ac889cb5f50" />
 
-  <project path="build/kati" name="platform/build/kati" revision="a6fd9e68b5a25b8c5726e2f653f5af47ef9087bf" />
+  <project path="build/kati" name="platform/build/kati" revision="ee538935fc914663100eb3d181dc40277131c2f6" />
 
-  <project path="build/soong" name="platform/build/soong" revision="62192b883b344f346359065cf1ca31a8e3e28a4e">
+  <project path="build/soong" name="platform/build/soong" revision="bb7c61ff61f343bc8cad6e1f597ff18fdaccc876">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
 </project>
 
-  <project path="external/golang-protobuf" name="platform/external/golang-protobuf" revision="3d215d4dc1c4ea8ff8ea484faf3420b48a75d98f" />
+  <project path="external/bazelbuild-rules_android" name="platform/external/bazelbuild-rules_android" groups="pdk" revision="24ee0a8a284d1427a26e88181c2718c16b4eb7d5" />
 
-  <project path="prebuilts/build-tools" name="platform/prebuilts/build-tools" clone-depth="1" revision="4cd39c7c1e5d3df1ae1d8f8d240b0a58e18009c1" />
+  <project path="external/bazelbuild-kotlin-rules" name="platform/external/bazelbuild-kotlin-rules" groups="pdk" revision="bf16c560ee1e97cde64a35348ee50bf580ccac12" />
 
-  <project path="prebuilts/clang-tools" name="platform/prebuilts/clang-tools" clone-depth="3" revision="42cd22c7183b98020341773430c858b064f7177a" />
+  <project path="external/bazelbuild-rules_license" name="platform/external/bazelbuild-rules_license" groups="pdk" revision="df3b8d7aa1d57f6ee108a023e90860f1e372cb4b" />
 
-  <project path="prebuilts/clang/host/linux-x86" name="platform/prebuilts/clang/host/linux-x86" groups="linux" clone-depth="1" revision="7082f5c77f7e1aad188674c09b029eb6d2a10fa7" />
+  <project path="external/bazel-skylib" name="platform/external/bazel-skylib" groups="pdk" revision="f998e5dc13c03f0eae9e373263d3afff0932c738" />
+
+  <project path="external/golang-protobuf" name="platform/external/golang-protobuf" revision="778fd2fe348de8c0dad75597fcaa7aced089ed2d" />
+
+  <project path="prebuilts/bazel/common" name="platform/prebuilts/bazel/common" groups="pdk" clone-depth="1" revision="6ee01b45b7563088911e1276a726a1fd3621d5bc" />
+
+  <project path="prebuilts/bazel/darwin-x86_64" name="platform/prebuilts/bazel/darwin-x86_64" groups="darwin,pdk" clone-depth="1" revision="562c315bf151dc64e24d482d4741cc9d17d07d23" />
+
+  <project path="prebuilts/bazel/linux-x86_64" name="platform/prebuilts/bazel/linux-x86_64" groups="linux,pdk" clone-depth="1" revision="203c463c57d37475f6482194845e41dab6b72a61" />
+
+  <project path="prebuilts/build-tools" name="platform/prebuilts/build-tools" clone-depth="1" revision="d47c9b441de07f950e63f8a686266f56f58ea6c1" />
+
+  <project path="prebuilts/clang-tools" name="platform/prebuilts/clang-tools" clone-depth="3" revision="5a67d694b44105e80185d661be8a172f6cb53ff3" />
+
+  <project path="prebuilts/clang/host/linux-x86" name="platform/prebuilts/clang/host/linux-x86" groups="linux" clone-depth="1" revision="cde2a30f4462b9a39ba360030771f71cab022186" />
 
   <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" clone-depth="1" revision="e089f0d72820a43be332be964643b83a32e4b1a7" />
 
-  <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" clone-depth="1" revision="d38e3ac65e3c32bfdd55d5caad5e13d084e9f30e" />
+  <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" clone-depth="1" revision="62a4a6b7a8b04da1ec8772eac68179bb26d62737" />
 
-  <project path="prebuilts/go/linux-x86" name="platform/prebuilts/go/linux-x86" groups="linux" clone-depth="1" revision="59ee7806ffe03f6860951ad60e5fc94ae6292b12" />
+  <project path="prebuilts/go/linux-x86" name="platform/prebuilts/go/linux-x86" groups="linux" clone-depth="1" revision="83e7429b02e77e41a22987975042f0ddaebe99f9" />
 
   <project path="prebuilts/ninja/linux-x86" name="platform/prebuilts/ninja/linux-x86" groups="linux" clone-depth="1" revision="8a10824f74fe0e22af9bf314a837f5b70e2bb67f" />
 
-  <project path="prebuilts/rust" name="platform/prebuilts/rust" clone-depth="1" revision="74f785433c788f2e604c13475e2c66ec05a3599c" />
+  <project path="prebuilts/rust" name="platform/prebuilts/rust" clone-depth="1" revision="5874efafca6c667b237c0d3e7e0b4984ea8b4547" />
 
-  <project path="prebuilts/clang/host/darwin-x86" name="platform/prebuilts/clang/host/darwin-x86" groups="darwin" clone-depth="1" revision="5b25d50ef4f64a15040f423befc064461cc42a0a" />
+  <project path="prebuilts/clang/host/darwin-x86" name="platform/prebuilts/clang/host/darwin-x86" groups="darwin" clone-depth="1" revision="abb93185e03870a7586681f34e479cf5d3595dbf" />
 
   <project path="prebuilts/gcc/darwin-x86/host/headers" name="platform/prebuilts/gcc/darwin-x86/host/headers" groups="darwin" clone-depth="1" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" />
 
   <project path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" groups="darwin" clone-depth="1" revision="e4ca4c920fa61a3277febc4be9ad69fc221cc92a" />
 
-  <project path="prebuilts/go/darwin-x86" name="platform/prebuilts/go/darwin-x86" groups="darwin" clone-depth="1" revision="bc60c3cc0809f4fcd1dae67e1e4677d33ff252eb" />
+  <project path="prebuilts/go/darwin-x86" name="platform/prebuilts/go/darwin-x86" groups="darwin" clone-depth="1" revision="1c9ca9a5c47fc0a763a16f57b43327471c558b32" />
 
   <project path="prebuilts/ninja/darwin-x86" name="platform/prebuilts/ninja/darwin-x86" groups="darwin" clone-depth="1" revision="f321e197944c19d273cec788b9a3e8ca94331248" />
 
-  <project path="prebuilts/jdk/jdk11" name="platform/prebuilts/jdk/jdk11" clone-depth="1" revision="03dd391c3b4cc25d59ae3e87836777e1e1a69766" />
+  <project path="prebuilts/jdk/jdk11" name="platform/prebuilts/jdk/jdk11" clone-depth="1" revision="bacaa8f7ac8f1b3f1247a40dd2f8d2b6ddda1f4d" />
 
   <project path="prebuilts/jdk/jdk8" name="platform/prebuilts/jdk/jdk8" clone-depth="1" revision="74e4f1844dfa9b8df9e0fe2ff34a2ecc24d52b07" />
 
   <project path="prebuilts/jdk/jdk9" name="platform/prebuilts/jdk/jdk9" clone-depth="1" revision="1f0b937930e83b0f7470b9555ad289153072882f" />
 
-  <project path="prebuilts/misc" name="platform/prebuilts/misc" clone-depth="1" revision="f8343324cfd7122c8622037400a1d7981c559f29" />
+  <project path="prebuilts/misc" name="platform/prebuilts/misc" clone-depth="1" revision="0eb9c34a313120e95b7c35a58d4051186efd6c04" />
 
-  <project path="bionic" name="platform/bionic" revision="34da4b5842bffa11f5e8e6b7a2eb5e19d8e82ccc" />
+  <project path="bionic" name="platform/bionic" revision="1cccc1a0598703a2e0e2dbb56140b22b14609848" />
 
-  <project path="development" name="platform/development" revision="d950d19bd4b5f18df26a2557f7d947c3fdd65735" />
+  <project path="development" name="platform/development" revision="462b91aae1d57eb389125e4930dcaa7303da1d6c" />
 
-  <project path="external/abseil-cpp" name="platform/external/abseil-cpp" revision="b2e637834e467cde5d914c3fee2f230919717c4e" />
+  <project path="external/abseil-cpp" name="platform/external/abseil-cpp" revision="8b019867408a98b91db4d9edba162bd085ea08c4" />
 
-  <project path="external/boringssl" name="platform/external/boringssl" groups="pdk" revision="1b7cc87c790e88ecee0f6b5e314cc849336ac4fc" />
+  <project path="external/boringssl" name="platform/external/boringssl" groups="pdk" revision="ab351f4a837dc98b862d6efdc8a7732b67c57c13" />
 
-  <project path="external/clang" name="platform/external/clang" revision="11addfade52915a076f879e6ee181d6f863138de" />
+  <project path="external/clang" name="platform/external/clang" revision="e875c47685d6d1a8f6a0784e2e8559f9383beba2" />
 
-  <project path="external/compiler-rt" name="platform/external/compiler-rt" revision="c4b55a3176985b7f286984ab1972f9efbf25a5ac" />
+  <project path="external/compiler-rt" name="platform/external/compiler-rt" revision="b429e93fbf939d13875665f7666c36dde342371d" />
 
-  <project path="external/fmtlib" name="platform/external/fmtlib" revision="ff95f5436627c3640f88049175d3fe0d0937305d" />
+  <project path="external/fmtlib" name="platform/external/fmtlib" revision="6205ebcc1dd782e70805aabc36ac9863f4df7504" />
 
-  <project path="external/gflags" name="platform/external/gflags" groups="pdk" revision="f932b1878644c2f2c74ca4b8978254099038c719" />
+  <project path="external/gflags" name="platform/external/gflags" groups="pdk" revision="061f68cd158fa658ec0b9b2b989ed55764870047" />
 
-  <project path="external/go-cmp" name="platform/external/go-cmp" revision="435f492b8ec1566bcf5370d37fa8999c8789c2a8" />
+  <project path="external/go-cmp" name="platform/external/go-cmp" revision="8bddf39794fadcd3e447f4c4f38af50438cfb475" />
 
-  <project path="external/go-creachadair-shell" name="platform/external/go-creachadair-shell" revision="e166c15bebef29f9fbcd12a3037cf6a3b0769012" />
+  <project path="external/go-creachadair-shell" name="platform/external/go-creachadair-shell" revision="3f0ffb2e3d6c03069ddc73614aafc4fdc519b3f2" />
 
-  <project path="external/go-creachadair-stringset" name="platform/external/go-creachadair-stringset" revision="33c87f956e58cfb7a3c699f402cca1585c1bc39c" />
+  <project path="external/go-creachadair-stringset" name="platform/external/go-creachadair-stringset" revision="900e89eb77f53a2224404bb07a99da9a5286e6e6" />
 
   <project path="external/go-etree" name="platform/external/go-etree" revision="7fa46d9c9eb9134443a7fc56a354f07b15fc3a76" />
 
@@ -83,103 +105,137 @@
 
   <project path="external/golang-x-sync" name="platform/external/golang-x-sync" revision="017d6e2373ef77b5861649f777857f07b195f59c" />
 
-  <project path="external/golang-x-sys" name="platform/external/golang-x-sys" revision="b7fed96c1ad0b91e858531accb49d819b48ab618" />
+  <project path="external/golang-x-sys" name="platform/external/golang-x-sys" revision="cabb8ac6ecc5813fd1e67246fc492f92ed31fe06" />
 
-  <project path="external/golang-x-tools" name="platform/external/golang-x-tools" revision="d6d1ab63f7e2d16fb9a1f1d29755d12da90aa0bb" />
+  <project path="external/golang-x-tools" name="platform/external/golang-x-tools" revision="f10932f763d058b0dcb3acfb795c869996fef47b" />
 
-  <project path="external/googletest" name="platform/external/googletest" revision="2c80bd21163728625b3ed234e27cc5c3051c118c" />
+  <project path="external/googletest" name="platform/external/googletest" revision="42795cf170b92d3e9d7f9d8bbb70a24e097e11cc" />
 
-  <project path="external/jsoncpp" name="platform/external/jsoncpp" revision="90de0e337692839a24c8266166e527905ea1dedd" />
+  <project path="external/jsoncpp" name="platform/external/jsoncpp" revision="22d33955e4c2525e659f729c9f02afe09183b107" />
 
-  <project path="external/kythe" name="platform/external/kythe" revision="726c3cbd918db7997008d2d88f2e6318247fc7e1" />
+  <project path="external/kythe" name="platform/external/kythe" revision="af58a1a6d90dceb795f8975b75cf3769b0318575" />
 
-  <project path="external/libcxx" name="platform/external/libcxx" revision="df8cb36848886320003feca8487816119ac9cfe5" />
+  <project path="external/libcxx" name="platform/external/libcxx" revision="fe9066ae4e98d7d2851c309b4134c59c9fab9d83" />
 
-  <project path="external/libcxxabi" name="platform/external/libcxxabi" revision="1c0c59d81658530697d0a4dee184cf5bb2eed502" />
+  <project path="external/libcxxabi" name="platform/external/libcxxabi" revision="ecb2748e588a5df9de72d37dfed68a5bb23eabad" />
 
   <project path="external/libunwind" name="platform/external/libunwind" revision="ec57b05e0acc7838d2a0f75551a18d28f670692e" />
 
   <project path="external/libunwind_llvm" name="platform/external/libunwind_llvm" revision="b0da2a6ab983c43638eea6e70f46d8265cb29d3a" />
 
-  <project path="external/llvm" name="platform/external/llvm" revision="7d0af1d16600352c5a243a0be80210f2a307ba6f" />
+  <project path="external/llvm" name="platform/external/llvm" revision="2763906a15b3e1724ba29326c833994488b4b34f" />
 
-  <project path="external/protobuf" name="platform/external/protobuf" revision="1523bc6d1ad2933d6c6a5ecdc845e24f0cb6f42b" />
+  <project path="external/protobuf" name="platform/external/protobuf" revision="033678466ea7c060c93a3305bd069c7973870615" />
 
-  <project path="external/python/cpython2" name="platform/external/python/cpython2" revision="d9205f3aa700214fb7f435c873a84bd2a1e70373" />
+  <project path="external/python/cpython2" name="platform/external/python/cpython2" revision="f4c6431b80426d19655ea61e1b5e1505c8b9bbe4" />
 
-  <project path="external/python/cpython3" name="platform/external/python/cpython3" revision="76fcd27dd7894d37034f529b057e3e80b0fb9cd1" />
+  <project path="external/python/cpython3" name="platform/external/python/cpython3" revision="591cfdd28c4bae340be6a625bb2ef2b9a7a64867" />
 
   <project path="external/rapidjson" name="platform/external/rapidjson" revision="9fa2a3d9e356a1f42a6184dcf1e0508ddfa9dbfb" />
 
   <project path="external/regex-re2" name="platform/external/regex-re2" revision="84e28962b2c2f357b5daccb460501b169193fafe" />
 
-  <project path="external/starlark-go" name="platform/external/starlark-go" revision="3f012eaf4c5218a4547ed55682358369aadae0be" />
+  <project path="external/starlark-go" name="platform/external/starlark-go" revision="312f9e324bdf0bde540b9a64d05ce0db85180478" />
 
-  <project path="external/zlib" name="platform/external/zlib" revision="9163b0eef1def86a87724506ce439a08d6bf563b" />
+  <project path="external/zlib" name="platform/external/zlib" revision="4d02aae00087d32d4cd7e74c038d7256dc544fa9" />
 
-  <project path="external/zopfli" name="platform/external/zopfli" revision="2999cd7058e3a1fd596e9f00b17d35eebcd46879" />
+  <project path="external/zopfli" name="platform/external/zopfli" revision="15fdf31c61251f3e5aa3b188df2770eb153b9484" />
 
-  <project path="system/core" name="platform/system/core" revision="ae0f4d5e4510eff3f54986a7b9252d2b9ded7b66" />
+  <project path="system/core" name="platform/system/core" revision="915ea84dcaf5944b73c234fe366b7d009c4714fd" />
 
-  <project path="system/libbase" name="platform/system/libbase" revision="542f384c6652ee020977c6583fc3ed814df4fc91" />
+  <project path="system/libbase" name="platform/system/libbase" revision="f96a42544b63f4826d123da79c84e62be46d66bb" />
 
-  <project path="system/logging" name="platform/system/logging" revision="83985071c8d7cafea915a0bf9e9e62e9a9ffa287" />
+  <project path="system/logging" name="platform/system/logging" revision="c0cafdb9f74801f8add07d8dccc5178294426b51" />
 
-  <project path="system/libziparchive" name="platform/system/libziparchive" revision="38bcc139f44eb4d9ccfb612c26d91a3ddefc3b00" />
+  <project path="system/libziparchive" name="platform/system/libziparchive" revision="4fd7f7224b73b0cc54a24caae69a34008d28db2b" />
 
-  <project path="external/rust/crates/aho-corasick" name="platform/external/rust/crates/aho-corasick" revision="d414aacc5142856d6b9ab89c09f47bd2101917aa" />
+  <project path="external/rust/crates/ahash" name="platform/external/rust/crates/ahash" revision="8364382b13ca88bf37ee623ed253c8684d1c021a" />
 
-  <project path="external/rust/crates/bindgen" name="platform/external/rust/crates/bindgen" revision="245db42fdadbddd76ddd7f3c6bf030b55d61e662" />
+  <project path="external/rust/crates/aho-corasick" name="platform/external/rust/crates/aho-corasick" revision="239846a6e7e0442ee767f98230b36f6fd4d6d82b" />
 
-  <project path="external/rust/crates/bitflags" name="platform/external/rust/crates/bitflags" revision="3be99c1ed0cc14067a2c2badd1ae931e46b49693" />
+  <project path="external/rust/crates/bindgen" name="platform/external/rust/crates/bindgen" revision="52d57a2226e2c6c74efca4797b483d48f48db28d" />
 
-  <project path="external/rust/crates/cexpr" name="platform/external/rust/crates/cexpr" revision="d713000e27149aaeb8c18a0171b1c2d81c7db4c6" />
+  <project path="external/rust/crates/bindgen-cli" name="platform/external/rust/crates/bindgen-cli" revision="76bc289dd0c5e06e7b0f46eadd8b8badd8501c39" />
 
-  <project path="external/rust/crates/cfg-if" name="platform/external/rust/crates/cfg-if" revision="3a0379aa3da7a72be33132486b87df59254d6354" />
+  <project path="external/rust/crates/bitflags" name="platform/external/rust/crates/bitflags" revision="19a0ccbdab1539c5d011d1903849d43733bbea7a" />
 
-  <project path="external/rust/crates/clang-sys" name="platform/external/rust/crates/clang-sys" revision="9776ef82a5bb62a362aa1b2143996e74716f1fc2" />
+  <project path="external/rust/crates/cexpr" name="platform/external/rust/crates/cexpr" revision="94fcfdd31ceda48c7062d02e9faff4e029309acd" />
 
-  <project path="external/rust/crates/clap" name="platform/external/rust/crates/clap" revision="d9335dc735699a0d1c54490e8c2b052a90430106" />
+  <project path="external/rust/crates/cfg-if" name="platform/external/rust/crates/cfg-if" revision="844c47fc04cd052d39083949700439667d463db5" />
 
-  <project path="external/rust/crates/either" name="platform/external/rust/crates/either" revision="88fac7b76f59e6fbf781aa51c4cbc39190bf9237" />
+  <project path="external/rust/crates/clang-sys" name="platform/external/rust/crates/clang-sys" revision="51ac081bd7293201dfae534301d6a6c06b99221e" />
 
-  <project path="external/rust/crates/glob" name="platform/external/rust/crates/glob" revision="6ec2da13f2a49633691a8af4874bd802f10365f5" />
+  <project path="external/rust/crates/clap" name="platform/external/rust/crates/clap" revision="6cdbbcc640a9c8b163bdc81a211d7382fe0a9b49" />
 
-  <project path="external/rust/crates/lazy_static" name="platform/external/rust/crates/lazy_static" revision="fcce086e4b5164edc5c6acc7dae07d1aecdaa1e0" />
+  <project path="external/rust/crates/clap_derive" name="platform/external/rust/crates/clap_derive" revision="a1c315da4f7cfdf0a8274e020fc54d1f5d52fdae" />
 
-  <project path="external/rust/crates/lazycell" name="platform/external/rust/crates/lazycell" revision="22b0736188f3e445f7c832ae91f4f54aac98275c" />
+  <project path="external/rust/crates/clap_lex" name="platform/external/rust/crates/clap_lex" revision="d9be6717798ed1ff192c69114da1e29277e0617f" />
 
-  <project path="external/rust/crates/libc" name="platform/external/rust/crates/libc" revision="3672b8eaaf0eac98447c2408220317bc87ff56c4" />
+  <project path="external/rust/crates/either" name="platform/external/rust/crates/either" revision="e0d0c6826ed15c4030b11db825632b7e24503f9f" />
 
-  <project path="external/rust/crates/libloading" name="platform/external/rust/crates/libloading" revision="46d5ad1c7ec6697c3dad1099dfe4592be4e2e63a" />
+  <project path="external/rust/crates/env_logger" name="platform/external/rust/crates/env_logger" revision="130fdc6c5bcb1e3a881f37205f347723ee1b1f53" />
 
-  <project path="external/rust/crates/memchr" name="platform/external/rust/crates/memchr" revision="86c9a8c9b6b91e67029a2b218f9a8e13f45251aa" />
+  <project path="external/rust/crates/getrandom" name="platform/external/rust/crates/getrandom" revision="175407161c0ac512b961aef146a14748847570d8" />
 
-  <project path="external/rust/crates/minimal-lexical" name="platform/external/rust/crates/minimal-lexical" revision="b483d0cb23801bdf1422fbbc8f25e6e7c1ebacc0" />
+  <project path="external/rust/crates/glob" name="platform/external/rust/crates/glob" revision="f159f2a269dc132519220263f2ff926951aa1bf3" />
 
-  <project path="external/rust/crates/nom" name="platform/external/rust/crates/nom" revision="7132b01d1e71be9dbca5a716c8e06ab1f59e0038" />
+  <project path="external/rust/crates/hashbrown" name="platform/external/rust/crates/hashbrown" revision="4ac1626279d59a09d0f7d6d7c63aec6a38f75292" />
 
-  <project path="external/rust/crates/peeking_take_while" name="platform/external/rust/crates/peeking_take_while" revision="4f5cf36e0a807b9be215406ad1e9ed8cba463b38" />
+  <project path="external/rust/crates/heck" name="platform/external/rust/crates/heck" revision="53b364a4323b42475bf7eff68dff09bf4c9a6a3d" />
 
-  <project path="external/rust/crates/proc-macro2" name="platform/external/rust/crates/proc-macro2" revision="699150cfbea59d0215e8d927ccb70c542a8a810d" />
+  <project path="external/rust/crates/indexmap" name="platform/external/rust/crates/indexmap" revision="53485017b0f431b607104c24ca718efc1e93455d" />
 
-  <project path="external/rust/crates/quote" name="platform/external/rust/crates/quote" revision="5df1ca640f7d83e36abb6a4841f7c65af76c7e30" />
+  <project path="external/rust/crates/lazy_static" name="platform/external/rust/crates/lazy_static" revision="82f1666a5a2fb28b85126b9d9c5e191cd0b7b05b" />
 
-  <project path="external/rust/crates/regex" name="platform/external/rust/crates/regex" revision="dc2fe9f00916f0ddde60759b272596c61dcfcee0" />
+  <project path="external/rust/crates/lazycell" name="platform/external/rust/crates/lazycell" revision="bf83a0a5003269e01e63c36ba01b6123dd53a841" />
 
-  <project path="external/rust/crates/regex-syntax" name="platform/external/rust/crates/regex-syntax" revision="80a32d81b04de2683243dffdf87688a4ba693899" />
+  <project path="external/rust/crates/libc" name="platform/external/rust/crates/libc" revision="e2910dc2eaa5eab1797df198700456cae768de15" />
 
-  <project path="external/rust/crates/rustc-hash" name="platform/external/rust/crates/rustc-hash" revision="d1fefa58630a7501ee3a51ae91c001bcd1411c88" />
+  <project path="external/rust/crates/libloading" name="platform/external/rust/crates/libloading" revision="1270bf741abc04b2796b2c4ee813f20f47f2b0de" />
 
-  <project path="external/rust/crates/shlex" name="platform/external/rust/crates/shlex" revision="1ed8fff4d4df757513dd524421dba7a543094f8d" />
+  <project path="external/rust/crates/log" name="platform/external/rust/crates/log" revision="2d849d358e96161eda0e889f04e2b234a3ee450f" />
 
-  <project path="external/rust/crates/textwrap" name="platform/external/rust/crates/textwrap" revision="b064c049c584f45e25aeccce13c53fc78429f4ee" />
+  <project path="external/rust/crates/memchr" name="platform/external/rust/crates/memchr" revision="ae75378e8c8cd812bdf52d8ab6930c324a1ba663" />
 
-  <project path="external/rust/crates/unicode-xid" name="platform/external/rust/crates/unicode-xid" revision="be7123bc2814780a48a2bf95a1c69496fafeefe2" />
+  <project path="external/rust/crates/minimal-lexical" name="platform/external/rust/crates/minimal-lexical" revision="22396133c8573fecca6857991ce070252beeed4a" />
 
-  <project path="external/rust/crates/which" name="platform/external/rust/crates/which" revision="4be8607c6294ac1430edb4a2f42563dae5130239" />
+  <project path="external/rust/crates/nom" name="platform/external/rust/crates/nom" revision="edeacd355eefd616177a1f6302760fc4dde24ffc" />
 
-  <project path="dalvik" name="platform/dalvik" revision="373c201450bbdc57fd72e37cac07720eeea4531c" />
+  <project path="external/rust/crates/once_cell" name="platform/external/rust/crates/once_cell" revision="a7bb59bc1a3677cb84ed6eefa91eb555ffe25ba1" />
 
-  <project path="external/ninja" name="platform/external/ninja" revision="ed250d061bd8d7682c9f434c58dd3cebeb90f4ec" />
+  <project path="external/rust/crates/os_str_bytes" name="platform/external/rust/crates/os_str_bytes" revision="6439926a2092b35dde1960b322816ee395c42563" />
+
+  <project path="external/rust/crates/peeking_take_while" name="platform/external/rust/crates/peeking_take_while" revision="98e9e5e66469c91045fa279a1c4d176ab5f52105" />
+
+  <project path="external/rust/crates/proc-macro-error-attr" name="platform/external/rust/crates/proc-macro-error-attr" revision="e720490b44e429973727f9aaedcffd3a04a4c483" />
+
+  <project path="external/rust/crates/proc-macro-error" name="platform/external/rust/crates/proc-macro-error" revision="cdec10381f5b7008255d23e5915990cac9dbd87b" />
+
+  <project path="external/rust/crates/proc-macro2" name="platform/external/rust/crates/proc-macro2" revision="1ea12c6febfbe69283cbd09f9b916d8439b94044" />
+
+  <project path="external/rust/crates/quote" name="platform/external/rust/crates/quote" revision="5c664293035bca3f04ea7495b8d012e17ddaf33e" />
+
+  <project path="external/rust/crates/regex" name="platform/external/rust/crates/regex" revision="ff4936bdd86082b32be847e34e56359d48269159" />
+
+  <project path="external/rust/crates/regex-syntax" name="platform/external/rust/crates/regex-syntax" revision="b0483ab718551b7fa1a5f04e57f0a28e625ae16a" />
+
+  <project path="external/rust/crates/rustc-hash" name="platform/external/rust/crates/rustc-hash" revision="114597b8d4660c3710b242acec5bf6144c05a961" />
+
+  <project path="external/rust/crates/shlex" name="platform/external/rust/crates/shlex" revision="bf7615b549ce7df1a69b8e599b5e310858a98324" />
+
+  <project path="external/rust/crates/syn" name="platform/external/rust/crates/syn" revision="4b98a0b55839df6477d82fd3e4a76e37a45c920d" />
+
+  <project path="external/rust/crates/textwrap" name="platform/external/rust/crates/textwrap" revision="cf6bfd0e8f434cfbf7d2948b7c7c41df27f33d09" />
+
+  <project path="external/rust/crates/unicode-ident" name="platform/external/rust/crates/unicode-ident" revision="6fdf8276e45bde228a7168d9493fba1993aa052d" />
+
+  <project path="external/rust/crates/unicode-segmentation" name="platform/external/rust/crates/unicode-segmentation" revision="312cd176033d5756e98af2da5cf7057dde3680b4" />
+
+  <project path="external/rust/crates/unicode-xid" name="platform/external/rust/crates/unicode-xid" revision="348239c2ceb2c01b3d558c3782396c65e8d14e2d" />
+
+  <project path="external/rust/crates/which" name="platform/external/rust/crates/which" revision="6e9a8c8b4a5acda14e6b59a691a49ab4daf91e6d" />
+
+  <project path="dalvik" name="platform/dalvik" revision="6e0382981058446503efddd7c970111e59c7e550" />
+
+  <project path="external/ninja" name="platform/external/ninja" revision="cdd5c80b94f89be8282b8bd43760239516c1c5ac" />
 </manifest>